From a8da36b913e711dc285ddd360d94e65d9261d8c1 Mon Sep 17 00:00:00 2001 From: Tim Myers Date: Sun, 19 Jan 2020 13:49:59 -0700 Subject: [PATCH 01/16] fix(dockerfile): Add ca-certificates so eth1 calls work in docker. (#796) --- Dockerfile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 1a19ddee3f..6fd9a3bd0a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,5 +3,9 @@ COPY . lighthouse RUN cd lighthouse && make && cargo clean FROM debian:buster-slim -RUN apt-get update && apt-get install -y libssl-dev +RUN apt-get update && apt-get install -y --no-install-recommends \ + libssl-dev \ + ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse From 661ef65de8263666bec7bc5d1acbadda0b950075 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 20 Jan 2020 02:22:59 +0530 Subject: [PATCH 02/16] Persist eth1 cache (#760) * Add intermediate structures for bytes conversion * Expose byte conversion methods from `Eth1Service` * Add eth1 ssz containers * Fix type errors * Load eth1 cache on restart * Fix compile errors * Update Cargo.lock * Add comments and minor formatting * Add test for eth1 cache persistence * Restrict Deposit and Block cache field visibility * Add checks * Fix `SszDepositCache` check * Implement Encode/Decode directly on `BlockCache` --- Cargo.lock | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 5 +- beacon_node/beacon_chain/src/builder.rs | 18 +-- beacon_node/beacon_chain/src/eth1_chain.rs | 110 +++++++++++++++--- .../src/persisted_beacon_chain.rs | 2 + beacon_node/beacon_chain/src/test_utils.rs | 3 +- beacon_node/client/src/builder.rs | 21 ++-- beacon_node/eth1/Cargo.toml | 1 + beacon_node/eth1/src/block_cache.rs | 5 +- beacon_node/eth1/src/deposit_cache.rs | 43 +++++++ beacon_node/eth1/src/deposit_log.rs | 3 +- beacon_node/eth1/src/inner.rs | 50 +++++++- beacon_node/eth1/src/lib.rs | 1 + 
beacon_node/eth1/src/service.rs | 14 +++ beacon_node/eth1/tests/test.rs | 74 ++++++++++++ 15 files changed, 315 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a0a3f76bd8..05787dde38 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1087,6 +1087,7 @@ dependencies = [ "eth1_test_rig 0.1.0", "eth2_hashing 0.1.1", "eth2_ssz 0.1.2", + "eth2_ssz_derive 0.1.0", "exit-future 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 99e8f4b050..cf55297a77 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -116,7 +116,7 @@ pub trait BeaconChainTypes: Send + Sync + 'static { type StoreMigrator: store::Migrate; type SlotClock: slot_clock::SlotClock; type LmdGhost: LmdGhost; - type Eth1Chain: Eth1ChainBackend; + type Eth1Chain: Eth1ChainBackend; type EthSpec: types::EthSpec; type EventHandler: EventHandler; } @@ -135,7 +135,7 @@ pub struct BeaconChain { /// inclusion in a block. pub op_pool: OperationPool, /// Provides information from the Ethereum 1 (PoW) chain. - pub eth1_chain: Option>, + pub eth1_chain: Option>, /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. pub(crate) canonical_head: TimeoutRwLock>, /// The root of the genesis block. 
@@ -190,6 +190,7 @@ impl BeaconChain { genesis_block_root: self.genesis_block_root, ssz_head_tracker: self.head_tracker.to_ssz_container(), fork_choice: self.fork_choice.as_ssz_container(), + eth1_cache: self.eth1_chain.as_ref().map(|x| x.as_ssz_container()), block_root_tree: self.block_root_tree.as_ssz_container(), }; diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 25201c0910..5738d494e8 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -57,7 +57,7 @@ where TStoreMigrator: store::Migrate + 'static, TSlotClock: SlotClock + 'static, TLmdGhost: LmdGhost + 'static, - TEth1Backend: Eth1ChainBackend + 'static, + TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, { @@ -87,7 +87,7 @@ pub struct BeaconChainBuilder { genesis_block_root: Option, op_pool: Option>, fork_choice: Option>, - eth1_chain: Option>, + eth1_chain: Option>, event_handler: Option, slot_clock: Option, persisted_beacon_chain: Option>, @@ -114,7 +114,7 @@ where TStoreMigrator: store::Migrate + 'static, TSlotClock: SlotClock + 'static, TLmdGhost: LmdGhost + 'static, - TEth1Backend: Eth1ChainBackend + 'static, + TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, { @@ -175,7 +175,7 @@ where /// Attempt to load an existing chain from the builder's `Store`. /// /// May initialize several components; including the op_pool and finalized checkpoints. 
- pub fn resume_from_db(mut self) -> Result { + pub fn resume_from_db(mut self, config: Eth1Config) -> Result { let log = self .log .as_ref() @@ -226,6 +226,10 @@ where HeadTracker::from_ssz_container(&p.ssz_head_tracker) .map_err(|e| format!("Failed to decode head tracker for database: {:?}", e))?, ); + self.eth1_chain = match &p.eth1_cache { + Some(cache) => Some(Eth1Chain::from_ssz_container(cache, config, store, log)?), + None => None, + }; self.block_root_tree = Some(Arc::new(p.block_root_tree.clone().into())); self.persisted_beacon_chain = Some(p); @@ -422,7 +426,7 @@ where TStore: Store + 'static, TStoreMigrator: store::Migrate + 'static, TSlotClock: SlotClock + 'static, - TEth1Backend: Eth1ChainBackend + 'static, + TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, { @@ -541,7 +545,7 @@ where TStore: Store + 'static, TStoreMigrator: store::Migrate + 'static, TLmdGhost: LmdGhost + 'static, - TEth1Backend: Eth1ChainBackend + 'static, + TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, { @@ -583,7 +587,7 @@ where TStoreMigrator: store::Migrate + 'static, TSlotClock: SlotClock + 'static, TLmdGhost: LmdGhost + 'static, - TEth1Backend: Eth1ChainBackend + 'static, + TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, { /// Sets the `BeaconChain` event handler to `NullEventHandler`. 
diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index ce11b75d7e..9b7f772048 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -6,6 +6,7 @@ use futures::Future; use integer_sqrt::IntegerSquareRoot; use rand::prelude::*; use slog::{crit, debug, error, trace, Logger}; +use ssz_derive::{Decode, Encode}; use state_processing::per_block_processing::get_new_eth1_data; use std::collections::HashMap; use std::iter::DoubleEndedIterator; @@ -48,23 +49,31 @@ pub enum Error { UnknownPreviousEth1BlockHash, } +#[derive(Encode, Decode, Clone)] +pub struct SszEth1 { + use_dummy_backend: bool, + backend_bytes: Vec, +} + /// Holds an `Eth1ChainBackend` and serves requests from the `BeaconChain`. -pub struct Eth1Chain +pub struct Eth1Chain where - T: Eth1ChainBackend, + T: Eth1ChainBackend, E: EthSpec, + S: Store, { backend: T, /// When `true`, the backend will be ignored and dummy data from the 2019 Canada interop method /// will be used instead. 
pub use_dummy_backend: bool, - _phantom: PhantomData, + _phantom: PhantomData<(E, S)>, } -impl Eth1Chain +impl Eth1Chain where - T: Eth1ChainBackend, + T: Eth1ChainBackend, E: EthSpec, + S: Store, { pub fn new(backend: T) -> Self { Self { @@ -82,7 +91,8 @@ where spec: &ChainSpec, ) -> Result { if self.use_dummy_backend { - DummyEth1ChainBackend::default().eth1_data(state, spec) + let dummy_backend: DummyEth1ChainBackend = DummyEth1ChainBackend::default(); + dummy_backend.eth1_data(state, spec) } else { self.backend.eth1_data(state, spec) } @@ -103,14 +113,41 @@ where spec: &ChainSpec, ) -> Result, Error> { if self.use_dummy_backend { - DummyEth1ChainBackend::default().queued_deposits(state, eth1_data_vote, spec) + let dummy_backend: DummyEth1ChainBackend = DummyEth1ChainBackend::default(); + dummy_backend.queued_deposits(state, eth1_data_vote, spec) } else { self.backend.queued_deposits(state, eth1_data_vote, spec) } } + + /// Instantiate `Eth1Chain` from a persisted `SszEth1`. + /// + /// The `Eth1Chain` will have the same caches as the persisted `SszEth1`. + pub fn from_ssz_container( + ssz_container: &SszEth1, + config: Eth1Config, + store: Arc, + log: &Logger, + ) -> Result { + let backend = + Eth1ChainBackend::from_bytes(&ssz_container.backend_bytes, config, store, log.clone())?; + Ok(Self { + use_dummy_backend: ssz_container.use_dummy_backend, + backend, + _phantom: PhantomData, + }) + } + + /// Return a `SszEth1` containing the state of `Eth1Chain`. + pub fn as_ssz_container(&self) -> SszEth1 { + SszEth1 { + use_dummy_backend: self.use_dummy_backend, + backend_bytes: self.backend.as_bytes(), + } + } } -pub trait Eth1ChainBackend: Sized + Send + Sync { +pub trait Eth1ChainBackend>: Sized + Send + Sync { /// Returns the `Eth1Data` that should be included in a block being produced for the given /// `state`. 
fn eth1_data(&self, beacon_state: &BeaconState, spec: &ChainSpec) @@ -129,6 +166,17 @@ pub trait Eth1ChainBackend: Sized + Send + Sync { eth1_data_vote: &Eth1Data, spec: &ChainSpec, ) -> Result, Error>; + + /// Encode the `Eth1ChainBackend` instance to bytes. + fn as_bytes(&self) -> Vec; + + /// Create a `Eth1ChainBackend` instance given encoded bytes. + fn from_bytes( + bytes: &[u8], + config: Eth1Config, + store: Arc, + log: Logger, + ) -> Result; } /// Provides a simple, testing-only backend that generates deterministic, meaningless eth1 data. @@ -136,9 +184,9 @@ pub trait Eth1ChainBackend: Sized + Send + Sync { /// Never creates deposits, therefore the validator set is static. /// /// This was used in the 2019 Canada interop workshops. -pub struct DummyEth1ChainBackend(PhantomData); +pub struct DummyEth1ChainBackend>(PhantomData<(T, S)>); -impl Eth1ChainBackend for DummyEth1ChainBackend { +impl> Eth1ChainBackend for DummyEth1ChainBackend { /// Produce some deterministic junk based upon the current epoch. fn eth1_data(&self, state: &BeaconState, _spec: &ChainSpec) -> Result { let current_epoch = state.current_epoch(); @@ -164,9 +212,24 @@ impl Eth1ChainBackend for DummyEth1ChainBackend { ) -> Result, Error> { Ok(vec![]) } + + /// Return empty Vec for dummy backend. + fn as_bytes(&self) -> Vec { + Vec::new() + } + + /// Create dummy eth1 backend. + fn from_bytes( + _bytes: &[u8], + _config: Eth1Config, + _store: Arc, + _log: Logger, + ) -> Result { + Ok(Self(PhantomData)) + } } -impl Default for DummyEth1ChainBackend { +impl> Default for DummyEth1ChainBackend { fn default() -> Self { Self(PhantomData) } @@ -214,7 +277,7 @@ impl> CachingEth1Backend { } } -impl> Eth1ChainBackend for CachingEth1Backend { +impl> Eth1ChainBackend for CachingEth1Backend { fn eth1_data(&self, state: &BeaconState, spec: &ChainSpec) -> Result { // Note: we do not return random junk if this function call fails as it would be caused by // an internal error. 
@@ -346,6 +409,27 @@ impl> Eth1ChainBackend for CachingEth1Backend { .map(|(_deposit_root, deposits)| deposits) } } + + /// Return encoded byte representation of the block and deposit caches. + fn as_bytes(&self) -> Vec { + self.core.as_bytes() + } + + /// Recover the cached backend from encoded bytes. + fn from_bytes( + bytes: &[u8], + config: Eth1Config, + store: Arc, + log: Logger, + ) -> Result { + let inner = HttpService::from_bytes(bytes, config, log.clone())?; + Ok(Self { + core: inner, + store, + log, + _phantom: PhantomData, + }) + } } /// Produces an `Eth1Data` with all fields sourced from `rand::thread_rng()`. @@ -584,7 +668,7 @@ mod test { use store::MemoryStore; use types::test_utils::{generate_deterministic_keypair, TestingDepositBuilder}; - fn get_eth1_chain() -> Eth1Chain>, E> { + fn get_eth1_chain() -> Eth1Chain>, E, MemoryStore> { let eth1_config = Eth1Config { ..Eth1Config::default() }; diff --git a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs index 7f42466e22..a0e281a98c 100644 --- a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs +++ b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs @@ -1,3 +1,4 @@ +use crate::eth1_chain::SszEth1; use crate::fork_choice::SszForkChoice; use crate::head_tracker::SszHeadTracker; use crate::{BeaconChainTypes, CheckPoint}; @@ -18,6 +19,7 @@ pub struct PersistedBeaconChain { pub genesis_block_root: Hash256, pub ssz_head_tracker: SszHeadTracker, pub fork_choice: SszForkChoice, + pub eth1_cache: Option, pub block_root_tree: SszBlockRootTree, } diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 9aad237e1f..fbf437fd18 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -4,6 +4,7 @@ use crate::{ events::NullEventHandler, AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BlockProcessingOutcome, }; +use eth1::Config as 
Eth1Config; use genesis::interop_genesis_state; use lmd_ghost::ThreadSafeReducedTree; use rayon::prelude::*; @@ -175,7 +176,7 @@ impl BeaconChainHarness> { .custom_spec(spec.clone()) .store(store.clone()) .store_migrator( as Migrate<_, E>>::new(store)) - .resume_from_db() + .resume_from_db(Eth1Config::default()) .expect("should resume beacon chain from db") .dummy_eth1_backend() .expect("should build dummy backend") diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 4a219ef0b1..d35fbeb46f 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -84,7 +84,7 @@ where TStoreMigrator: store::Migrate, TSlotClock: SlotClock + Clone + 'static, TLmdGhost: LmdGhost + 'static, - TEth1Backend: Eth1ChainBackend + 'static, + TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, { @@ -241,7 +241,10 @@ where Box::new(future) } ClientGenesis::Resume => { - let future = builder.resume_from_db().into_future().map(|v| (v, None)); + let future = builder + .resume_from_db(config) + .into_future() + .map(|v| (v, None)); Box::new(future) } @@ -401,7 +404,7 @@ where TStore: Store + 'static, TStoreMigrator: store::Migrate, TSlotClock: SlotClock + Clone + 'static, - TEth1Backend: Eth1ChainBackend + 'static, + TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, { @@ -449,7 +452,7 @@ where TStoreMigrator: store::Migrate, TSlotClock: SlotClock + 'static, TLmdGhost: LmdGhost + 'static, - TEth1Backend: Eth1ChainBackend + 'static, + TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, { /// Specifies that the `BeaconChain` should publish events using the WebSocket server. 
@@ -498,7 +501,7 @@ where TSlotClock: SlotClock + 'static, TStoreMigrator: store::Migrate, TEthSpec> + 'static, TLmdGhost: LmdGhost, TEthSpec> + 'static, - TEth1Backend: Eth1ChainBackend + 'static, + TEth1Backend: Eth1ChainBackend> + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, { @@ -548,7 +551,7 @@ where TSlotClock: SlotClock + 'static, TStoreMigrator: store::Migrate, TEthSpec> + 'static, TLmdGhost: LmdGhost, TEthSpec> + 'static, - TEth1Backend: Eth1ChainBackend + 'static, + TEth1Backend: Eth1ChainBackend> + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, { @@ -576,7 +579,7 @@ impl where TSlotClock: SlotClock + 'static, TLmdGhost: LmdGhost, TEthSpec> + 'static, - TEth1Backend: Eth1ChainBackend + 'static, + TEth1Backend: Eth1ChainBackend> + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, { @@ -606,7 +609,7 @@ impl where TSlotClock: SlotClock + 'static, TLmdGhost: LmdGhost, TEthSpec> + 'static, - TEth1Backend: Eth1ChainBackend + 'static, + TEth1Backend: Eth1ChainBackend> + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, { @@ -737,7 +740,7 @@ where TStore: Store + 'static, TStoreMigrator: store::Migrate, TLmdGhost: LmdGhost + 'static, - TEth1Backend: Eth1ChainBackend + 'static, + TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, { diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 604e0a2bc5..03e592940b 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -19,6 +19,7 @@ hex = "0.3" types = { path = "../../eth2/types"} merkle_proof = { path = "../../eth2/utils/merkle_proof"} eth2_ssz = { path = "../../eth2/utils/ssz"} +eth2_ssz_derive = "0.1.0" tree_hash = { path = "../../eth2/utils/tree_hash"} eth2_hashing = { path = "../../eth2/utils/eth2_hashing"} parking_lot = "0.7" diff --git a/beacon_node/eth1/src/block_cache.rs 
b/beacon_node/eth1/src/block_cache.rs index bdf4060b90..088d8dcfdc 100644 --- a/beacon_node/eth1/src/block_cache.rs +++ b/beacon_node/eth1/src/block_cache.rs @@ -1,3 +1,4 @@ +use ssz_derive::{Decode, Encode}; use std::ops::RangeInclusive; use types::{Eth1Data, Hash256}; @@ -17,7 +18,7 @@ pub enum Error { /// A block of the eth1 chain. /// /// Contains all information required to add a `BlockCache` entry. -#[derive(Debug, PartialEq, Clone, Eq, Hash)] +#[derive(Debug, PartialEq, Clone, Eq, Hash, Encode, Decode)] pub struct Eth1Block { pub hash: Hash256, pub timestamp: u64, @@ -38,7 +39,7 @@ impl Eth1Block { /// Stores block and deposit contract information and provides queries based upon the block /// timestamp. -#[derive(Debug, PartialEq, Clone, Default)] +#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] pub struct BlockCache { blocks: Vec, } diff --git a/beacon_node/eth1/src/deposit_cache.rs b/beacon_node/eth1/src/deposit_cache.rs index eb31bccc51..b3fe67b7ec 100644 --- a/beacon_node/eth1/src/deposit_cache.rs +++ b/beacon_node/eth1/src/deposit_cache.rs @@ -1,5 +1,6 @@ use crate::DepositLog; use eth2_hashing::hash; +use ssz_derive::{Decode, Encode}; use tree_hash::TreeHash; use types::{Deposit, Hash256, DEPOSIT_TREE_DEPTH}; @@ -79,6 +80,48 @@ impl DepositDataTree { } } +#[derive(Encode, Decode, Clone)] +pub struct SszDepositCache { + logs: Vec, + leaves: Vec, + deposit_contract_deploy_block: u64, + deposit_roots: Vec, +} + +impl SszDepositCache { + pub fn from_deposit_cache(cache: &DepositCache) -> Self { + Self { + logs: cache.logs.clone(), + leaves: cache.leaves.clone(), + deposit_contract_deploy_block: cache.deposit_contract_deploy_block, + deposit_roots: cache.deposit_roots.clone(), + } + } + + pub fn to_deposit_cache(&self) -> Result { + let deposit_tree = + DepositDataTree::create(&self.leaves, self.leaves.len(), DEPOSIT_TREE_DEPTH); + // Check for invalid SszDepositCache conditions + if self.leaves.len() != self.logs.len() { + return Err("Invalid 
SszDepositCache: logs and leaves should have equal length".into()); + } + // `deposit_roots` also includes the zero root + if self.leaves.len() + 1 != self.deposit_roots.len() { + return Err( + "Invalid SszDepositCache: deposit_roots length must be only one more than leaves" + .into(), + ); + } + Ok(DepositCache { + logs: self.logs.clone(), + leaves: self.leaves.clone(), + deposit_contract_deploy_block: self.deposit_contract_deploy_block, + deposit_tree, + deposit_roots: self.deposit_roots.clone(), + }) + } +} + /// Mirrors the merkle tree of deposits in the eth1 deposit contract. /// /// Provides `Deposit` objects with merkle proofs included. diff --git a/beacon_node/eth1/src/deposit_log.rs b/beacon_node/eth1/src/deposit_log.rs index d42825c756..4acb902a73 100644 --- a/beacon_node/eth1/src/deposit_log.rs +++ b/beacon_node/eth1/src/deposit_log.rs @@ -1,5 +1,6 @@ use super::http::Log; use ssz::Decode; +use ssz_derive::{Decode, Encode}; use types::{DepositData, Hash256, PublicKeyBytes, SignatureBytes}; /// The following constants define the layout of bytes in the deposit contract `DepositEvent`. The @@ -16,7 +17,7 @@ const INDEX_START: usize = SIG_START + 96 + 32; const INDEX_LEN: usize = 8; /// A fully parsed eth1 deposit contract log. -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Clone, Encode, Decode)] pub struct DepositLog { pub deposit_data: DepositData, /// The block number of the log that included this `DepositData`. 
diff --git a/beacon_node/eth1/src/inner.rs b/beacon_node/eth1/src/inner.rs index 76b19cb0af..eac5834e10 100644 --- a/beacon_node/eth1/src/inner.rs +++ b/beacon_node/eth1/src/inner.rs @@ -1,6 +1,11 @@ use crate::Config; -use crate::{block_cache::BlockCache, deposit_cache::DepositCache}; +use crate::{ + block_cache::BlockCache, + deposit_cache::{DepositCache, SszDepositCache}, +}; use parking_lot::RwLock; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; #[derive(Default)] pub struct DepositUpdater { @@ -34,4 +39,47 @@ impl Inner { self.block_cache.write().truncate(block_cache_truncation); } } + + /// Encode the eth1 block and deposit cache as bytes. + pub fn as_bytes(&self) -> Vec { + let ssz_eth1_cache = SszEth1Cache::from_inner(&self); + ssz_eth1_cache.as_ssz_bytes() + } + + /// Recover `Inner` given byte representation of eth1 deposit and block caches. + pub fn from_bytes(bytes: &[u8], config: Config) -> Result { + let ssz_cache = SszEth1Cache::from_ssz_bytes(bytes) + .map_err(|e| format!("Ssz decoding error: {:?}", e))?; + Ok(ssz_cache.to_inner(config)?) 
+ } +} + +#[derive(Encode, Decode, Clone)] +pub struct SszEth1Cache { + block_cache: BlockCache, + deposit_cache: SszDepositCache, + last_processed_block: Option, +} + +impl SszEth1Cache { + pub fn from_inner(inner: &Inner) -> Self { + let deposit_updater = inner.deposit_cache.read(); + let block_cache = inner.block_cache.read(); + Self { + block_cache: (*block_cache).clone(), + deposit_cache: SszDepositCache::from_deposit_cache(&deposit_updater.cache), + last_processed_block: deposit_updater.last_processed_block, + } + } + + pub fn to_inner(&self, config: Config) -> Result { + Ok(Inner { + block_cache: RwLock::new(self.block_cache.clone()), + deposit_cache: RwLock::new(DepositUpdater { + cache: self.deposit_cache.to_deposit_cache()?, + last_processed_block: self.last_processed_block, + }), + config: RwLock::new(config), + }) + } } diff --git a/beacon_node/eth1/src/lib.rs b/beacon_node/eth1/src/lib.rs index b3352c81a7..f5f018bd17 100644 --- a/beacon_node/eth1/src/lib.rs +++ b/beacon_node/eth1/src/lib.rs @@ -12,4 +12,5 @@ mod service; pub use block_cache::{BlockCache, Eth1Block}; pub use deposit_cache::DepositCache; pub use deposit_log::DepositLog; +pub use inner::SszEth1Cache; pub use service::{BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Service}; diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index e6a763abf6..383df1d4a6 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -151,6 +151,20 @@ impl Service { } } + /// Return byte representation of deposit and block caches. + pub fn as_bytes(&self) -> Vec { + self.inner.as_bytes() + } + + /// Recover the deposit and block caches from encoded bytes. + pub fn from_bytes(bytes: &[u8], config: Config, log: Logger) -> Result { + let inner = Inner::from_bytes(bytes, config)?; + Ok(Self { + inner: Arc::new(inner), + log, + }) + } + /// Provides access to the block cache. 
pub fn blocks(&self) -> &RwLock { &self.inner.block_cache diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index f6312f07e6..d8597b9849 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -803,3 +803,77 @@ mod fast { } } } + +mod persist { + use super::*; + #[test] + fn test_persist_caches() { + let mut env = new_env(); + let log = env.core_context().log; + let runtime = env.runtime(); + + let eth1 = runtime + .block_on(GanacheEth1Instance::new()) + .expect("should start eth1 environment"); + let deposit_contract = &eth1.deposit_contract; + let web3 = eth1.web3(); + + let now = get_block_number(runtime, &web3); + let config = Config { + endpoint: eth1.endpoint(), + deposit_contract_address: deposit_contract.address(), + deposit_contract_deploy_block: now, + lowest_cached_block_number: now, + follow_distance: 0, + block_cache_truncation: None, + ..Config::default() + }; + let service = Service::new(config.clone(), log.clone()); + let n = 10; + let deposits: Vec<_> = (0..n).into_iter().map(|_| random_deposit_data()).collect(); + for deposit in &deposits { + deposit_contract + .deposit(runtime, deposit.clone()) + .expect("should perform a deposit"); + } + + runtime + .block_on(service.update_deposit_cache()) + .expect("should perform update"); + + assert!( + service.deposit_cache_len() >= n, + "should have imported n deposits" + ); + + let deposit_count = service.deposit_cache_len(); + + runtime + .block_on(service.update_block_cache()) + .expect("should perform update"); + + assert!( + service.block_cache_len() >= n, + "should have imported n eth1 blocks" + ); + + let block_count = service.block_cache_len(); + + let eth1_bytes = service.as_bytes(); + + // Drop service and recover from bytes + drop(service); + + let recovered_service = Service::from_bytes(&eth1_bytes, config, log).unwrap(); + assert_eq!( + recovered_service.block_cache_len(), + block_count, + "Should have equal cached blocks as before recovery" 
); + assert_eq!( + recovered_service.deposit_cache_len(), + deposit_count, + "Should have equal cached deposits as before recovery" + ); + } +} From cb13129cd6cda96835b1066a1846121cf9dba387 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 20 Jan 2020 02:23:55 +0530 Subject: [PATCH 03/16] Persist eth1 cache (#760) * Add intermediate structures for bytes conversion * Expose byte conversion methods from `Eth1Service` * Add eth1 ssz containers * Fix type errors * Load eth1 cache on restart * Fix compile errors * Update Cargo.lock * Add comments and minor formatting * Add test for eth1 cache persistence * Restrict Deposit and Block cache field visibility * Add checks * Fix `SszDepositCache` check * Implement Encode/Decode directly on `BlockCache` From 3ba221e388030f158d926b76d53f46690c57bb69 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 20 Jan 2020 02:24:20 +0530 Subject: [PATCH 04/16] Persist eth1 cache (#760) * Add intermediate structures for bytes conversion * Expose byte conversion methods from `Eth1Service` * Add eth1 ssz containers * Fix type errors * Load eth1 cache on restart * Fix compile errors * Update Cargo.lock * Add comments and minor formatting * Add test for eth1 cache persistence * Restrict Deposit and Block cache field visibility * Add checks * Fix `SszDepositCache` check * Implement Encode/Decode directly on `BlockCache` From 82b55ea4180e75f23b1dd09ac782bd3bf30fa581 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 20 Jan 2020 02:32:48 +0530 Subject: [PATCH 05/16] Persist eth1 cache (#760) * Add intermediate structures for bytes conversion * Expose byte conversion methods from `Eth1Service` * Add eth1 ssz containers * Fix type errors * Load eth1 cache on restart * Fix compile errors * Update Cargo.lock * Add comments and minor formatting * Add test for eth1 cache persistence * Restrict Deposit and Block cache field visibility * Add checks * Fix `SszDepositCache` check * Implement Encode/Decode directly on `BlockCache` From 
4632e9ce52c90e12e94bfc60c8f77987a9904054 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 15 Jan 2020 15:36:12 +1100 Subject: [PATCH 06/16] Document the freezer DB space-time trade-off (#808) --- book/src/SUMMARY.md | 2 ++ book/src/advanced.md | 9 ++++++ book/src/advanced_database.md | 60 +++++++++++++++++++++++++++++++++++ 3 files changed, 71 insertions(+) create mode 100644 book/src/advanced.md create mode 100644 book/src/advanced_database.md diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 8253cd0630..df027e1b01 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -16,5 +16,7 @@ * [/consensus](./http_consensus.md) * [/network](./http_network.md) * [WebSocket](./websockets.md) +* [Advanced Usage](./advanced.md) + * [Database Configuration](./advanced_database.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) diff --git a/book/src/advanced.md b/book/src/advanced.md new file mode 100644 index 0000000000..d46cae6990 --- /dev/null +++ b/book/src/advanced.md @@ -0,0 +1,9 @@ +# Advanced Usage + +Want to get into the nitty-gritty of Lighthouse configuration? Looking for something not covered +elsewhere? + +This section provides detailed information about configuring Lighthouse for specific use cases, and +tips about how things work under the hood. + +* [Advanced Database Configuration](./advanced_database.md): understanding space-time trade-offs in the database. diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md new file mode 100644 index 0000000000..076b66ba37 --- /dev/null +++ b/book/src/advanced_database.md @@ -0,0 +1,60 @@ +# Database Configuration + +Lighthouse uses an efficient "split" database schema, whereby finalized states are stored separately +from recent, unfinalized states. We refer to the portion of the database storing finalized states as +the _freezer_ or _cold DB_, and the portion storing recent states as the _hot DB_. 
+ +In both the hot and cold DBs, full `BeaconState` data structures are only stored periodically, and +intermediate states are reconstructed by quickly replaying blocks on top of the nearest state. For +example, to fetch a state at slot 7 the database might fetch a full state from slot 0, and replay +blocks from slots 1-7 while omitting redundant signature checks and Merkle root calculations. The +full states upon which blocks are replayed are referred to as _restore points_ in the case of the +freezer DB, and _epoch boundary states_ in the case of the hot DB. + +The frequency at which the hot database stores full `BeaconState`s is fixed to one-state-per-epoch +in order to keep loads of recent states performant. For the freezer DB, the frequency is +configurable via the `--slots-per-restore-point` CLI flag, which is the topic of the next section. + +## Freezer DB Space-time Trade-offs + +Frequent restore points use more disk space but accelerate the loading of historical states. +Conversely, infrequent restore points use much less space, but cause the loading of historical +states to slow down dramatically. A lower _slots per restore point_ value (SPRP) corresponds to more +frequent restore points, while a higher SPRP corresponds to less frequent. The table below shows +some example values. + +| Use Case | SPRP | Yearly Disk Usage | Load Historical State | +| ---------------------- | -------------- | ----------------- | --------------------- | +| Block explorer/analysis | 32 | 411 GB | 96 ms | +| Default | 2048 | 6.4 GB | 6 s | +| Validator only | 8192 | 1.6 GB | 25 s | + +As you can see, it's a high-stakes trade-off! The relationships to disk usage and historical state +load time are both linear – doubling SPRP halves disk usage and doubles load time. The minimum SPRP +is 32, and the maximum is 8192. 
+ +The values shown in the table are approximate, calculated using a simple heuristic: each +`BeaconState` consumes around 5MB of disk space, and each block replayed takes around 3ms. The +**Yearly Disk Usage** column shows the approx size of the freezer DB _alone_ (hot DB not included), +and the **Load Historical State** time is the worst-case load time for a state in the last slot of +an epoch. + +To configure your Lighthouse node's database with a non-default SPRP, run your Beacon Node with +the `--slots-per-restore-point` flag: + +```bash +lighthouse beacon_node --slots-per-restore-point 8192 +``` + +## Glossary + +* _Freezer DB_: part of the database storing finalized states. States are stored in a sparser + format, and usually less frequently than in the hot DB. +* _Cold DB_: see _Freezer DB_. +* _Hot DB_: part of the database storing recent states, all blocks, and other runtime data. Full + states are stored every epoch. +* _Restore Point_: a full `BeaconState` stored periodically in the freezer DB. +* _Slots Per Restore Point (SPRP)_: the number of slots between restore points in the freezer DB. +* _Split Slot_: the slot at which states are divided between the hot and the cold DBs. All states + from slots less than the split slot are in the freezer, while all states with slots greater than + or equal to the split slot are in the hot DB. 
From 1abb964652ce742c5388736690abc4201cb51dd1 Mon Sep 17 00:00:00 2001 From: pscott <30843220+pscott@users.noreply.github.com> Date: Mon, 20 Jan 2020 03:33:28 +0400 Subject: [PATCH 07/16] Update op_pool to use proper rewards (#707) * Update op_pool to use proper rewards * Fix missing use import for tests * Address Michael's comments * Revert to private ValidatorStatuses * Rename variable for clearer code * Fix update_cover function * Remove expect * Add WIP test for rewards * Use aggregation_bits instead of earliest_attestation_validators * Use earliest attestation in test and correct typo * Fix op_pool test thanks to @michaelsproul 's help * Change test name --- beacon_node/beacon_chain/src/beacon_chain.rs | 7 +- beacon_node/beacon_chain/src/errors.rs | 2 + eth2/operation_pool/src/attestation.rs | 46 ++++-- eth2/operation_pool/src/lib.rs | 145 ++++++++++++++++-- .../src/common/get_base_reward.rs | 23 +++ eth2/state_processing/src/common/mod.rs | 2 + .../src/per_epoch_processing/apply_rewards.rs | 24 +-- 7 files changed, 203 insertions(+), 46 deletions(-) create mode 100644 eth2/state_processing/src/common/get_base_reward.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index cf55297a77..4edf1a8328 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -10,6 +10,7 @@ use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; use crate::timeout_rw_lock::TimeoutRwLock; use lmd_ghost::LmdGhost; use operation_pool::{OperationPool, PersistedOperationPool}; +use parking_lot::RwLock; use slog::{debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; @@ -1514,7 +1515,11 @@ impl BeaconChain { graffiti, proposer_slashings: proposer_slashings.into(), attester_slashings: attester_slashings.into(), - attestations: self.op_pool.get_attestations(&state, &self.spec).into(), + attestations: self + .op_pool + 
.get_attestations(&state, &self.spec) + .map_err(BlockProductionError::OpPoolError)? + .into(), deposits, voluntary_exits: self.op_pool.get_voluntary_exits(&state, &self.spec).into(), }, diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 809fb1d600..3f0fd7c0f8 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -1,5 +1,6 @@ use crate::eth1_chain::Error as Eth1ChainError; use crate::fork_choice::Error as ForkChoiceError; +use operation_pool::OpPoolError; use ssz_types::Error as SszTypesError; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::BlockProcessingError; @@ -67,6 +68,7 @@ pub enum BlockProductionError { BlockProcessingError(BlockProcessingError), Eth1ChainError(Eth1ChainError), BeaconStateError(BeaconStateError), + OpPoolError(OpPoolError), /// The `BeaconChain` was explicitly configured _without_ a connection to eth1, therefore it /// cannot produce blocks. NoEth1ChainConnection, diff --git a/eth2/operation_pool/src/attestation.rs b/eth2/operation_pool/src/attestation.rs index c2cc9d56ce..3b56061322 100644 --- a/eth2/operation_pool/src/attestation.rs +++ b/eth2/operation_pool/src/attestation.rs @@ -1,35 +1,52 @@ use crate::max_cover::MaxCover; -use types::{Attestation, BeaconState, BitList, EthSpec}; +use state_processing::common::{get_attesting_indices, get_base_reward}; +use std::collections::HashMap; +use types::{Attestation, BeaconState, BitList, ChainSpec, EthSpec}; pub struct AttMaxCover<'a, T: EthSpec> { /// Underlying attestation. att: &'a Attestation, - /// Bitfield of validators that are covered by this attestation. - fresh_validators: BitList, + /// Mapping of validator indices and their rewards. 
+ fresh_validators_rewards: HashMap, } impl<'a, T: EthSpec> AttMaxCover<'a, T> { pub fn new( att: &'a Attestation, - fresh_validators: BitList, - ) -> Self { - Self { + state: &BeaconState, + total_active_balance: u64, + spec: &ChainSpec, + ) -> Option { + let fresh_validators = earliest_attestation_validators(att, state); + let indices = get_attesting_indices(state, &att.data, &fresh_validators).ok()?; + let fresh_validators_rewards: HashMap = indices + .iter() + .map(|i| *i as u64) + .flat_map(|validator_index| { + let reward = + get_base_reward(state, validator_index as usize, total_active_balance, spec) + .ok()? + / spec.proposer_reward_quotient; + Some((validator_index, reward)) + }) + .collect(); + Some(Self { att, - fresh_validators, - } + fresh_validators_rewards, + }) } } impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> { type Object = Attestation; - type Set = BitList; + type Set = HashMap; fn object(&self) -> Attestation { self.att.clone() } - fn covering_set(&self) -> &BitList { - &self.fresh_validators + fn covering_set(&self) -> &HashMap { + &self.fresh_validators_rewards } /// Sneaky: we keep all the attestations together in one bucket, even though @@ -40,15 +57,16 @@ impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> { fn update_covering_set( &mut self, best_att: &Attestation, - covered_validators: &BitList, + covered_validators: &HashMap, ) { if self.att.data.slot == best_att.data.slot && self.att.data.index == best_att.data.index { - self.fresh_validators.difference_inplace(covered_validators); + self.fresh_validators_rewards + .retain(|k, _| !covered_validators.contains_key(k)) } } fn score(&self) -> usize { - self.fresh_validators.num_set_bits() + self.fresh_validators_rewards.values().sum::() as usize } } diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 0f85215e9e..3beb2f28e3 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -5,7 +5,7 @@ mod persistence; pub use 
persistence::PersistedOperationPool; -use attestation::{earliest_attestation_validators, AttMaxCover}; +use attestation::AttMaxCover; use attestation_id::AttestationId; use max_cover::maximum_cover; use parking_lot::RwLock; @@ -21,8 +21,8 @@ use state_processing::per_block_processing::{ use std::collections::{hash_map, HashMap, HashSet}; use std::marker::PhantomData; use types::{ - typenum::Unsigned, Attestation, AttesterSlashing, BeaconState, ChainSpec, EthSpec, - ProposerSlashing, Validator, VoluntaryExit, + typenum::Unsigned, Attestation, AttesterSlashing, BeaconState, BeaconStateError, ChainSpec, + EthSpec, ProposerSlashing, RelativeEpoch, Validator, VoluntaryExit, }; #[derive(Default, Debug)] @@ -38,6 +38,11 @@ pub struct OperationPool { _phantom: PhantomData, } +#[derive(Debug, PartialEq)] +pub enum OpPoolError { + GetAttestationsTotalBalanceError(BeaconStateError), +} + impl OperationPool { /// Create a new operation pool. pub fn new() -> Self { @@ -95,13 +100,19 @@ impl OperationPool { &self, state: &BeaconState, spec: &ChainSpec, - ) -> Vec> { + ) -> Result>, OpPoolError> { // Attestations for the current fork, which may be from the current or previous epoch. 
let prev_epoch = state.previous_epoch(); let current_epoch = state.current_epoch(); let prev_domain_bytes = AttestationId::compute_domain_bytes(prev_epoch, state, spec); let curr_domain_bytes = AttestationId::compute_domain_bytes(current_epoch, state, spec); let reader = self.attestations.read(); + let active_indices = state + .get_cached_active_validator_indices(RelativeEpoch::Current) + .map_err(OpPoolError::GetAttestationsTotalBalanceError)?; + let total_active_balance = state + .get_total_balance(&active_indices, spec) + .map_err(OpPoolError::GetAttestationsTotalBalanceError)?; let valid_attestations = reader .iter() .filter(|(key, _)| { @@ -119,9 +130,12 @@ impl OperationPool { ) .is_ok() }) - .map(|att| AttMaxCover::new(att, earliest_attestation_validators(att, state))); + .flat_map(|att| AttMaxCover::new(att, state, total_active_balance, spec)); - maximum_cover(valid_attestations, T::MaxAttestations::to_usize()) + Ok(maximum_cover( + valid_attestations, + T::MaxAttestations::to_usize(), + )) } /// Remove attestations which are too old to be included in a block. @@ -361,7 +375,10 @@ impl PartialEq for OperationPool { // TODO: more tests #[cfg(all(test, not(debug_assertions)))] mod release_tests { + use super::attestation::earliest_attestation_validators; use super::*; + use state_processing::common::{get_attesting_indices, get_base_reward}; + use std::collections::BTreeSet; use types::test_utils::*; use types::*; @@ -522,12 +539,20 @@ mod release_tests { // Before the min attestation inclusion delay, get_attestations shouldn't return anything. state.slot -= 1; - assert_eq!(op_pool.get_attestations(state, spec).len(), 0); + assert_eq!( + op_pool + .get_attestations(state, spec) + .expect("should have attestations") + .len(), + 0 + ); // Then once the delay has elapsed, we should get a single aggregated attestation. 
state.slot += spec.min_attestation_inclusion_delay; - let block_attestations = op_pool.get_attestations(state, spec); + let block_attestations = op_pool + .get_attestations(state, spec) + .expect("Should have block attestations"); assert_eq!(block_attestations.len(), committees.len()); let agg_att = &block_attestations[0]; @@ -684,7 +709,9 @@ mod release_tests { assert!(op_pool.num_attestations() > max_attestations); state.slot += spec.min_attestation_inclusion_delay; - let best_attestations = op_pool.get_attestations(state, spec); + let best_attestations = op_pool + .get_attestations(state, spec) + .expect("should have best attestations"); assert_eq!(best_attestations.len(), max_attestations); // All the best attestations should be signed by at least `big_step_size` (4) validators. @@ -692,4 +719,104 @@ mod release_tests { assert!(att.aggregation_bits.num_set_bits() >= big_step_size); } } + + #[test] + fn attestation_rewards() { + let small_step_size = 2; + let big_step_size = 4; + + let (ref mut state, ref keypairs, ref spec) = + attestation_test_state::(big_step_size); + + let op_pool = OperationPool::new(); + + let slot = state.slot - 1; + let committees = state + .get_beacon_committees_at_slot(slot) + .unwrap() + .into_iter() + .map(BeaconCommittee::into_owned) + .collect::>(); + + let max_attestations = ::MaxAttestations::to_usize(); + let target_committee_size = spec.target_committee_size as usize; + + // Each validator will have a multiple of 1_000_000_000 wei. + // Safe from overflow unless there are about 18B validators (2^64 / 1_000_000_000). 
+ for i in 0..state.validators.len() { + state.validators[i].effective_balance = 1_000_000_000 * i as u64; + } + + let insert_attestations = |bc: &OwnedBeaconCommittee, step_size| { + for i in (0..target_committee_size).step_by(step_size) { + let att = signed_attestation( + &bc.committee, + bc.index, + keypairs, + i..i + step_size, + slot, + state, + spec, + if i == 0 { None } else { Some(0) }, + ); + op_pool.insert_attestation(att, state, spec).unwrap(); + } + }; + + for committee in &committees { + assert_eq!(committee.committee.len(), target_committee_size); + // Attestations signed by only 2-3 validators + insert_attestations(committee, small_step_size); + // Attestations signed by 4+ validators + insert_attestations(committee, big_step_size); + } + + let num_small = target_committee_size / small_step_size; + let num_big = target_committee_size / big_step_size; + + assert_eq!(op_pool.attestations.read().len(), committees.len()); + assert_eq!( + op_pool.num_attestations(), + (num_small + num_big) * committees.len() + ); + assert!(op_pool.num_attestations() > max_attestations); + + state.slot += spec.min_attestation_inclusion_delay; + let best_attestations = op_pool + .get_attestations(state, spec) + .expect("should have valid best attestations"); + assert_eq!(best_attestations.len(), max_attestations); + + let active_indices = state + .get_cached_active_validator_indices(RelativeEpoch::Current) + .unwrap(); + let total_active_balance = state.get_total_balance(&active_indices, spec).unwrap(); + + // Set of indices covered by previous attestations in `best_attestations`. + let mut seen_indices = BTreeSet::new(); + // Used for asserting that rewards are in decreasing order. 
+ let mut prev_reward = u64::max_value(); + + for att in &best_attestations { + let fresh_validators_bitlist = earliest_attestation_validators(att, state); + let att_indices = + get_attesting_indices(state, &att.data, &fresh_validators_bitlist).unwrap(); + let fresh_indices = &att_indices - &seen_indices; + + let rewards = fresh_indices + .iter() + .map(|validator_index| { + get_base_reward(state, *validator_index as usize, total_active_balance, spec) + .unwrap() + / spec.proposer_reward_quotient + }) + .sum(); + + // Check that rewards are in decreasing order + assert!(prev_reward >= rewards); + + prev_reward = rewards; + seen_indices.extend(fresh_indices); + } + } } diff --git a/eth2/state_processing/src/common/get_base_reward.rs b/eth2/state_processing/src/common/get_base_reward.rs new file mode 100644 index 0000000000..d4da9ff320 --- /dev/null +++ b/eth2/state_processing/src/common/get_base_reward.rs @@ -0,0 +1,23 @@ +use integer_sqrt::IntegerSquareRoot; +use types::*; + +/// Returns the base reward for some validator. +/// +/// Spec v0.9.1 +pub fn get_base_reward( + state: &BeaconState, + index: usize, + // Should be == get_total_active_balance(state, spec) + total_active_balance: u64, + spec: &ChainSpec, +) -> Result { + if total_active_balance == 0 { + Ok(0) + } else { + Ok( + state.get_effective_balance(index, spec)? 
* spec.base_reward_factor + / total_active_balance.integer_sqrt() + / spec.base_rewards_per_epoch, + ) + } +} diff --git a/eth2/state_processing/src/common/mod.rs b/eth2/state_processing/src/common/mod.rs index 2bf8e0fc43..ce17b42625 100644 --- a/eth2/state_processing/src/common/mod.rs +++ b/eth2/state_processing/src/common/mod.rs @@ -1,9 +1,11 @@ mod get_attesting_indices; +mod get_base_reward; mod get_indexed_attestation; mod initiate_validator_exit; mod slash_validator; pub use get_attesting_indices::get_attesting_indices; +pub use get_base_reward::get_base_reward; pub use get_indexed_attestation::get_indexed_attestation; pub use initiate_validator_exit::initiate_validator_exit; pub use slash_validator::slash_validator; diff --git a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs index f0f701160c..4d5c69c6c5 100644 --- a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs +++ b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs @@ -1,6 +1,7 @@ +use super::super::common::get_base_reward; use super::validator_statuses::{TotalBalances, ValidatorStatus, ValidatorStatuses}; use super::Error; -use integer_sqrt::IntegerSquareRoot; + use types::*; /// Use to track the changes to a validators balance. @@ -211,24 +212,3 @@ fn get_attestation_delta( delta } - -/// Returns the base reward for some validator. -/// -/// Spec v0.9.1 -fn get_base_reward( - state: &BeaconState, - index: usize, - // Should be == get_total_active_balance(state, spec) - total_active_balance: u64, - spec: &ChainSpec, -) -> Result { - if total_active_balance == 0 { - Ok(0) - } else { - Ok( - state.get_effective_balance(index, spec)? 
* spec.base_reward_factor - / total_active_balance.integer_sqrt() - / spec.base_rewards_per_epoch, - ) - } -} From 7396cd2cabb7f0a2e031ed6a4188e2a19bb7b331 Mon Sep 17 00:00:00 2001 From: pscott <30843220+pscott@users.noreply.github.com> Date: Tue, 21 Jan 2020 11:38:56 +0400 Subject: [PATCH 08/16] Fix clippy warnings (#813) * Clippy account manager * Clippy account_manager * Clippy beacon_node/beacon_chain * Clippy beacon_node/client * Clippy beacon_node/eth1 * Clippy beacon_node/eth2-libp2p * Clippy beacon_node/genesis * Clippy beacon_node/network * Clippy beacon_node/rest_api * Clippy beacon_node/src * Clippy beacon_node/store * Clippy eth2/lmd_ghost * Clippy eth2/operation_pool * Clippy eth2/state_processing * Clippy eth2/types * Clippy eth2/utils/bls * Clippy eth2/utils/cahced_tree_hash * Clippy eth2/utils/deposit_contract * Clippy eth2/utils/eth2_interop_keypairs * Clippy eth2/utils/eth2_testnet_config * Clippy eth2/utils/lighthouse_metrics * Clippy eth2/utils/ssz * Clippy eth2/utils/ssz_types * Clippy eth2/utils/tree_hash_derive * Clippy lcli * Clippy tests/beacon_chain_sim * Clippy validator_client * Cargo fmt --- account_manager/src/lib.rs | 8 +- beacon_node/beacon_chain/src/beacon_chain.rs | 123 +++++++++--------- beacon_node/beacon_chain/src/builder.rs | 10 +- beacon_node/beacon_chain/src/eth1_chain.rs | 60 ++++----- beacon_node/beacon_chain/src/fork_choice.rs | 2 +- beacon_node/beacon_chain/src/head_tracker.rs | 4 +- beacon_node/beacon_chain/src/test_utils.rs | 3 +- beacon_node/client/src/builder.rs | 18 +-- beacon_node/client/src/notifier.rs | 11 +- beacon_node/eth1/src/block_cache.rs | 2 +- beacon_node/eth1/src/deposit_cache.rs | 47 +++---- beacon_node/eth1/src/http.rs | 4 +- beacon_node/eth1/src/service.rs | 2 +- beacon_node/eth1/tests/test.rs | 2 +- beacon_node/eth2-libp2p/src/behaviour.rs | 4 +- beacon_node/eth2-libp2p/src/rpc/handler.rs | 4 +- beacon_node/eth2-libp2p/tests/common/mod.rs | 9 +- .../eth2-libp2p/tests/gossipsub_tests.rs | 64 ++++----- 
beacon_node/eth2-libp2p/tests/rpc_tests.rs | 46 +++---- beacon_node/genesis/src/common.rs | 2 +- beacon_node/genesis/tests/tests.rs | 3 +- beacon_node/network/src/message_handler.rs | 2 +- beacon_node/network/src/message_processor.rs | 8 +- beacon_node/network/src/service.rs | 2 +- .../network/src/sync/network_context.rs | 6 +- .../network/src/sync/range_sync/batch.rs | 16 +-- .../network/src/sync/range_sync/chain.rs | 12 +- .../network/src/sync/range_sync/range.rs | 45 ++++--- beacon_node/rest_api/src/helpers.rs | 4 +- beacon_node/rest_api/src/lib.rs | 2 + beacon_node/rest_api/src/router.rs | 2 + beacon_node/rest_api/tests/test.rs | 22 ++-- beacon_node/src/config.rs | 2 +- beacon_node/store/benches/benches.rs | 3 +- .../examples/ssz_encode_state_container.rs | 1 - beacon_node/store/src/block_at_slot.rs | 11 +- beacon_node/store/src/block_root_tree.rs | 9 +- beacon_node/store/src/chunked_vector.rs | 6 +- beacon_node/store/src/forwards_iter.rs | 10 +- beacon_node/store/src/hot_cold_store.rs | 9 +- beacon_node/store/src/iter.rs | 4 +- beacon_node/store/src/memory_store.rs | 6 +- beacon_node/store/src/migrate.rs | 7 +- eth2/lmd_ghost/src/reduced_tree.rs | 29 +++-- eth2/operation_pool/src/max_cover.rs | 4 +- eth2/state_processing/benches/benches.rs | 16 +-- eth2/state_processing/tests/tests.rs | 4 +- eth2/types/benches/benches.rs | 3 +- eth2/types/src/attestation.rs | 4 +- eth2/types/src/beacon_state.rs | 4 +- .../builders/testing_attestation_builder.rs | 5 +- .../builders/testing_beacon_state_builder.rs | 4 +- .../testing_proposer_slashing_builder.rs | 2 +- eth2/utils/bls/src/aggregate_signature.rs | 7 +- eth2/utils/bls/src/public_key.rs | 2 +- eth2/utils/bls/src/signature.rs | 4 +- eth2/utils/bls/src/signature_set.rs | 6 +- eth2/utils/cached_tree_hash/src/test.rs | 10 +- eth2/utils/deposit_contract/src/lib.rs | 2 +- eth2/utils/eth2_interop_keypairs/src/lib.rs | 3 +- eth2/utils/eth2_testnet_config/src/lib.rs | 8 +- eth2/utils/lighthouse_metrics/src/lib.rs | 1 + 
eth2/utils/ssz/src/decode.rs | 4 +- eth2/utils/ssz/src/decode/impls.rs | 7 +- eth2/utils/ssz/src/encode.rs | 2 +- eth2/utils/ssz/src/lib.rs | 2 +- eth2/utils/ssz/tests/tests.rs | 1 + eth2/utils/ssz_types/src/bitfield.rs | 8 +- eth2/utils/ssz_types/src/fixed_vector.rs | 8 +- eth2/utils/ssz_types/src/variable_list.rs | 8 +- eth2/utils/tree_hash_derive/src/lib.rs | 2 +- lcli/src/deploy_deposit_contract.rs | 6 +- lcli/src/refund_deposit_contract.rs | 2 +- lcli/src/transition_blocks.rs | 4 +- tests/beacon_chain_sim/src/main.rs | 2 +- validator_client/src/attestation_service.rs | 5 +- validator_client/src/duties_service.rs | 2 +- validator_client/src/validator_directory.rs | 5 +- 78 files changed, 387 insertions(+), 416 deletions(-) diff --git a/account_manager/src/lib.rs b/account_manager/src/lib.rs index b0f3525b17..719d5728ab 100644 --- a/account_manager/src/lib.rs +++ b/account_manager/src/lib.rs @@ -156,7 +156,7 @@ fn run_new_validator_subcommand( }) .map(|password| { // Trim the line feed from the end of the password file, if present. - if password.ends_with("\n") { + if password.ends_with('\n') { password[0..password.len() - 1].to_string() } else { password @@ -337,7 +337,7 @@ fn deposit_validators( .map(|_| event_loop) }) // Web3 gives errors if the event loop is dropped whilst performing requests. - .map(|event_loop| drop(event_loop)) + .map(drop) } /// For the given `ValidatorDirectory`, submit a deposit transaction to the `web3` node. 
@@ -367,7 +367,7 @@ fn deposit_validator( .into_future() .and_then(move |(voting_keypair, deposit_data)| { let pubkey_1 = voting_keypair.pk.clone(); - let pubkey_2 = voting_keypair.pk.clone(); + let pubkey_2 = voting_keypair.pk; let web3_1 = web3.clone(); let web3_2 = web3.clone(); @@ -421,7 +421,7 @@ fn deposit_validator( to: Some(deposit_contract), gas: Some(U256::from(DEPOSIT_GAS)), gas_price: None, - value: Some(U256::from(from_gwei(deposit_amount))), + value: Some(from_gwei(deposit_amount)), data: Some(deposit_data.into()), nonce: None, condition: None, diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4edf1a8328..4a27088306 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -25,6 +25,7 @@ use state_processing::{ per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy, }; use std::borrow::Cow; +use std::cmp::Ordering; use std::fs; use std::io::prelude::*; use std::sync::Arc; @@ -512,65 +513,67 @@ impl BeaconChain { pub fn state_at_slot(&self, slot: Slot) -> Result, Error> { let head_state = self.head()?.beacon_state; - if slot == head_state.slot { - Ok(head_state) - } else if slot > head_state.slot { - if slot > head_state.slot + T::EthSpec::slots_per_epoch() { - warn!( - self.log, - "Skipping more than an epoch"; - "head_slot" => head_state.slot, - "request_slot" => slot - ) - } - - let start_slot = head_state.slot; - let task_start = Instant::now(); - let max_task_runtime = Duration::from_millis(self.spec.milliseconds_per_slot); - - let head_state_slot = head_state.slot; - let mut state = head_state; - while state.slot < slot { - // Do not allow and forward state skip that takes longer than the maximum task duration. - // - // This is a protection against nodes doing too much work when they're not synced - // to a chain. 
- if task_start + max_task_runtime < Instant::now() { - return Err(Error::StateSkipTooLarge { - start_slot, - requested_slot: slot, - max_task_runtime, - }); + match slot.cmp(&head_state.slot) { + Ordering::Equal => Ok(head_state), + Ordering::Greater => { + if slot > head_state.slot + T::EthSpec::slots_per_epoch() { + warn!( + self.log, + "Skipping more than an epoch"; + "head_slot" => head_state.slot, + "request_slot" => slot + ) } - // Note: supplying some `state_root` when it is known would be a cheap and easy - // optimization. - match per_slot_processing(&mut state, None, &self.spec) { - Ok(()) => (), - Err(e) => { - warn!( - self.log, - "Unable to load state at slot"; - "error" => format!("{:?}", e), - "head_slot" => head_state_slot, - "requested_slot" => slot - ); - return Err(Error::NoStateForSlot(slot)); - } - }; - } - Ok(state) - } else { - let state_root = self - .rev_iter_state_roots()? - .take_while(|(_root, current_slot)| *current_slot >= slot) - .find(|(_root, current_slot)| *current_slot == slot) - .map(|(root, _slot)| root) - .ok_or_else(|| Error::NoStateForSlot(slot))?; + let start_slot = head_state.slot; + let task_start = Instant::now(); + let max_task_runtime = Duration::from_millis(self.spec.milliseconds_per_slot); - Ok(self - .get_state_caching(&state_root, Some(slot))? - .ok_or_else(|| Error::NoStateForSlot(slot))?) + let head_state_slot = head_state.slot; + let mut state = head_state; + while state.slot < slot { + // Do not allow and forward state skip that takes longer than the maximum task duration. + // + // This is a protection against nodes doing too much work when they're not synced + // to a chain. + if task_start + max_task_runtime < Instant::now() { + return Err(Error::StateSkipTooLarge { + start_slot, + requested_slot: slot, + max_task_runtime, + }); + } + + // Note: supplying some `state_root` when it is known would be a cheap and easy + // optimization. 
+ match per_slot_processing(&mut state, None, &self.spec) { + Ok(()) => (), + Err(e) => { + warn!( + self.log, + "Unable to load state at slot"; + "error" => format!("{:?}", e), + "head_slot" => head_state_slot, + "requested_slot" => slot + ); + return Err(Error::NoStateForSlot(slot)); + } + }; + } + Ok(state) + } + Ordering::Less => { + let state_root = self + .rev_iter_state_roots()? + .take_while(|(_root, current_slot)| *current_slot >= slot) + .find(|(_root, current_slot)| *current_slot == slot) + .map(|(root, _slot)| root) + .ok_or_else(|| Error::NoStateForSlot(slot))?; + + Ok(self + .get_state_caching(&state_root, Some(slot))? + .ok_or_else(|| Error::NoStateForSlot(slot))?) + } } } @@ -638,7 +641,7 @@ impl BeaconChain { let head_state = &self.head()?.beacon_state; let mut state = if epoch(slot) == epoch(head_state.slot) { - self.head()?.beacon_state.clone() + self.head()?.beacon_state } else { self.state_at_slot(slot)? }; @@ -671,7 +674,7 @@ impl BeaconChain { let head_state = &self.head()?.beacon_state; let mut state = if epoch == as_epoch(head_state.slot) { - self.head()?.beacon_state.clone() + self.head()?.beacon_state } else { self.state_at_slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))? 
}; @@ -1754,9 +1757,9 @@ impl BeaconChain { let mut dump = vec![]; let mut last_slot = CheckPoint { - beacon_block: self.head()?.beacon_block.clone(), + beacon_block: self.head()?.beacon_block, beacon_block_root: self.head()?.beacon_block_root, - beacon_state: self.head()?.beacon_state.clone(), + beacon_state: self.head()?.beacon_state, beacon_state_root: self.head()?.beacon_state_root, }; diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 5738d494e8..fc951635fc 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -448,7 +448,7 @@ where let fork_choice = if let Some(persisted_beacon_chain) = &self.persisted_beacon_chain { ForkChoice::from_ssz_container( persisted_beacon_chain.fork_choice.clone(), - store.clone(), + store, block_root_tree, ) .map_err(|e| format!("Unable to decode fork choice from db: {:?}", e))? @@ -462,7 +462,7 @@ where .ok_or_else(|| "fork_choice_backend requires a genesis_block_root")?; let backend = ThreadSafeReducedTree::new( - store.clone(), + store, block_root_tree, &finalized_checkpoint.beacon_block, finalized_checkpoint.beacon_block_root, @@ -626,7 +626,7 @@ mod test { #[test] fn recent_genesis() { let validator_count = 8; - let genesis_time = 13371337; + let genesis_time = 13_371_337; let log = get_logger(); let store = Arc::new(MemoryStore::open()); @@ -641,7 +641,7 @@ mod test { let chain = BeaconChainBuilder::new(MinimalEthSpec) .logger(log.clone()) - .store(store.clone()) + .store(store) .store_migrator(NullMigrator) .genesis_state(genesis_state) .expect("should build state using recent genesis") @@ -662,7 +662,7 @@ mod test { assert_eq!(state.slot, Slot::new(0), "should start from genesis"); assert_eq!( - state.genesis_time, 13371337, + state.genesis_time, 13_371_337, "should have the correct genesis time" ); assert_eq!( diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 
9b7f772048..e521923978 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -8,6 +8,7 @@ use rand::prelude::*; use slog::{crit, debug, error, trace, Logger}; use ssz_derive::{Decode, Encode}; use state_processing::per_block_processing::get_new_eth1_data; +use std::cmp::Ordering; use std::collections::HashMap; use std::iter::DoubleEndedIterator; use std::iter::FromIterator; @@ -343,8 +344,7 @@ impl> Eth1ChainBackend for CachingEth1Backend voting_period_start_seconds) - .skip(eth1_follow_distance as usize) - .next() + .nth(eth1_follow_distance as usize) .map(|block| { trace!( self.log, @@ -392,21 +392,21 @@ impl> Eth1ChainBackend for CachingEth1Backend deposit_count { - Err(Error::DepositIndexTooHigh) - } else if deposit_index == deposit_count { - Ok(vec![]) - } else { - let next = deposit_index; - let last = std::cmp::min(deposit_count, next + T::MaxDeposits::to_u64()); + match deposit_index.cmp(&deposit_count) { + Ordering::Greater => Err(Error::DepositIndexTooHigh), + Ordering::Equal => Ok(vec![]), + Ordering::Less => { + let next = deposit_index; + let last = std::cmp::min(deposit_count, next + T::MaxDeposits::to_u64()); - self.core - .deposits() - .read() - .cache - .get_deposits(next, last, deposit_count, DEPOSIT_TREE_DEPTH) - .map_err(|e| Error::BackendError(format!("Failed to get deposits: {:?}", e))) - .map(|(_deposit_root, deposits)| deposits) + self.core + .deposits() + .read() + .cache + .get_deposits(next, last, deposit_count, DEPOSIT_TREE_DEPTH) + .map_err(|e| Error::BackendError(format!("Failed to get deposits: {:?}", e))) + .map(|(_deposit_root, deposits)| deposits) + } } } @@ -775,7 +775,7 @@ mod test { let deposits_for_inclusion = eth1_chain .deposits_for_block_inclusion(&state, &random_eth1_data(), spec) - .expect(&format!("should find deposit for {}", i)); + .unwrap_or_else(|_| panic!("should find deposit for {}", i)); let expected_len = std::cmp::min(i - initial_deposit_index, max_deposits as 
usize); @@ -853,7 +853,7 @@ mod test { state.slot = Slot::new(period * 1_000 + period / 2); // Add 50% of the votes so a lookup is required. - for _ in 0..period / 2 + 1 { + for _ in 0..=period / 2 { state .eth1_data_votes .push(random_eth1_data()) @@ -932,7 +932,7 @@ mod test { state.slot = Slot::new(period / 2); // Add 50% of the votes so a lookup is required. - for _ in 0..period / 2 + 1 { + for _ in 0..=period / 2 { state .eth1_data_votes .push(random_eth1_data()) @@ -1090,7 +1090,7 @@ mod test { eth1_block.number, *new_eth1_data .get(ð1_block.clone().eth1_data().unwrap()) - .expect(&format!( + .unwrap_or_else(|| panic!( "new_eth1_data should have expected block #{}", eth1_block.number )) @@ -1135,8 +1135,8 @@ mod test { let votes = collect_valid_votes( &state, - HashMap::from_iter(new_eth1_data.clone().into_iter()), - HashMap::from_iter(all_eth1_data.clone().into_iter()), + HashMap::from_iter(new_eth1_data.into_iter()), + HashMap::from_iter(all_eth1_data.into_iter()), ); assert_eq!( votes.len(), @@ -1164,7 +1164,7 @@ mod test { let votes = collect_valid_votes( &state, HashMap::from_iter(new_eth1_data.clone().into_iter()), - HashMap::from_iter(all_eth1_data.clone().into_iter()), + HashMap::from_iter(all_eth1_data.into_iter()), ); assert_votes!( votes, @@ -1196,8 +1196,8 @@ mod test { let votes = collect_valid_votes( &state, - HashMap::from_iter(new_eth1_data.clone().into_iter()), - HashMap::from_iter(all_eth1_data.clone().into_iter()), + HashMap::from_iter(new_eth1_data.into_iter()), + HashMap::from_iter(all_eth1_data.into_iter()), ); assert_votes!( votes, @@ -1230,12 +1230,12 @@ mod test { .expect("should have some eth1 data") .clone(); - state.eth1_data_votes = vec![non_new_eth1_data.0.clone()].into(); + state.eth1_data_votes = vec![non_new_eth1_data.0].into(); let votes = collect_valid_votes( &state, - HashMap::from_iter(new_eth1_data.clone().into_iter()), - HashMap::from_iter(all_eth1_data.clone().into_iter()), + 
HashMap::from_iter(new_eth1_data.into_iter()), + HashMap::from_iter(all_eth1_data.into_iter()), ); assert_votes!( @@ -1268,8 +1268,8 @@ mod test { let votes = collect_valid_votes( &state, - HashMap::from_iter(new_eth1_data.clone().into_iter()), - HashMap::from_iter(all_eth1_data.clone().into_iter()), + HashMap::from_iter(new_eth1_data.into_iter()), + HashMap::from_iter(all_eth1_data.into_iter()), ); assert_votes!( diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index b8d0e4a460..3a727a1f65 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -294,7 +294,7 @@ impl ForkChoice { /// Returns a `SszForkChoice` which contains the current state of `Self`. pub fn as_ssz_container(&self) -> SszForkChoice { SszForkChoice { - genesis_block_root: self.genesis_block_root.clone(), + genesis_block_root: self.genesis_block_root, justified_checkpoint: self.justified_checkpoint.read().clone(), best_justified_checkpoint: self.best_justified_checkpoint.read().clone(), backend_bytes: self.backend.as_bytes(), diff --git a/beacon_node/beacon_chain/src/head_tracker.rs b/beacon_node/beacon_chain/src/head_tracker.rs index 850f9629c5..7bae0ce62d 100644 --- a/beacon_node/beacon_chain/src/head_tracker.rs +++ b/beacon_node/beacon_chain/src/head_tracker.rs @@ -59,10 +59,10 @@ impl HeadTracker { let slots_len = ssz_container.slots.len(); if roots_len != slots_len { - return Err(Error::MismatchingLengths { + Err(Error::MismatchingLengths { roots_len, slots_len, - }); + }) } else { let map = HashMap::from_iter( ssz_container diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index fbf437fd18..337878d4cd 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -173,7 +173,7 @@ impl BeaconChainHarness> { let chain = BeaconChainBuilder::new(eth_spec_instance) .logger(log.clone()) - 
.custom_spec(spec.clone()) + .custom_spec(spec) .store(store.clone()) .store_migrator( as Migrate<_, E>>::new(store)) .resume_from_db(Eth1Config::default()) @@ -236,7 +236,6 @@ where self.chain .state_at_slot(state_slot) .expect("should find state for slot") - .clone() }; // Determine the first slot where a block should be built. diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index d35fbeb46f..611763e9b6 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -151,7 +151,7 @@ where let builder = BeaconChainBuilder::new(eth_spec_instance) .logger(context.log.clone()) - .store(store.clone()) + .store(store) .store_migrator(store_migrator) .custom_spec(spec.clone()); @@ -223,7 +223,7 @@ where Box::new(future) } ClientGenesis::RemoteNode { server, .. } => { - let future = Bootstrapper::connect(server.to_string(), &context.log) + let future = Bootstrapper::connect(server, &context.log) .map_err(|e| { format!("Failed to initialize bootstrap client: {}", e) }) @@ -306,14 +306,14 @@ where .ok_or_else(|| "http_server requires a libp2p network sender")?; let network_info = rest_api::NetworkInfo { - network_service: network.clone(), - network_chan: network_send.clone(), + network_service: network, + network_chan: network_send, }; let (exit_signal, listening_addr) = rest_api::start_server( &client_config.rest_api, &context.executor, - beacon_chain.clone(), + beacon_chain, network_info, client_config .create_db_path() @@ -529,7 +529,7 @@ where spec, context.log, ) - .map_err(|e| format!("Unable to open database: {:?}", e).to_string())?; + .map_err(|e| format!("Unable to open database: {:?}", e))?; self.store = Some(Arc::new(store)); Ok(self) } @@ -557,8 +557,8 @@ where { /// Specifies that the `Client` should use a `DiskStore` database. 
pub fn simple_disk_store(mut self, path: &Path) -> Result { - let store = SimpleDiskStore::open(path) - .map_err(|e| format!("Unable to open database: {:?}", e).to_string())?; + let store = + SimpleDiskStore::open(path).map_err(|e| format!("Unable to open database: {:?}", e))?; self.store = Some(Arc::new(store)); Ok(self) } @@ -660,7 +660,7 @@ where .ok_or_else(|| "caching_eth1_backend requires a store".to_string())?; let backend = if let Some(eth1_service_from_genesis) = self.eth1_service { - eth1_service_from_genesis.update_config(config.clone())?; + eth1_service_from_genesis.update_config(config)?; // This cache is not useful because it's first (earliest) block likely the block that // triggered genesis. diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 3785821003..faaf8464c5 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -17,7 +17,7 @@ pub const WARN_PEER_COUNT: usize = 1; const SECS_PER_MINUTE: f64 = 60.0; const SECS_PER_HOUR: f64 = 3600.0; const SECS_PER_DAY: f64 = 86400.0; // non-leap -const SECS_PER_WEEK: f64 = 604800.0; // non-leap +const SECS_PER_WEEK: f64 = 604_800.0; // non-leap const DAYS_PER_WEEK: f64 = 7.0; const HOURS_PER_DAY: f64 = 24.0; const MINUTES_PER_HOUR: f64 = 60.0; @@ -166,13 +166,14 @@ pub fn spawn_notifier( .then(move |result| { match result { Ok(()) => Ok(()), - Err(e) => Ok(error!( + Err(e) => { + error!( log_3, "Notifier failed to notify"; "error" => format!("{:?}", e) - )) - } - }); + ); + Ok(()) + } } }); let (exit_signal, exit) = exit_future::signal(); context diff --git a/beacon_node/eth1/src/block_cache.rs b/beacon_node/eth1/src/block_cache.rs index 088d8dcfdc..4bcd23740e 100644 --- a/beacon_node/eth1/src/block_cache.rs +++ b/beacon_node/eth1/src/block_cache.rs @@ -224,7 +224,7 @@ mod tests { ); } - let mut cache_2 = cache.clone(); + let mut cache_2 = cache; cache_2.truncate(17); assert_eq!( cache_2.blocks.len(), diff --git 
a/beacon_node/eth1/src/deposit_cache.rs b/beacon_node/eth1/src/deposit_cache.rs index b3fe67b7ec..a0f9e99962 100644 --- a/beacon_node/eth1/src/deposit_cache.rs +++ b/beacon_node/eth1/src/deposit_cache.rs @@ -1,6 +1,7 @@ use crate::DepositLog; use eth2_hashing::hash; use ssz_derive::{Decode, Encode}; +use std::cmp::Ordering; use tree_hash::TreeHash; use types::{Deposit, Hash256, DEPOSIT_TREE_DEPTH}; @@ -196,24 +197,26 @@ impl DepositCache { /// - If a log with index `log.index - 1` is not already present in `self` (ignored when empty). /// - If a log with `log.index` is already known, but the given `log` is distinct to it. pub fn insert_log(&mut self, log: DepositLog) -> Result<(), Error> { - if log.index == self.logs.len() as u64 { - let deposit = Hash256::from_slice(&log.deposit_data.tree_hash_root()); - self.leaves.push(deposit); - self.logs.push(log); - self.deposit_tree.push_leaf(deposit)?; - self.deposit_roots.push(self.deposit_tree.root()); - Ok(()) - } else if log.index < self.logs.len() as u64 { - if self.logs[log.index as usize] == log { + match log.index.cmp(&(self.logs.len() as u64)) { + Ordering::Equal => { + let deposit = Hash256::from_slice(&log.deposit_data.tree_hash_root()); + self.leaves.push(deposit); + self.logs.push(log); + self.deposit_tree.push_leaf(deposit)?; + self.deposit_roots.push(self.deposit_tree.root()); Ok(()) - } else { - Err(Error::DuplicateDistinctLog(log.index)) } - } else { - Err(Error::NonConsecutive { + Ordering::Less => { + if self.logs[log.index as usize] == log { + Ok(()) + } else { + Err(Error::DuplicateDistinctLog(log.index)) + } + } + Ordering::Greater => Err(Error::NonConsecutive { log_index: log.index, expected: self.logs.len(), - }) + }), } } @@ -312,14 +315,12 @@ impl DepositCache { .logs .binary_search_by(|deposit| deposit.block_number.cmp(&block_number)); match index { - Ok(index) => return self.logs.get(index).map(|x| x.index + 1), - Err(next) => { - return Some( - self.logs - .get(next.saturating_sub(1)) - 
.map_or(0, |x| x.index + 1), - ) - } + Ok(index) => self.logs.get(index).map(|x| x.index + 1), + Err(next) => Some( + self.logs + .get(next.saturating_sub(1)) + .map_or(0, |x| x.index + 1), + ), } } @@ -329,7 +330,7 @@ impl DepositCache { /// and queries the `deposit_roots` map to get the corresponding `deposit_root`. pub fn get_deposit_root_from_cache(&self, block_number: u64) -> Option { let index = self.get_deposit_count_from_cache(block_number)?; - Some(self.deposit_roots.get(index as usize)?.clone()) + Some(*self.deposit_roots.get(index as usize)?) } } diff --git a/beacon_node/eth1/src/http.rs b/beacon_node/eth1/src/http.rs index 0b2e3a32b4..130195245f 100644 --- a/beacon_node/eth1/src/http.rs +++ b/beacon_node/eth1/src/http.rs @@ -136,7 +136,7 @@ pub fn get_deposit_count( timeout, ) .and_then(|result| match result { - None => Err(format!("Deposit root response was none")), + None => Err("Deposit root response was none".to_string()), Some(bytes) => { if bytes.is_empty() { Ok(None) @@ -173,7 +173,7 @@ pub fn get_deposit_root( timeout, ) .and_then(|result| match result { - None => Err(format!("Deposit root response was none")), + None => Err("Deposit root response was none".to_string()), Some(bytes) => { if bytes.is_empty() { Ok(None) diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 383df1d4a6..292f777fe3 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -613,7 +613,7 @@ impl Service { Ok(BlockCacheUpdateOutcome::Success { blocks_imported, - head_block_number: cache_4.clone().block_cache.read().highest_block_number(), + head_block_number: cache_4.block_cache.read().highest_block_number(), }) }) } diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index d8597b9849..32f793aadd 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -758,7 +758,7 @@ mod fast { log, ); let n = 10; - let deposits: Vec<_> = (0..n).into_iter().map(|_| 
random_deposit_data()).collect(); + let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); for deposit in &deposits { deposit_contract .deposit(runtime, deposit.clone()) diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index a9e697473c..608cccf7dd 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -57,7 +57,7 @@ impl Behaviour { net_conf: &NetworkConfig, log: &slog::Logger, ) -> error::Result { - let local_peer_id = local_key.public().clone().into_peer_id(); + let local_peer_id = local_key.public().into_peer_id(); let behaviour_log = log.new(o!()); let ping_config = PingConfig::new() @@ -74,7 +74,7 @@ impl Behaviour { Ok(Behaviour { eth2_rpc: RPC::new(log.clone()), - gossipsub: Gossipsub::new(local_peer_id.clone(), net_conf.gs_config.clone()), + gossipsub: Gossipsub::new(local_peer_id, net_conf.gs_config.clone()), discovery: Discovery::new(local_key, net_conf, log)?, ping: Ping::new(ping_config), identify, diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index e7b225e16c..4f2908c932 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -145,7 +145,7 @@ where // When terminating a stream, report the stream termination to the requesting user via // an RPC error let error = RPCErrorResponse::ServerError(ErrorMessage { - error_message: "Request timed out".as_bytes().to_vec(), + error_message: b"Request timed out".to_vec(), }); // The stream termination type is irrelevant, this will terminate the @@ -510,7 +510,7 @@ where // notify the user return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( RPCEvent::Error( - stream_id.get_ref().clone(), + *stream_id.get_ref(), RPCError::Custom("Stream timed out".into()), ), ))); diff --git a/beacon_node/eth2-libp2p/tests/common/mod.rs b/beacon_node/eth2-libp2p/tests/common/mod.rs index a4fa7db593..d945383ae9 
100644 --- a/beacon_node/eth2-libp2p/tests/common/mod.rs +++ b/beacon_node/eth2-libp2p/tests/common/mod.rs @@ -43,8 +43,7 @@ pub fn build_libp2p_instance( ) -> LibP2PService { let config = build_config(port, boot_nodes, secret_key); // launch libp2p service - let libp2p_service = LibP2PService::new(config.clone(), log.clone()).unwrap(); - libp2p_service + LibP2PService::new(config, log.clone()).unwrap() } #[allow(dead_code)] @@ -64,10 +63,10 @@ pub fn build_full_mesh(log: slog::Logger, n: usize, start_port: Option) -> .map(|x| get_enr(&x).multiaddr()[1].clone()) .collect(); - for i in 0..n { - for j in i..n { + for (i, node) in nodes.iter_mut().enumerate().take(n) { + for (j, multiaddr) in multiaddrs.iter().enumerate().skip(i) { if i != j { - match libp2p::Swarm::dial_addr(&mut nodes[i].swarm, multiaddrs[j].clone()) { + match libp2p::Swarm::dial_addr(&mut node.swarm, multiaddr.clone()) { Ok(()) => debug!(log, "Connected"), Err(_) => error!(log, "Failed to connect"), }; diff --git a/beacon_node/eth2-libp2p/tests/gossipsub_tests.rs b/beacon_node/eth2-libp2p/tests/gossipsub_tests.rs index 349d754d54..37d38f9869 100644 --- a/beacon_node/eth2-libp2p/tests/gossipsub_tests.rs +++ b/beacon_node/eth2-libp2p/tests/gossipsub_tests.rs @@ -62,7 +62,7 @@ fn test_gossipsub_forward() { // Every node except the corner nodes are connected to 2 nodes. if subscribed_count == (num_nodes * 2) - 2 { node.swarm.publish( - &vec![Topic::new(topic.into_string())], + &[Topic::new(topic.into_string())], pubsub_message.clone(), ); } @@ -96,45 +96,37 @@ fn test_gossipsub_full_mesh_publish() { let mut received_count = 0; tokio::run(futures::future::poll_fn(move || -> Result<_, ()> { for node in nodes.iter_mut() { - loop { - match node.poll().unwrap() { - Async::Ready(Some(Libp2pEvent::PubsubMessage { - topics, message, .. 
- })) => { - assert_eq!(topics.len(), 1); - // Assert topic is the published topic - assert_eq!( - topics.first().unwrap(), - &TopicHash::from_raw(publishing_topic.clone()) - ); - // Assert message received is the correct one - assert_eq!(message, pubsub_message.clone()); - received_count += 1; - if received_count == num_nodes - 1 { - return Ok(Async::Ready(())); - } - } - _ => break, + while let Async::Ready(Some(Libp2pEvent::PubsubMessage { + topics, message, .. + })) = node.poll().unwrap() + { + assert_eq!(topics.len(), 1); + // Assert topic is the published topic + assert_eq!( + topics.first().unwrap(), + &TopicHash::from_raw(publishing_topic.clone()) + ); + // Assert message received is the correct one + assert_eq!(message, pubsub_message.clone()); + received_count += 1; + if received_count == num_nodes - 1 { + return Ok(Async::Ready(())); } } } - loop { - match publishing_node.poll().unwrap() { - Async::Ready(Some(Libp2pEvent::PeerSubscribed(_, topic))) => { - // Received topics is one of subscribed eth2 topics - assert!(topic.clone().into_string().starts_with("/eth2/")); - // Publish on beacon block topic - if topic == TopicHash::from_raw("/eth2/beacon_block/ssz") { - subscribed_count += 1; - if subscribed_count == num_nodes - 1 { - publishing_node.swarm.publish( - &vec![Topic::new(topic.into_string())], - pubsub_message.clone(), - ); - } - } + while let Async::Ready(Some(Libp2pEvent::PeerSubscribed(_, topic))) = + publishing_node.poll().unwrap() + { + // Received topics is one of subscribed eth2 topics + assert!(topic.clone().into_string().starts_with("/eth2/")); + // Publish on beacon block topic + if topic == TopicHash::from_raw("/eth2/beacon_block/ssz") { + subscribed_count += 1; + if subscribed_count == num_nodes - 1 { + publishing_node + .swarm + .publish(&[Topic::new(topic.into_string())], pubsub_message.clone()); } - _ => break, } } Ok(Async::NotReady) diff --git a/beacon_node/eth2-libp2p/tests/rpc_tests.rs 
b/beacon_node/eth2-libp2p/tests/rpc_tests.rs index 91390b4e77..279cc75699 100644 --- a/beacon_node/eth2-libp2p/tests/rpc_tests.rs +++ b/beacon_node/eth2-libp2p/tests/rpc_tests.rs @@ -3,6 +3,7 @@ use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::*; use eth2_libp2p::{Libp2pEvent, RPCEvent}; use slog::{warn, Level}; +use std::sync::atomic::{AtomicBool, Ordering::Relaxed}; use std::sync::{Arc, Mutex}; use std::time::Duration; use tokio::prelude::*; @@ -106,20 +107,19 @@ fn test_status_rpc() { }); // execute the futures and check the result - let test_result = Arc::new(Mutex::new(false)); + let test_result = Arc::new(AtomicBool::new(false)); let error_result = test_result.clone(); let thread_result = test_result.clone(); tokio::run( sender_future .select(receiver_future) .timeout(Duration::from_millis(1000)) - .map_err(move |_| *error_result.lock().unwrap() = false) + .map_err(move |_| error_result.store(false, Relaxed)) .map(move |result| { - *thread_result.lock().unwrap() = result.0; - () + thread_result.store(result.0, Relaxed); }), ); - assert!(*test_result.lock().unwrap()); + assert!(test_result.load(Relaxed)); } #[test] @@ -236,20 +236,19 @@ fn test_blocks_by_range_chunked_rpc() { }); // execute the futures and check the result - let test_result = Arc::new(Mutex::new(false)); + let test_result = Arc::new(AtomicBool::new(false)); let error_result = test_result.clone(); let thread_result = test_result.clone(); tokio::run( sender_future .select(receiver_future) .timeout(Duration::from_millis(1000)) - .map_err(move |_| *error_result.lock().unwrap() = false) + .map_err(move |_| error_result.store(false, Relaxed)) .map(move |result| { - *thread_result.lock().unwrap() = result.0; - () + thread_result.store(result.0, Relaxed); }), ); - assert!(*test_result.lock().unwrap()); + assert!(test_result.load(Relaxed)); } #[test] @@ -359,20 +358,19 @@ fn test_blocks_by_range_single_empty_rpc() { }); // execute the futures and check the result - let test_result = 
Arc::new(Mutex::new(false)); + let test_result = Arc::new(AtomicBool::new(false)); let error_result = test_result.clone(); let thread_result = test_result.clone(); tokio::run( sender_future .select(receiver_future) .timeout(Duration::from_millis(1000)) - .map_err(move |_| *error_result.lock().unwrap() = false) + .map_err(move |_| error_result.store(false, Relaxed)) .map(move |result| { - *thread_result.lock().unwrap() = result.0; - () + thread_result.store(result.0, Relaxed); }), ); - assert!(*test_result.lock().unwrap()); + assert!(test_result.load(Relaxed)); } #[test] @@ -486,20 +484,19 @@ fn test_blocks_by_root_chunked_rpc() { }); // execute the futures and check the result - let test_result = Arc::new(Mutex::new(false)); + let test_result = Arc::new(AtomicBool::new(false)); let error_result = test_result.clone(); let thread_result = test_result.clone(); tokio::run( sender_future .select(receiver_future) .timeout(Duration::from_millis(1000)) - .map_err(move |_| *error_result.lock().unwrap() = false) + .map_err(move |_| error_result.store(false, Relaxed)) .map(move |result| { - *thread_result.lock().unwrap() = result.0; - () + thread_result.store(result.0, Relaxed); }), ); - assert!(*test_result.lock().unwrap()); + assert!(test_result.load(Relaxed)); } #[test] @@ -558,18 +555,17 @@ fn test_goodbye_rpc() { }); // execute the futures and check the result - let test_result = Arc::new(Mutex::new(false)); + let test_result = Arc::new(AtomicBool::new(false)); let error_result = test_result.clone(); let thread_result = test_result.clone(); tokio::run( sender_future .select(receiver_future) .timeout(Duration::from_millis(1000)) - .map_err(move |_| *error_result.lock().unwrap() = false) + .map_err(move |_| error_result.store(false, Relaxed)) .map(move |result| { - *thread_result.lock().unwrap() = result.0; - () + thread_result.store(result.0, Relaxed); }), ); - assert!(*test_result.lock().unwrap()); + assert!(test_result.load(Relaxed)); } diff --git 
a/beacon_node/genesis/src/common.rs b/beacon_node/genesis/src/common.rs index 9353ad33ae..5c44477384 100644 --- a/beacon_node/genesis/src/common.rs +++ b/beacon_node/genesis/src/common.rs @@ -19,7 +19,7 @@ pub fn genesis_deposits( let depth = spec.deposit_contract_tree_depth as usize; let mut tree = MerkleTree::create(&[], depth); for (i, deposit_leaf) in deposit_root_leaves.iter().enumerate() { - if let Err(_) = tree.push_leaf(*deposit_leaf, depth) { + if tree.push_leaf(*deposit_leaf, depth).is_err() { return Err(String::from("Failed to push leaf")); } diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index d3030720ac..bf9a8ce897 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -59,7 +59,6 @@ fn basic() { spec.min_genesis_active_validator_count = 8; let deposits = (0..spec.min_genesis_active_validator_count + 2) - .into_iter() .map(|i| { deposit_contract.deposit_helper::( generate_deterministic_keypair(i as usize), @@ -73,7 +72,7 @@ fn basic() { }) .collect::>(); - let deposit_future = deposit_contract.deposit_multiple(deposits.clone()); + let deposit_future = deposit_contract.deposit_multiple(deposits); let wait_future = service.wait_for_genesis_state::(update_interval, spec.clone()); diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 4b1215287d..a7c8ed40bc 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -229,7 +229,7 @@ impl MessageHandler { .on_block_gossip(peer_id.clone(), block); // TODO: Apply more sophisticated validation and decoding logic if should_forward_on { - self.propagate_message(id, peer_id.clone()); + self.propagate_message(id, peer_id); } } Err(e) => { diff --git a/beacon_node/network/src/message_processor.rs b/beacon_node/network/src/message_processor.rs index f77f8e6acd..dc4f91d2f8 100644 --- a/beacon_node/network/src/message_processor.rs +++ 
b/beacon_node/network/src/message_processor.rs @@ -203,7 +203,7 @@ impl MessageProcessor { ); self.network - .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); + .disconnect(peer_id, GoodbyeReason::IrrelevantNetwork); } else if remote.head_slot > self.chain.slot().unwrap_or_else(|_| Slot::from(0u64)) + FUTURE_SLOT_TOLERANCE { @@ -219,7 +219,7 @@ impl MessageProcessor { "reason" => "different system clocks or genesis time" ); self.network - .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); + .disconnect(peer_id, GoodbyeReason::IrrelevantNetwork); } else if remote.finalized_epoch <= local.finalized_epoch && remote.finalized_root != Hash256::zero() && local.finalized_root != Hash256::zero() @@ -239,7 +239,7 @@ impl MessageProcessor { "reason" => "different finalized chain" ); self.network - .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); + .disconnect(peer_id, GoodbyeReason::IrrelevantNetwork); } else if remote.finalized_epoch < local.finalized_epoch { // The node has a lower finalized epoch, their chain is not useful to us. There are two // cases where a node can have a lower finalized epoch: @@ -512,7 +512,7 @@ impl MessageProcessor { // Inform the sync manager to find parents for this block trace!(self.log, "Block with unknown parent received"; "peer_id" => format!("{:?}",peer_id)); - self.send_to_sync(SyncMessage::UnknownBlock(peer_id, Box::new(block.clone()))); + self.send_to_sync(SyncMessage::UnknownBlock(peer_id, Box::new(block))); SHOULD_FORWARD_GOSSIP_BLOCK } BlockProcessingOutcome::FutureSlot { diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index b1bbc3bbe5..7936e23d61 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -263,7 +263,7 @@ fn network_service( id, source, message, - topics: _, + .. 
} => { message_handler_send .try_send(HandlerMessage::PubsubMessage(id, source, message)) diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index a470ce9c65..f2cd7a970d 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -66,7 +66,7 @@ impl SyncNetworkContext { "count" => request.count, "peer" => format!("{:?}", peer_id) ); - self.send_rpc_request(peer_id.clone(), RPCRequest::BlocksByRange(request)) + self.send_rpc_request(peer_id, RPCRequest::BlocksByRange(request)) } pub fn blocks_by_root_request( @@ -81,7 +81,7 @@ impl SyncNetworkContext { "count" => request.block_roots.len(), "peer" => format!("{:?}", peer_id) ); - self.send_rpc_request(peer_id.clone(), RPCRequest::BlocksByRoot(request)) + self.send_rpc_request(peer_id, RPCRequest::BlocksByRoot(request)) } pub fn downvote_peer(&mut self, peer_id: PeerId) { @@ -91,7 +91,7 @@ impl SyncNetworkContext { "peer" => format!("{:?}", peer_id) ); // TODO: Implement reputation - self.disconnect(peer_id.clone(), GoodbyeReason::Fault); + self.disconnect(peer_id, GoodbyeReason::Fault); } fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) { diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index c6f8c44770..45a7672e4d 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -62,16 +62,16 @@ impl PendingBatches { let peer_request = batch.current_peer.clone(); self.peer_requests .entry(peer_request) - .or_insert_with(|| HashSet::new()) + .or_insert_with(HashSet::new) .insert(request_id); self.batches.insert(request_id, batch) } - pub fn remove(&mut self, request_id: &RequestId) -> Option> { - if let Some(batch) = self.batches.remove(request_id) { + pub fn remove(&mut self, request_id: RequestId) -> Option> { + if let Some(batch) = self.batches.remove(&request_id) { 
if let Entry::Occupied(mut entry) = self.peer_requests.entry(batch.current_peer.clone()) { - entry.get_mut().remove(request_id); + entry.get_mut().remove(&request_id); if entry.get().is_empty() { entry.remove(); @@ -85,8 +85,8 @@ impl PendingBatches { /// Adds a block to the batches if the request id exists. Returns None if there is no batch /// matching the request id. - pub fn add_block(&mut self, request_id: &RequestId, block: BeaconBlock) -> Option<()> { - let batch = self.batches.get_mut(request_id)?; + pub fn add_block(&mut self, request_id: RequestId, block: BeaconBlock) -> Option<()> { + let batch = self.batches.get_mut(&request_id)?; batch.downloaded_blocks.push(block); Some(()) } @@ -101,7 +101,7 @@ impl PendingBatches { pub fn remove_batch_by_peer(&mut self, peer_id: &PeerId) -> Option> { let request_ids = self.peer_requests.get(peer_id)?; - let request_id = request_ids.iter().next()?.clone(); - self.remove(&request_id) + let request_id = *request_ids.iter().next()?; + self.remove(request_id) } } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index be2fcf2c82..ddeaf0583e 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -144,11 +144,11 @@ impl SyncingChain { ) -> Option { if let Some(block) = beacon_block { // This is not a stream termination, simply add the block to the request - self.pending_batches.add_block(&request_id, block.clone())?; - return Some(ProcessingResult::KeepChain); + self.pending_batches.add_block(request_id, block.clone())?; + Some(ProcessingResult::KeepChain) } else { // A stream termination has been sent. This batch has ended. Process a completed batch. 
- let batch = self.pending_batches.remove(&request_id)?; + let batch = self.pending_batches.remove(request_id)?; Some(self.process_completed_batch(chain.clone(), network, batch, log)) } } @@ -433,7 +433,7 @@ impl SyncingChain { return true; } } - return false; + false } /// Returns a peer if there exists a peer which does not currently have a pending request. @@ -500,10 +500,10 @@ impl SyncingChain { &mut self, network: &mut SyncNetworkContext, peer_id: &PeerId, - request_id: &RequestId, + request_id: RequestId, log: &slog::Logger, ) -> Option { - if let Some(batch) = self.pending_batches.remove(&request_id) { + if let Some(batch) = self.pending_batches.remove(request_id) { warn!(log, "Batch failed. RPC Error"; "id" => batch.id, "retries" => batch.retries, "peer" => format!("{:?}", peer_id)); Some(self.failed_batch(network, batch, log)) diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 8f38b90c00..98ef6e6219 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -188,7 +188,7 @@ impl RangeSync { debug!(self.log, "Adding peer to the existing head chain peer pool"; "head_root" => format!("{}",remote.head_root), "head_slot" => remote.head_slot, "peer_id" => format!("{:?}", peer_id)); // add the peer to the head's pool - chain.add_peer(network, peer_id.clone(), &self.log); + chain.add_peer(network, peer_id, &self.log); } else { // There are no other head chains that match this peer's status, create a new one, and let start_slot = std::cmp::min(local_info.head_slot, remote_finalized_slot); @@ -305,29 +305,28 @@ impl RangeSync { /// retries. In this case, we need to remove the chain and re-status all the peers. 
fn remove_peer(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) { let log_ref = &self.log; - match self.chains.head_finalized_request(|chain| { - if chain.peer_pool.remove(peer_id) { - // this chain contained the peer - while let Some(batch) = chain.pending_batches.remove_batch_by_peer(peer_id) { - if let ProcessingResult::RemoveChain = - chain.failed_batch(network, batch, log_ref) - { - // a single batch failed, remove the chain - return Some(ProcessingResult::RemoveChain); + if let Some((index, ProcessingResult::RemoveChain)) = + self.chains.head_finalized_request(|chain| { + if chain.peer_pool.remove(peer_id) { + // this chain contained the peer + while let Some(batch) = chain.pending_batches.remove_batch_by_peer(peer_id) { + if let ProcessingResult::RemoveChain = + chain.failed_batch(network, batch, log_ref) + { + // a single batch failed, remove the chain + return Some(ProcessingResult::RemoveChain); + } } + // peer removed from chain, no batch failed + Some(ProcessingResult::KeepChain) + } else { + None } - // peer removed from chain, no batch failed - Some(ProcessingResult::KeepChain) - } else { - None - } - }) { - Some((index, ProcessingResult::RemoveChain)) => { - // the chain needed to be removed - debug!(self.log, "Chain being removed due to failed batch"); - self.chains.remove_chain(network, index, &self.log); - } - _ => {} // chain didn't need to be removed, ignore + }) + { + // the chain needed to be removed + debug!(self.log, "Chain being removed due to failed batch"); + self.chains.remove_chain(network, index, &self.log); } } @@ -344,7 +343,7 @@ impl RangeSync { // check that this request is pending let log_ref = &self.log; match self.chains.head_finalized_request(|chain| { - chain.inject_error(network, &peer_id, &request_id, log_ref) + chain.inject_error(network, &peer_id, request_id, log_ref) }) { Some((_, ProcessingResult::KeepChain)) => {} // error handled chain persists Some((index, ProcessingResult::RemoveChain)) => { diff --git 
a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index e64c1e459b..26de0a819d 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -145,7 +145,7 @@ pub fn state_at_slot( // I'm not sure if this `.clone()` will be optimized out. If not, it seems unnecessary. Ok(( beacon_chain.head()?.beacon_state_root, - beacon_chain.head()?.beacon_state.clone(), + beacon_chain.head()?.beacon_state, )) } else { let root = state_root_at_slot(beacon_chain, slot)?; @@ -209,7 +209,7 @@ pub fn state_root_at_slot( // // Use `per_slot_processing` to advance the head state to the present slot, // assuming that all slots do not contain a block (i.e., they are skipped slots). - let mut state = beacon_chain.head()?.beacon_state.clone(); + let mut state = beacon_chain.head()?.beacon_state; let spec = &T::EthSpec::default_spec(); for _ in state.slot.as_u64()..slot.as_u64() { diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 1473c5e72a..221d96e251 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -54,6 +54,8 @@ pub struct NetworkInfo { pub network_chan: mpsc::UnboundedSender, } +// Allowing more than 7 arguments. +#[allow(clippy::too_many_arguments)] pub fn start_server( config: &Config, executor: &TaskExecutor, diff --git a/beacon_node/rest_api/src/router.rs b/beacon_node/rest_api/src/router.rs index 8c30ff3f00..d9fd63ce55 100644 --- a/beacon_node/rest_api/src/router.rs +++ b/beacon_node/rest_api/src/router.rs @@ -19,6 +19,8 @@ where Box::new(item.into_future()) } +// Allowing more than 7 arguments. 
+#[allow(clippy::too_many_arguments)] pub fn route( req: Request, beacon_chain: Arc>, diff --git a/beacon_node/rest_api/tests/test.rs b/beacon_node/rest_api/tests/test.rs index 987b8e3264..1d438253a5 100644 --- a/beacon_node/rest_api/tests/test.rs +++ b/beacon_node/rest_api/tests/test.rs @@ -47,8 +47,7 @@ fn get_randao_reveal( .head() .expect("should get head") .beacon_state - .fork - .clone(); + .fork; let proposer_index = beacon_chain .block_proposer(slot) .expect("should get proposer index"); @@ -69,8 +68,7 @@ fn sign_block( .head() .expect("should get head") .beacon_state - .fork - .clone(); + .fork; let proposer_index = beacon_chain .block_proposer(block.slot) .expect("should get proposer index"); @@ -91,11 +89,7 @@ fn validator_produce_attestation() { .client .beacon_chain() .expect("client should have beacon chain"); - let state = beacon_chain - .head() - .expect("should get head") - .beacon_state - .clone(); + let state = beacon_chain.head().expect("should get head").beacon_state; let validator_index = 0; let duties = state @@ -169,7 +163,7 @@ fn validator_produce_attestation() { remote_node .http .validator() - .publish_attestation(attestation.clone()), + .publish_attestation(attestation), ) .expect("should publish attestation"); assert!( @@ -344,7 +338,7 @@ fn validator_block_post() { remote_node .http .validator() - .produce_block(slot, randao_reveal.clone()), + .produce_block(slot, randao_reveal), ) .expect("should fetch block from http api"); @@ -360,12 +354,12 @@ fn validator_block_post() { ); } - sign_block(beacon_chain.clone(), &mut block, spec); + sign_block(beacon_chain, &mut block, spec); let block_root = block.canonical_root(); let publish_status = env .runtime() - .block_on(remote_node.http.validator().publish_block(block.clone())) + .block_on(remote_node.http.validator().publish_block(block)) .expect("should publish block"); if cfg!(not(feature = "fake_crypto")) { @@ -419,7 +413,7 @@ fn validator_block_get() { .expect("client should have 
beacon chain"); let slot = Slot::new(1); - let randao_reveal = get_randao_reveal(beacon_chain.clone(), slot, spec); + let randao_reveal = get_randao_reveal(beacon_chain, slot, spec); let block = env .runtime() diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 61d6f7db36..b68e1841e0 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -268,7 +268,7 @@ pub fn get_configs( if eth2_config.spec_constants != client_config.spec_constants { crit!(log, "Specification constants do not match."; "client_config" => client_config.spec_constants.to_string(), - "eth2_config" => eth2_config.spec_constants.to_string() + "eth2_config" => eth2_config.spec_constants ); return Err("Specification constant mismatch".into()); } diff --git a/beacon_node/store/benches/benches.rs b/beacon_node/store/benches/benches.rs index 5a8675ecff..eac31c36b4 100644 --- a/beacon_node/store/benches/benches.rs +++ b/beacon_node/store/benches/benches.rs @@ -24,7 +24,6 @@ fn get_state(validator_count: usize) -> BeaconState { } state.validators = (0..validator_count) - .into_iter() .collect::>() .par_iter() .map(|&i| Validator { @@ -77,7 +76,7 @@ fn all_benches(c: &mut Criterion) { .sample_size(10), ); - let inner_state = state.clone(); + let inner_state = state; c.bench( &format!("{}_validators", validator_count), Benchmark::new("encode/beacon_state/committee_cache[0]", move |b| { diff --git a/beacon_node/store/examples/ssz_encode_state_container.rs b/beacon_node/store/examples/ssz_encode_state_container.rs index f36f85d063..d44f7b4e9e 100644 --- a/beacon_node/store/examples/ssz_encode_state_container.rs +++ b/beacon_node/store/examples/ssz_encode_state_container.rs @@ -27,7 +27,6 @@ fn get_state(validator_count: usize) -> BeaconState { } state.validators = (0..validator_count) - .into_iter() .collect::>() .par_iter() .map(|&i| Validator { diff --git a/beacon_node/store/src/block_at_slot.rs b/beacon_node/store/src/block_at_slot.rs index 292d7b3838..dbb2c946e1 100644 
--- a/beacon_node/store/src/block_at_slot.rs +++ b/beacon_node/store/src/block_at_slot.rs @@ -1,5 +1,6 @@ use super::*; use ssz::{Decode, DecodeError}; +use std::cmp::Ordering; fn get_block_bytes, E: EthSpec>( store: &T, @@ -45,12 +46,10 @@ fn get_at_preceeding_slot, E: EthSpec>( if let Some(bytes) = get_block_bytes::<_, E>(store, root)? { let this_slot = read_slot_from_block_bytes(&bytes)?; - if this_slot == slot { - break Ok(Some((root, bytes))); - } else if this_slot < slot { - break Ok(None); - } else { - root = read_parent_root_from_block_bytes(&bytes)?; + match this_slot.cmp(&slot) { + Ordering::Equal => break Ok(Some((root, bytes))), + Ordering::Less => break Ok(None), + Ordering::Greater => root = read_parent_root_from_block_bytes(&bytes)?, } } else { break Ok(None); diff --git a/beacon_node/store/src/block_root_tree.rs b/beacon_node/store/src/block_root_tree.rs index 76950069c9..dba1b1c24b 100644 --- a/beacon_node/store/src/block_root_tree.rs +++ b/beacon_node/store/src/block_root_tree.rs @@ -237,7 +237,7 @@ mod test { .add_block_root(int_hash(i), int_hash(i - 1), Slot::new(i)) .expect("add_block_root ok"); - let expected = (1..i + 1) + let expected = (1..=i) .rev() .map(|j| (int_hash(j), Slot::new(j))) .collect::>(); @@ -262,12 +262,12 @@ mod test { .add_block_root(int_hash(i), int_hash(i - step_length), Slot::new(i)) .expect("add_block_root ok"); - let sparse_expected = (1..i + 1) + let sparse_expected = (1..=i) .rev() .step_by(step_length as usize) .map(|j| (int_hash(j), Slot::new(j))) .collect_vec(); - let every_slot_expected = (1..i + 1) + let every_slot_expected = (1..=i) .rev() .map(|j| { let nearest = 1 + (j - 1) / step_length * step_length; @@ -343,10 +343,9 @@ mod test { // Check that advancing the finalized root onto one side completely removes the other // side. 
- let fin_tree = tree.clone(); + let fin_tree = tree; let prune_point = num_blocks / 2; let remaining_fork1_blocks = all_fork1_blocks - .clone() .into_iter() .take_while(|(_, slot)| *slot >= prune_point) .collect_vec(); diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index 4dd05d7168..b876545c7d 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -185,7 +185,7 @@ pub trait Field: Copy { .values .first() .cloned() - .ok_or(ChunkError::MissingGenesisValue.into()) + .ok_or_else(|| ChunkError::MissingGenesisValue.into()) } /// Store the given `value` as the genesis value for this field, unless stored already. @@ -685,7 +685,7 @@ mod test { ]; assert_eq!( - stitch(chunks.clone(), 2, 6, chunk_size, 12, 99).unwrap(), + stitch(chunks, 2, 6, chunk_size, 12, 99).unwrap(), vec![99, 99, 2, 3, 4, 5, 99, 99, 99, 99, 99, 99] ); } @@ -707,7 +707,7 @@ mod test { ); assert_eq!( - stitch(chunks.clone(), 2, 10, chunk_size, 8, default).unwrap(), + stitch(chunks, 2, 10, chunk_size, 8, default).unwrap(), vec![v(8), v(9), v(2), v(3), v(4), v(5), v(6), v(7)] ); } diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs index bcec40935b..1c2431fc3f 100644 --- a/beacon_node/store/src/forwards_iter.rs +++ b/beacon_node/store/src/forwards_iter.rs @@ -20,9 +20,9 @@ pub struct SimpleForwardsBlockRootsIterator { /// Fusion of the above two approaches to forwards iteration. Fast and efficient. pub enum HybridForwardsBlockRootsIterator { PreFinalization { - iter: FrozenForwardsBlockRootsIterator, + iter: Box>, /// Data required by the `PostFinalization` iterator when we get to it. 
- continuation_data: Option<(BeaconState, Hash256)>, + continuation_data: Box, Hash256)>>, }, PostFinalization { iter: SimpleForwardsBlockRootsIterator, @@ -99,13 +99,13 @@ impl HybridForwardsBlockRootsIterator { if start_slot < latest_restore_point_slot { PreFinalization { - iter: FrozenForwardsBlockRootsIterator::new( + iter: Box::new(FrozenForwardsBlockRootsIterator::new( store, start_slot, latest_restore_point_slot, spec, - ), - continuation_data: Some((end_state, end_block_root)), + )), + continuation_data: Box::new(Some((end_state, end_block_root))), } } else { PostFinalization { diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index c9acf76314..90d9b74413 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -145,14 +145,15 @@ impl Store for HotColdDB { let current_split_slot = store.get_split_slot(); if frozen_head.slot < current_split_slot { - Err(HotColdDBError::FreezeSlotError { + return Err(HotColdDBError::FreezeSlotError { current_split_slot, proposed_split_slot: frozen_head.slot, - })?; + } + .into()); } if frozen_head.slot % E::slots_per_epoch() != 0 { - Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot))?; + return Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot).into()); } // 1. Copy all of the states between the head and the split slot, from the hot DB @@ -574,7 +575,7 @@ impl HotColdDB { let key = Self::restore_point_key(restore_point_index); RestorePointHash::db_get(&self.cold_db, &key)? .map(|r| r.state_root) - .ok_or(HotColdDBError::MissingRestorePointHash(restore_point_index).into()) + .ok_or_else(|| HotColdDBError::MissingRestorePointHash(restore_point_index).into()) } /// Store the state root of a restore point. 
diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 07078aceea..c17ee70d74 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -345,7 +345,7 @@ mod test { state_b.state_roots[0] = state_a_root; store.put_state(&state_a_root, &state_a).unwrap(); - let iter = BlockRootsIterator::new(store.clone(), &state_b); + let iter = BlockRootsIterator::new(store, &state_b); assert!( iter.clone().any(|(_root, slot)| slot == 0), @@ -394,7 +394,7 @@ mod test { store.put_state(&state_a_root, &state_a).unwrap(); store.put_state(&state_b_root, &state_b).unwrap(); - let iter = StateRootsIterator::new(store.clone(), &state_b); + let iter = StateRootsIterator::new(store, &state_b); assert!( iter.clone().any(|(_root, slot)| slot == 0), diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs index 1695756512..59caf9eaad 100644 --- a/beacon_node/store/src/memory_store.rs +++ b/beacon_node/store/src/memory_store.rs @@ -47,11 +47,7 @@ impl Store for MemoryStore { fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error> { let column_key = Self::get_key_for_col(col, key); - Ok(self - .db - .read() - .get(&column_key) - .and_then(|val| Some(val.clone()))) + Ok(self.db.read().get(&column_key).cloned()) } /// Puts a key in the database. diff --git a/beacon_node/store/src/migrate.rs b/beacon_node/store/src/migrate.rs index 2d6cd604b2..ffe518a894 100644 --- a/beacon_node/store/src/migrate.rs +++ b/beacon_node/store/src/migrate.rs @@ -60,13 +60,12 @@ impl> Migrate for BlockingMigrator { } } +type MpscSender = mpsc::Sender<(Hash256, BeaconState)>; + /// Migrator that runs a background thread to migrate state from the hot to the cold database. 
pub struct BackgroundMigrator { db: Arc>, - tx_thread: Mutex<( - mpsc::Sender<(Hash256, BeaconState)>, - thread::JoinHandle<()>, - )>, + tx_thread: Mutex<(MpscSender, thread::JoinHandle<()>)>, } impl Migrate, E> for BackgroundMigrator { diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index 547bdf247f..d0798afd20 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -8,6 +8,7 @@ use itertools::Itertools; use parking_lot::RwLock; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use std::cmp::Ordering; use std::collections::HashMap; use std::fmt; use std::marker::PhantomData; @@ -182,13 +183,15 @@ impl ReducedTreeSsz { } } - pub fn to_reduced_tree( + pub fn into_reduced_tree( self, store: Arc, block_root_tree: Arc, ) -> Result> { if self.node_hashes.len() != self.nodes.len() { - Error::InvalidReducedTreeSsz("node_hashes and nodes should have equal length".into()); + return Err(Error::InvalidReducedTreeSsz( + "node_hashes and nodes should have equal length".to_string(), + )); } let nodes: HashMap<_, _> = self .node_hashes @@ -740,16 +743,19 @@ where if a_slot < self.root.1 || b_slot < self.root.1 { None } else { - if a_slot < b_slot { - for _ in a_slot.as_u64()..b_slot.as_u64() { - b_root = b_iter.next()?.0; + match a_slot.cmp(&b_slot) { + Ordering::Less => { + for _ in a_slot.as_u64()..b_slot.as_u64() { + b_root = b_iter.next()?.0; + } } - } else if a_slot > b_slot { - for _ in b_slot.as_u64()..a_slot.as_u64() { - a_root = a_iter.next()?.0; + Ordering::Greater => { + for _ in b_slot.as_u64()..a_slot.as_u64() { + a_root = a_iter.next()?.0; + } } + Ordering::Equal => (), } - Some((a_root, b_root)) } } @@ -876,7 +882,7 @@ where block_root_tree: Arc, ) -> Result { let reduced_tree_ssz = ReducedTreeSsz::from_ssz_bytes(bytes)?; - Ok(reduced_tree_ssz.to_reduced_tree(store, block_root_tree)?) + Ok(reduced_tree_ssz.into_reduced_tree(store, block_root_tree)?) 
} } @@ -1013,8 +1019,7 @@ mod tests { ); let ssz_tree = ReducedTreeSsz::from_reduced_tree(&tree); let bytes = tree.as_bytes(); - let recovered_tree = - ReducedTree::from_bytes(&bytes, store.clone(), block_root_tree).unwrap(); + let recovered_tree = ReducedTree::from_bytes(&bytes, store, block_root_tree).unwrap(); let recovered_ssz = ReducedTreeSsz::from_reduced_tree(&recovered_tree); assert_eq!(ssz_tree, recovered_ssz); diff --git a/eth2/operation_pool/src/max_cover.rs b/eth2/operation_pool/src/max_cover.rs index 15d528e45b..8188d6939b 100644 --- a/eth2/operation_pool/src/max_cover.rs +++ b/eth2/operation_pool/src/max_cover.rs @@ -167,7 +167,7 @@ mod test { HashSet::from_iter(vec![5, 6, 7, 8]), // 4, 4* HashSet::from_iter(vec![0, 1, 2, 3, 4]), // 5* ]; - let cover = maximum_cover(sets.clone(), 3); + let cover = maximum_cover(sets, 3); assert_eq!(quality(&cover), 11); } @@ -182,7 +182,7 @@ mod test { HashSet::from_iter(vec![1, 5, 6, 8]), HashSet::from_iter(vec![1, 7, 11, 19]), ]; - let cover = maximum_cover(sets.clone(), 5); + let cover = maximum_cover(sets, 5); assert_eq!(quality(&cover), 19); assert_eq!(cover.len(), 5); } diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs index c277e6734f..f03c571ad3 100644 --- a/eth2/state_processing/benches/benches.rs +++ b/eth2/state_processing/benches/benches.rs @@ -311,11 +311,7 @@ fn bench_block( ) .expect("should get indexed attestation"); - ( - local_spec.clone(), - local_state.clone(), - indexed_attestation.clone(), - ) + (local_spec.clone(), local_state.clone(), indexed_attestation) }, |(spec, ref mut state, indexed_attestation)| { black_box( @@ -349,11 +345,7 @@ fn bench_block( ) .expect("should get indexed attestation"); - ( - local_spec.clone(), - local_state.clone(), - indexed_attestation.clone(), - ) + (local_spec.clone(), local_state.clone(), indexed_attestation) }, |(spec, ref mut state, indexed_attestation)| { black_box( @@ -373,7 +365,7 @@ fn bench_block( ); let 
local_block = block.clone(); - let local_state = state.clone(); + let local_state = state; c.bench( &title, Benchmark::new("get_attesting_indices", move |b| { @@ -409,7 +401,7 @@ fn bench_block( .sample_size(10), ); - let local_block = block.clone(); + let local_block = block; c.bench( &title, Benchmark::new("ssz_block_len", move |b| { diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs index 77bc14ba30..83f8cdd164 100644 --- a/eth2/state_processing/tests/tests.rs +++ b/eth2/state_processing/tests/tests.rs @@ -29,7 +29,7 @@ where F: FnMut(&mut BlockBuilder), G: FnMut(&mut BeaconBlock), { - let (mut block, state) = get_block::(mutate_builder); + let (mut block, mut state) = get_block::(mutate_builder); /* * Control check to ensure the valid block should pass verification. @@ -79,7 +79,7 @@ where assert_eq!( per_block_processing( - &mut state.clone(), + &mut state, &block, None, BlockSignatureStrategy::VerifyBulk, diff --git a/eth2/types/benches/benches.rs b/eth2/types/benches/benches.rs index dd66c71742..e38b9ded34 100644 --- a/eth2/types/benches/benches.rs +++ b/eth2/types/benches/benches.rs @@ -22,7 +22,6 @@ fn get_state(validator_count: usize) -> BeaconState { } state.validators = (0..validator_count) - .into_iter() .collect::>() .par_iter() .map(|&i| Validator { @@ -91,7 +90,7 @@ fn all_benches(c: &mut Criterion) { .sample_size(10), ); - let inner_state = state.clone(); + let inner_state = state; c.bench( &format!("{}_validators", validator_count), Benchmark::new("clone_without_caches/beacon_state", move |b| { diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs index e1b2078e93..66306ba18f 100644 --- a/eth2/types/src/attestation.rs +++ b/eth2/types/src/attestation.rs @@ -71,13 +71,13 @@ impl Attestation { if self .aggregation_bits .get(committee_position) - .map_err(|e| Error::SszTypesError(e))? + .map_err(Error::SszTypesError)? 
{ Err(Error::AlreadySigned(committee_position)) } else { self.aggregation_bits .set(committee_position, true) - .map_err(|e| Error::SszTypesError(e))?; + .map_err(Error::SszTypesError)?; let message = self.data.tree_hash_root(); let domain = spec.get_domain(self.data.target.epoch, Domain::BeaconAttester, fork); diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index dd483a7740..ac17d6d04a 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -897,7 +897,7 @@ impl BeaconState { .enumerate() .skip(self.pubkey_cache.len()) { - let success = self.pubkey_cache.insert(validator.pubkey.clone().into(), i); + let success = self.pubkey_cache.insert(validator.pubkey.clone(), i); if !success { return Err(Error::PubkeyCacheInconsistent); } @@ -957,7 +957,7 @@ impl BeaconState { validator .pubkey .decompress() - .map_err(|e| Error::InvalidValidatorPubkey(e)) + .map_err(Error::InvalidValidatorPubkey) } else { Ok(()) } diff --git a/eth2/types/src/test_utils/builders/testing_attestation_builder.rs b/eth2/types/src/test_utils/builders/testing_attestation_builder.rs index 3d877f8812..a0e34128ca 100644 --- a/eth2/types/src/test_utils/builders/testing_attestation_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_attestation_builder.rs @@ -24,9 +24,8 @@ impl TestingAttestationBuilder { let mut aggregation_bits_len = committee.len(); - match test_task { - AttestationTestTask::BadAggregationBitfieldLen => aggregation_bits_len += 1, - _ => (), + if test_task == AttestationTestTask::BadAggregationBitfieldLen { + aggregation_bits_len += 1 } let mut aggregation_bits = BitList::with_capacity(aggregation_bits_len).unwrap(); diff --git a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs index 04f22fa89b..333f221ad7 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ 
b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs @@ -15,7 +15,7 @@ pub const KEYPAIRS_FILE: &str = "keypairs.raw_keypairs"; /// `./keypairs.raw_keypairs`. pub fn keypairs_path() -> PathBuf { let dir = dirs::home_dir() - .and_then(|home| Some(home.join(".lighthouse"))) + .map(|home| (home.join(".lighthouse"))) .unwrap_or_else(|| PathBuf::from("")); dir.join(KEYPAIRS_FILE) } @@ -42,7 +42,7 @@ impl TestingBeaconStateBuilder { /// If the file does not contain enough keypairs or is invalid. pub fn from_default_keypairs_file_if_exists(validator_count: usize, spec: &ChainSpec) -> Self { let dir = dirs::home_dir() - .and_then(|home| Some(home.join(".lighthouse"))) + .map(|home| home.join(".lighthouse")) .unwrap_or_else(|| PathBuf::from("")); let file = dir.join(KEYPAIRS_FILE); diff --git a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs index bfd4bd334d..182236a366 100644 --- a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs @@ -30,7 +30,7 @@ impl TestingProposerSlashingBuilder { let slot = Slot::new(0); let hash_1 = Hash256::from([1; 32]); let hash_2 = if test_task == ProposerSlashingTestTask::ProposalsIdentical { - hash_1.clone() + hash_1 } else { Hash256::from([2; 32]) }; diff --git a/eth2/utils/bls/src/aggregate_signature.rs b/eth2/utils/bls/src/aggregate_signature.rs index 146fbfd5db..8d930f0cc4 100644 --- a/eth2/utils/bls/src/aggregate_signature.rs +++ b/eth2/utils/bls/src/aggregate_signature.rs @@ -99,9 +99,10 @@ impl AggregateSignature { for byte in bytes { if *byte != 0 { let sig = RawAggregateSignature::from_bytes(&bytes).map_err(|_| { - DecodeError::BytesInvalid( - format!("Invalid AggregateSignature bytes: {:?}", bytes).to_string(), - ) + DecodeError::BytesInvalid(format!( + "Invalid AggregateSignature bytes: {:?}", + bytes + )) })?; return 
Ok(Self { diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index 095d1989f5..42ed592efb 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -39,7 +39,7 @@ impl PublicKey { /// Converts compressed bytes to PublicKey pub fn from_bytes(bytes: &[u8]) -> Result { let pubkey = RawPublicKey::from_bytes(&bytes).map_err(|_| { - DecodeError::BytesInvalid(format!("Invalid PublicKey bytes: {:?}", bytes).to_string()) + DecodeError::BytesInvalid(format!("Invalid PublicKey bytes: {:?}", bytes)) })?; Ok(PublicKey(pubkey)) diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs index b3912f7351..b6ba37017f 100644 --- a/eth2/utils/bls/src/signature.rs +++ b/eth2/utils/bls/src/signature.rs @@ -81,9 +81,7 @@ impl Signature { for byte in bytes { if *byte != 0 { let raw_signature = RawSignature::from_bytes(&bytes).map_err(|_| { - DecodeError::BytesInvalid( - format!("Invalid Signature bytes: {:?}", bytes).to_string(), - ) + DecodeError::BytesInvalid(format!("Invalid Signature bytes: {:?}", bytes)) })?; return Ok(Signature { signature: raw_signature, diff --git a/eth2/utils/bls/src/signature_set.rs b/eth2/utils/bls/src/signature_set.rs index 27eebe1aff..10ef6d65f8 100644 --- a/eth2/utils/bls/src/signature_set.rs +++ b/eth2/utils/bls/src/signature_set.rs @@ -158,17 +158,17 @@ fn aggregate_public_keys<'a>(public_keys: &'a [Cow<'a, G1Point>]) -> G1Point { } pub trait G1Ref { - fn g1_ref<'a>(&'a self) -> Cow<'a, G1Point>; + fn g1_ref(&self) -> Cow<'_, G1Point>; } impl G1Ref for AggregatePublicKey { - fn g1_ref<'a>(&'a self) -> Cow<'a, G1Point> { + fn g1_ref(&self) -> Cow<'_, G1Point> { Cow::Borrowed(&self.as_raw().point) } } impl G1Ref for PublicKey { - fn g1_ref<'a>(&'a self) -> Cow<'a, G1Point> { + fn g1_ref(&self) -> Cow<'_, G1Point> { Cow::Borrowed(&self.as_raw().point) } } diff --git a/eth2/utils/cached_tree_hash/src/test.rs b/eth2/utils/cached_tree_hash/src/test.rs index 68173fd6a6..0e3679d9fd 
100644 --- a/eth2/utils/cached_tree_hash/src/test.rs +++ b/eth2/utils/cached_tree_hash/src/test.rs @@ -131,17 +131,15 @@ fn variable_list_h256_test(leaves_and_skips: Vec<(u64, bool)>) -> for (end, (_, update_cache)) in leaves_and_skips.into_iter().enumerate() { list = VariableList::new(leaves[..end].to_vec()).unwrap(); - if update_cache { - if list + if update_cache + && list .recalculate_tree_hash_root(&mut cache) .unwrap() .as_bytes() != &list.tree_hash_root()[..] - { - return false; - } + { + return false; } } - true } diff --git a/eth2/utils/deposit_contract/src/lib.rs b/eth2/utils/deposit_contract/src/lib.rs index a235d0ff71..ba473032c0 100644 --- a/eth2/utils/deposit_contract/src/lib.rs +++ b/eth2/utils/deposit_contract/src/lib.rs @@ -58,7 +58,7 @@ mod tests { let spec = &E::default_spec(); let keypair = generate_deterministic_keypair(42); - let deposit = get_deposit(keypair.clone(), spec); + let deposit = get_deposit(keypair, spec); let data = eth1_tx_data(&deposit).expect("should produce tx data"); diff --git a/eth2/utils/eth2_interop_keypairs/src/lib.rs b/eth2/utils/eth2_interop_keypairs/src/lib.rs index 6fa303553b..94b0b207fe 100644 --- a/eth2/utils/eth2_interop_keypairs/src/lib.rs +++ b/eth2/utils/eth2_interop_keypairs/src/lib.rs @@ -123,8 +123,7 @@ fn string_to_bytes(string: &str) -> Result, String> { /// Uses this as reference: /// https://github.com/ethereum/eth2.0-pm/blob/9a9dbcd95e2b8e10287797bd768014ab3d842e99/interop/mocked_start/keygen_10_validators.yaml pub fn keypairs_from_yaml_file(path: PathBuf) -> Result, String> { - let file = - File::open(path.clone()).map_err(|e| format!("Unable to open YAML key file: {}", e))?; + let file = File::open(path).map_err(|e| format!("Unable to open YAML key file: {}", e))?; serde_yaml::from_reader::<_, Vec>(file) .map_err(|e| format!("Could not parse YAML: {:?}", e))? 
diff --git a/eth2/utils/eth2_testnet_config/src/lib.rs b/eth2/utils/eth2_testnet_config/src/lib.rs index 57575fead0..2c1d455dd3 100644 --- a/eth2/utils/eth2_testnet_config/src/lib.rs +++ b/eth2/utils/eth2_testnet_config/src/lib.rs @@ -227,7 +227,7 @@ mod tests { let genesis_state = Some(BeaconState::new(42, eth1_data, spec)); let yaml_config = Some(YamlConfig::from_spec::(spec)); - do_test::(boot_enr, genesis_state.clone(), yaml_config.clone()); + do_test::(boot_enr, genesis_state, yaml_config); do_test::(None, None, None); } @@ -237,13 +237,13 @@ mod tests { yaml_config: Option, ) { let temp_dir = TempDir::new("eth2_testnet_test").expect("should create temp dir"); - let base_dir = PathBuf::from(temp_dir.path().join("my_testnet")); + let base_dir = temp_dir.path().join("my_testnet"); let deposit_contract_address = "0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413".to_string(); let deposit_contract_deploy_block = 42; let testnet: Eth2TestnetConfig = Eth2TestnetConfig { - deposit_contract_address: deposit_contract_address.clone(), - deposit_contract_deploy_block: deposit_contract_deploy_block, + deposit_contract_address, + deposit_contract_deploy_block, boot_enr, genesis_state, yaml_config, diff --git a/eth2/utils/lighthouse_metrics/src/lib.rs b/eth2/utils/lighthouse_metrics/src/lib.rs index 7c229b5929..c7d312a4f4 100644 --- a/eth2/utils/lighthouse_metrics/src/lib.rs +++ b/eth2/utils/lighthouse_metrics/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(clippy::needless_doctest_main)] //! A wrapper around the `prometheus` crate that provides a global, `lazy_static` metrics registry //! and functions to add and use the following components (more info at //! 
[Prometheus docs](https://prometheus.io/docs/concepts/metric_types/)): diff --git a/eth2/utils/ssz/src/decode.rs b/eth2/utils/ssz/src/decode.rs index abcebda073..db80c24094 100644 --- a/eth2/utils/ssz/src/decode.rs +++ b/eth2/utils/ssz/src/decode.rs @@ -99,7 +99,7 @@ impl<'a> SszDecoderBuilder<'a> { let previous_offset = self .offsets .last() - .and_then(|o| Some(o.offset)) + .map(|o| o.offset) .unwrap_or_else(|| BYTES_PER_LENGTH_OFFSET); if (previous_offset > offset) || (offset > self.bytes.len()) { @@ -179,7 +179,7 @@ impl<'a> SszDecoderBuilder<'a> { /// b: Vec, /// } /// -/// fn main() { +/// fn ssz_decoding_example() { /// let foo = Foo { /// a: 42, /// b: vec![1, 3, 3, 7] diff --git a/eth2/utils/ssz/src/decode/impls.rs b/eth2/utils/ssz/src/decode/impls.rs index 1a12e7e193..39b7fa3c1a 100644 --- a/eth2/utils/ssz/src/decode/impls.rs +++ b/eth2/utils/ssz/src/decode/impls.rs @@ -207,9 +207,10 @@ impl Decode for bool { match bytes[0] { 0b0000_0000 => Ok(false), 0b0000_0001 => Ok(true), - _ => Err(DecodeError::BytesInvalid( - format!("Out-of-range for boolean: {}", bytes[0]).to_string(), - )), + _ => Err(DecodeError::BytesInvalid(format!( + "Out-of-range for boolean: {}", + bytes[0] + ))), } } } diff --git a/eth2/utils/ssz/src/encode.rs b/eth2/utils/ssz/src/encode.rs index 52b3d9bfd9..91281e0505 100644 --- a/eth2/utils/ssz/src/encode.rs +++ b/eth2/utils/ssz/src/encode.rs @@ -64,7 +64,7 @@ pub trait Encode { /// b: Vec, /// } /// -/// fn main() { +/// fn ssz_encode_example() { /// let foo = Foo { /// a: 42, /// b: vec![1, 3, 3, 7] diff --git a/eth2/utils/ssz/src/lib.rs b/eth2/utils/ssz/src/lib.rs index 115633889e..89e707395a 100644 --- a/eth2/utils/ssz/src/lib.rs +++ b/eth2/utils/ssz/src/lib.rs @@ -17,7 +17,7 @@ //! b: Vec, //! } //! -//! fn main() { +//! fn ssz_encode_decode_example() { //! let foo = Foo { //! a: 42, //! 
b: vec![1, 3, 3, 7] diff --git a/eth2/utils/ssz/tests/tests.rs b/eth2/utils/ssz/tests/tests.rs index 26f2f53efe..f82e5d6e3f 100644 --- a/eth2/utils/ssz/tests/tests.rs +++ b/eth2/utils/ssz/tests/tests.rs @@ -2,6 +2,7 @@ use ethereum_types::H256; use ssz::{Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; +#[allow(clippy::zero_prefixed_literal)] mod round_trip { use super::*; diff --git a/eth2/utils/ssz_types/src/bitfield.rs b/eth2/utils/ssz_types/src/bitfield.rs index cc01d40c7e..d18267ee28 100644 --- a/eth2/utils/ssz_types/src/bitfield.rs +++ b/eth2/utils/ssz_types/src/bitfield.rs @@ -739,6 +739,7 @@ mod bitvector { } #[cfg(test)] +#[allow(clippy::cognitive_complexity)] mod bitlist { use super::*; use crate::BitList; @@ -937,7 +938,7 @@ mod bitlist { fn test_set_unset(num_bits: usize) { let mut bitfield = BitList1024::with_capacity(num_bits).unwrap(); - for i in 0..num_bits + 1 { + for i in 0..=num_bits { if i < num_bits { // Starts as false assert_eq!(bitfield.get(i), Ok(false)); @@ -1023,10 +1024,7 @@ mod bitlist { vec![0b1111_1111, 0b0000_0000] ); bitfield.set(8, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - vec![0b1111_1111, 0b0000_0001] - ); + assert_eq!(bitfield.into_raw_bytes(), vec![0b1111_1111, 0b0000_0001]); } #[test] diff --git a/eth2/utils/ssz_types/src/fixed_vector.rs b/eth2/utils/ssz_types/src/fixed_vector.rs index f9c8963313..eb63d6d39c 100644 --- a/eth2/utils/ssz_types/src/fixed_vector.rs +++ b/eth2/utils/ssz_types/src/fixed_vector.rs @@ -261,15 +261,15 @@ mod test { #[test] fn new() { let vec = vec![42; 5]; - let fixed: Result, _> = FixedVector::new(vec.clone()); + let fixed: Result, _> = FixedVector::new(vec); assert!(fixed.is_err()); let vec = vec![42; 3]; - let fixed: Result, _> = FixedVector::new(vec.clone()); + let fixed: Result, _> = FixedVector::new(vec); assert!(fixed.is_err()); let vec = vec![42; 4]; - let fixed: Result, _> = FixedVector::new(vec.clone()); + let fixed: Result, _> = 
FixedVector::new(vec); assert!(fixed.is_ok()); } @@ -299,7 +299,7 @@ mod test { assert_eq!(&fixed[..], &vec![42, 42, 42, 0][..]); let vec = vec![]; - let fixed: FixedVector = FixedVector::from(vec.clone()); + let fixed: FixedVector = FixedVector::from(vec); assert_eq!(&fixed[..], &vec![0, 0, 0, 0][..]); } diff --git a/eth2/utils/ssz_types/src/variable_list.rs b/eth2/utils/ssz_types/src/variable_list.rs index feb656745b..c033315123 100644 --- a/eth2/utils/ssz_types/src/variable_list.rs +++ b/eth2/utils/ssz_types/src/variable_list.rs @@ -247,15 +247,15 @@ mod test { #[test] fn new() { let vec = vec![42; 5]; - let fixed: Result, _> = VariableList::new(vec.clone()); + let fixed: Result, _> = VariableList::new(vec); assert!(fixed.is_err()); let vec = vec![42; 3]; - let fixed: Result, _> = VariableList::new(vec.clone()); + let fixed: Result, _> = VariableList::new(vec); assert!(fixed.is_ok()); let vec = vec![42; 4]; - let fixed: Result, _> = VariableList::new(vec.clone()); + let fixed: Result, _> = VariableList::new(vec); assert!(fixed.is_ok()); } @@ -285,7 +285,7 @@ mod test { assert_eq!(&fixed[..], &vec![42, 42, 42][..]); let vec = vec![]; - let fixed: VariableList = VariableList::from(vec.clone()); + let fixed: VariableList = VariableList::from(vec); assert_eq!(&fixed[..], &vec![][..]); } diff --git a/eth2/utils/tree_hash_derive/src/lib.rs b/eth2/utils/tree_hash_derive/src/lib.rs index ab61cf08ef..b15e9ceb24 100644 --- a/eth2/utils/tree_hash_derive/src/lib.rs +++ b/eth2/utils/tree_hash_derive/src/lib.rs @@ -46,7 +46,7 @@ fn get_hashable_fields_and_their_caches<'a>( /// /// Return `Some(cache_field_name)` if the field has a cached tree hash attribute, /// or `None` otherwise. 
-fn get_cache_field_for<'a>(field: &'a syn::Field) -> Option { +fn get_cache_field_for(field: &syn::Field) -> Option { use syn::{MetaList, NestedMeta}; let parsed_attrs = cached_tree_hash_attr_metas(&field.attrs); diff --git a/lcli/src/deploy_deposit_contract.rs b/lcli/src/deploy_deposit_contract.rs index f40849c351..76662309a2 100644 --- a/lcli/src/deploy_deposit_contract.rs +++ b/lcli/src/deploy_deposit_contract.rs @@ -94,12 +94,12 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< info!("Writing config to {:?}", output_dir); - let mut spec = lighthouse_testnet_spec(env.core_context().eth2_config.spec.clone()); + let mut spec = lighthouse_testnet_spec(env.core_context().eth2_config.spec); spec.min_genesis_time = min_genesis_time; spec.min_genesis_active_validator_count = min_genesis_active_validator_count; let testnet_config: Eth2TestnetConfig = Eth2TestnetConfig { - deposit_contract_address: format!("{}", deposit_contract.address()), + deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: deploy_block.as_u64(), boot_enr: None, genesis_state: None, @@ -152,7 +152,7 @@ pub fn parse_password(matches: &ArgMatches) -> Result, String> { }) .map(|password| { // Trim the linefeed from the end. 
- if password.ends_with("\n") { + if password.ends_with('\n') { password[0..password.len() - 1].to_string() } else { password diff --git a/lcli/src/refund_deposit_contract.rs b/lcli/src/refund_deposit_contract.rs index 1684a5d89b..d413b7f5ce 100644 --- a/lcli/src/refund_deposit_contract.rs +++ b/lcli/src/refund_deposit_contract.rs @@ -110,7 +110,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< env.runtime() .block_on(future) - .map_err(|()| format!("Failed to send transaction"))?; + .map_err(|()| "Failed to send transaction".to_string())?; Ok(()) } diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 23e19acaed..fb583eeb52 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -34,8 +34,8 @@ pub fn run_transition_blocks(matches: &ArgMatches) -> Result<(), Str let post_state = do_transition(pre_state, block)?; - let mut output_file = File::create(output_path.clone()) - .map_err(|e| format!("Unable to create output file: {:?}", e))?; + let mut output_file = + File::create(output_path).map_err(|e| format!("Unable to create output file: {:?}", e))?; output_file .write_all(&post_state.as_ssz_bytes()) diff --git a/tests/beacon_chain_sim/src/main.rs b/tests/beacon_chain_sim/src/main.rs index 4eea1cd24a..e03232e010 100644 --- a/tests/beacon_chain_sim/src/main.rs +++ b/tests/beacon_chain_sim/src/main.rs @@ -78,7 +78,7 @@ fn async_sim( let spec = &mut env.eth2_config.spec; - spec.milliseconds_per_slot = spec.milliseconds_per_slot / speed_up_factor; + spec.milliseconds_per_slot /= speed_up_factor; spec.eth1_follow_distance = 16; spec.seconds_per_day = eth1_block_time.as_secs() * spec.eth1_follow_distance * 2; spec.min_genesis_time = 0; diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index 45cd997984..cf3507cd7a 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -194,8 +194,9 @@ impl 
AttestationService { .into_iter() .for_each(|duty| { if let Some(committee_index) = duty.attestation_committee_index { - let validator_duties = - committee_indices.entry(committee_index).or_insert(vec![]); + let validator_duties = committee_indices + .entry(committee_index) + .or_insert_with(|| vec![]); validator_duties.push(duty); } diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index ea1f1841a3..eb91f0a414 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -443,7 +443,7 @@ impl DutiesService { /// Attempt to download the duties of all managed validators for the given `epoch`. fn update_epoch(self, epoch: Epoch) -> impl Future { let service_1 = self.clone(); - let service_2 = self.clone(); + let service_2 = self; let pubkeys = service_1.validator_store.voting_pubkeys(); service_1 diff --git a/validator_client/src/validator_directory.rs b/validator_client/src/validator_directory.rs index 15d2edd9a2..66f58b3b1b 100644 --- a/validator_client/src/validator_directory.rs +++ b/validator_client/src/validator_directory.rs @@ -384,12 +384,11 @@ mod tests { "withdrawal keypair should be as expected" ); assert!( - created_dir + !created_dir .deposit_data .clone() .expect("should have data") - .len() - > 0, + .is_empty(), "should have some deposit data" ); From f8cff3bd2e1be3f1ce1bdf439ae59536024f094e Mon Sep 17 00:00:00 2001 From: pscott <30843220+pscott@users.noreply.github.com> Date: Thu, 23 Jan 2020 04:35:13 +0400 Subject: [PATCH 09/16] Optimize block production (#820) * Remove SignatureVerif on block production; short-circuit fetching attestations when num attestations < T::MaxAttestation * Cargo fmt * Remove short-circuiting --- eth2/operation_pool/src/lib.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 3beb2f28e3..9044fc6e47 100644 --- a/eth2/operation_pool/src/lib.rs +++ 
b/eth2/operation_pool/src/lib.rs @@ -96,6 +96,8 @@ impl OperationPool { } /// Get a list of attestations for inclusion in a block. + /// + /// NOTE: Assumes that all attestations in the operation_pool are valid. pub fn get_attestations( &self, state: &BeaconState, @@ -125,7 +127,7 @@ impl OperationPool { verify_attestation_for_block_inclusion( state, attestation, - VerifySignatures::True, + VerifySignatures::False, spec, ) .is_ok() From fdb6e28f94ab146f7bf84385265763f63d2af26b Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 23 Jan 2020 17:30:49 +1100 Subject: [PATCH 10/16] Super/Silky smooth syncs (#816) * Initial block processing thread design * Correct compilation issues * Increase logging and request from all given peers * Patch peer request bug * Adds fork choice to block processing * Adds logging for bug isolation * Patch syncing for chains with skip-slots * Bump block processing error logs * Improve logging for attestation processing * Randomize peer selection during sync * Resuming chains restarts from local finalized slot * Downgrades Arc batches to Rc batches * Add clippy fixes * Downgrade Rc to Option to pass processed batches to chains * Add reviewers suggestions --- beacon_node/beacon_chain/src/beacon_chain.rs | 1 - beacon_node/eth2-libp2p/src/rpc/handler.rs | 29 +- beacon_node/network/src/message_processor.rs | 4 +- beacon_node/network/src/sync/manager.rs | 24 +- .../network/src/sync/range_sync/batch.rs | 67 +- .../src/sync/range_sync/batch_processing.rs | 193 +++++ .../network/src/sync/range_sync/chain.rs | 753 ++++++++---------- .../src/sync/range_sync/chain_collection.rs | 69 +- .../network/src/sync/range_sync/mod.rs | 3 + .../network/src/sync/range_sync/range.rs | 93 ++- 10 files changed, 759 insertions(+), 477 deletions(-) create mode 100644 beacon_node/network/src/sync/range_sync/batch_processing.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4a27088306..d8b3b547cb 100644 
--- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -10,7 +10,6 @@ use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; use crate::timeout_rw_lock::TimeoutRwLock; use lmd_ghost::LmdGhost; use operation_pool::{OperationPool, PersistedOperationPool}; -use parking_lot::RwLock; use slog::{debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index 4f2908c932..9d90203865 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -314,6 +314,7 @@ where substream: out, request, }; + debug!(self.log, "Added outbound substream id"; "substream_id" => id); self.outbound_substreams .insert(id, (awaiting_stream, delay_key)); } @@ -418,6 +419,8 @@ where }; if self.pending_error.is_none() { self.pending_error = Some((request_id, error)); + } else { + crit!(self.log, "Couldn't add error"); } } @@ -448,6 +451,7 @@ where } ProtocolsHandlerUpgrErr::Timeout | ProtocolsHandlerUpgrErr::Timer => { // negotiation timeout, mark the request as failed + debug!(self.log, "Active substreams before timeout"; "len" => self.outbound_substreams.len()); return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( RPCEvent::Error( request_id, @@ -707,21 +711,18 @@ where } // establish outbound substreams - if !self.dial_queue.is_empty() { - if self.dial_negotiated < self.max_dial_negotiated { - self.dial_negotiated += 1; - let rpc_event = self.dial_queue.remove(0); - if let RPCEvent::Request(id, req) = rpc_event { - return Ok(Async::Ready( - ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(req.clone()), - info: RPCEvent::Request(id, req), - }, - )); - } - } - } else { + if !self.dial_queue.is_empty() && self.dial_negotiated < self.max_dial_negotiated { + self.dial_negotiated += 1; + let rpc_event = 
self.dial_queue.remove(0); self.dial_queue.shrink_to_fit(); + if let RPCEvent::Request(id, req) = rpc_event { + return Ok(Async::Ready( + ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(req.clone()), + info: RPCEvent::Request(id, req), + }, + )); + } } Ok(Async::NotReady) } diff --git a/beacon_node/network/src/message_processor.rs b/beacon_node/network/src/message_processor.rs index dc4f91d2f8..f12d4d0ded 100644 --- a/beacon_node/network/src/message_processor.rs +++ b/beacon_node/network/src/message_processor.rs @@ -562,9 +562,9 @@ impl MessageProcessor { self.log, "Processed attestation"; "source" => "gossip", - "outcome" => format!("{:?}", outcome), "peer" => format!("{:?}",peer_id), - "data" => format!("{:?}", msg.data) + "block_root" => format!("{}", msg.data.beacon_block_root), + "slot" => format!("{}", msg.data.slot), ); } AttestationProcessingOutcome::UnknownHeadBlock { beacon_block_root } => { diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 3883f27396..8b7e210c90 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -34,7 +34,7 @@ //! subsequently search for parents if needed. use super::network_context::SyncNetworkContext; -use super::range_sync::RangeSync; +use super::range_sync::{Batch, BatchProcessResult, RangeSync}; use crate::message_processor::PeerSyncInfo; use crate::service::NetworkMessage; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; @@ -45,6 +45,7 @@ use fnv::FnvHashMap; use futures::prelude::*; use slog::{crit, debug, error, info, trace, warn, Logger}; use smallvec::SmallVec; +use std::boxed::Box; use std::collections::HashSet; use std::ops::Sub; use std::sync::Weak; @@ -94,6 +95,13 @@ pub enum SyncMessage { /// An RPC Error has occurred on a request. RPCError(PeerId, RequestId), + + /// A batch has been processed by the block processor thread. 
+ BatchProcessed { + process_id: u64, + batch: Box>, + result: BatchProcessResult, + }, } /// Maintains a sequential list of parents to lookup and the lookup's current state. @@ -185,7 +193,7 @@ pub fn spawn( state: ManagerState::Stalled, input_channel: sync_recv, network: SyncNetworkContext::new(network_send, log.clone()), - range_sync: RangeSync::new(beacon_chain, log.clone()), + range_sync: RangeSync::new(beacon_chain, sync_send.clone(), log.clone()), parent_queue: SmallVec::new(), single_block_lookups: FnvHashMap::default(), full_peers: HashSet::new(), @@ -679,6 +687,18 @@ impl Future for SyncManager { SyncMessage::RPCError(peer_id, request_id) => { self.inject_error(peer_id, request_id); } + SyncMessage::BatchProcessed { + process_id, + batch, + result, + } => { + self.range_sync.handle_block_process_result( + &mut self.network, + process_id, + *batch, + result, + ); + } }, Ok(Async::NotReady) => break, Ok(Async::Ready(None)) => { diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 45a7672e4d..e487e795b0 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -1,16 +1,40 @@ +use super::chain::BLOCKS_PER_BATCH; +use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::RequestId; use eth2_libp2p::PeerId; use fnv::FnvHashMap; use std::cmp::Ordering; use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; +use std::ops::Sub; use types::{BeaconBlock, EthSpec, Hash256, Slot}; +#[derive(Copy, Clone, Debug, PartialEq)] +pub struct BatchId(pub u64); + +impl std::ops::Deref for BatchId { + type Target = u64; + fn deref(&self) -> &Self::Target { + &self.0 + } +} +impl std::ops::DerefMut for BatchId { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl std::convert::From for BatchId { + fn from(id: u64) -> Self { + BatchId(id) + } +} + /// A collection of sequential blocks that are requested 
from peers in a single RPC request. -#[derive(PartialEq)] +#[derive(PartialEq, Debug)] pub struct Batch { /// The ID of the batch, these are sequential. - pub id: u64, + pub id: BatchId, /// The requested start slot of the batch, inclusive. pub start_slot: Slot, /// The requested end slot of batch, exclusive. @@ -27,9 +51,41 @@ pub struct Batch { pub downloaded_blocks: Vec>, } +impl Eq for Batch {} + +impl Batch { + pub fn new( + id: BatchId, + start_slot: Slot, + end_slot: Slot, + head_root: Hash256, + peer_id: PeerId, + ) -> Self { + Batch { + id, + start_slot, + end_slot, + head_root, + _original_peer: peer_id.clone(), + current_peer: peer_id, + retries: 0, + downloaded_blocks: Vec::new(), + } + } + + pub fn to_blocks_by_range_request(&self) -> BlocksByRangeRequest { + BlocksByRangeRequest { + head_block_root: self.head_root, + start_slot: self.start_slot.into(), + count: std::cmp::min(BLOCKS_PER_BATCH, self.end_slot.sub(self.start_slot).into()), + step: 1, + } + } +} + impl Ord for Batch { fn cmp(&self, other: &Self) -> Ordering { - self.id.cmp(&other.id) + self.id.0.cmp(&other.id.0) } } @@ -83,6 +139,11 @@ impl PendingBatches { } } + /// The number of current pending batch requests. + pub fn len(&self) -> usize { + self.batches.len() + } + /// Adds a block to the batches if the request id exists. Returns None if there is no batch /// matching the request id. 
pub fn add_block(&mut self, request_id: RequestId, block: BeaconBlock) -> Option<()> { diff --git a/beacon_node/network/src/sync/range_sync/batch_processing.rs b/beacon_node/network/src/sync/range_sync/batch_processing.rs new file mode 100644 index 0000000000..27c4fb295e --- /dev/null +++ b/beacon_node/network/src/sync/range_sync/batch_processing.rs @@ -0,0 +1,193 @@ +use super::batch::Batch; +use crate::message_processor::FUTURE_SLOT_TOLERANCE; +use crate::sync::manager::SyncMessage; +use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; +use slog::{debug, error, trace, warn}; +use std::sync::{Arc, Weak}; +use tokio::sync::mpsc; + +/// The result of attempting to process a batch of blocks. +// TODO: When correct batch error handling occurs, we will include an error type. +#[derive(Debug)] +pub enum BatchProcessResult { + /// The batch was completed successfully. + Success, + /// The batch processing failed. + Failed, +} + +// TODO: Refactor to async fn, with stable futures +pub fn spawn_batch_processor( + chain: Weak>, + process_id: u64, + batch: Batch, + mut sync_send: mpsc::UnboundedSender>, + log: slog::Logger, +) { + std::thread::spawn(move || { + debug!(log, "Processing batch"; "id" => *batch.id); + let result = match process_batch(chain, &batch, &log) { + Ok(_) => BatchProcessResult::Success, + Err(_) => BatchProcessResult::Failed, + }; + + debug!(log, "Batch processed"; "id" => *batch.id, "result" => format!("{:?}", result)); + + sync_send + .try_send(SyncMessage::BatchProcessed { + process_id, + batch: Box::new(batch), + result, + }) + .unwrap_or_else(|_| { + debug!( + log, + "Batch result could not inform sync. Likely shutting down." 
+ ); + }); + }); +} + +// Helper function to process block batches which only consumes the chain and blocks to process +fn process_batch( + chain: Weak>, + batch: &Batch, + log: &slog::Logger, +) -> Result<(), String> { + let mut successful_block_import = false; + for block in &batch.downloaded_blocks { + if let Some(chain) = chain.upgrade() { + let processing_result = chain.process_block(block.clone()); + + if let Ok(outcome) = processing_result { + match outcome { + BlockProcessingOutcome::Processed { block_root } => { + // The block was valid and we processed it successfully. + trace!( + log, "Imported block from network"; + "slot" => block.slot, + "block_root" => format!("{}", block_root), + ); + successful_block_import = true; + } + BlockProcessingOutcome::ParentUnknown { parent } => { + // blocks should be sequential and all parents should exist + warn!( + log, "Parent block is unknown"; + "parent_root" => format!("{}", parent), + "baby_block_slot" => block.slot, + ); + if successful_block_import { + run_fork_choice(chain, log); + } + return Err(format!( + "Block at slot {} has an unknown parent.", + block.slot + )); + } + BlockProcessingOutcome::BlockIsAlreadyKnown => { + // this block is already known to us, move to the next + debug!( + log, "Imported a block that is already known"; + "block_slot" => block.slot, + ); + } + BlockProcessingOutcome::FutureSlot { + present_slot, + block_slot, + } => { + if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { + // The block is too far in the future, drop it. + warn!( + log, "Block is ahead of our slot clock"; + "msg" => "block for future slot rejected, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + ); + if successful_block_import { + run_fork_choice(chain, log); + } + return Err(format!( + "Block at slot {} is too far in the future", + block.slot + )); + } else { + // The block is in the future, but not too far. 
+ debug!( + log, "Block is slightly ahead of our slot clock, ignoring."; + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + ); + } + } + BlockProcessingOutcome::WouldRevertFinalizedSlot { .. } => { + debug!( + log, "Finalized or earlier block processed"; + "outcome" => format!("{:?}", outcome), + ); + // block reached our finalized slot or was earlier, move to the next block + } + BlockProcessingOutcome::GenesisBlock => { + debug!( + log, "Genesis block was processed"; + "outcome" => format!("{:?}", outcome), + ); + } + _ => { + warn!( + log, "Invalid block received"; + "msg" => "peer sent invalid block", + "outcome" => format!("{:?}", outcome), + ); + if successful_block_import { + run_fork_choice(chain, log); + } + return Err(format!("Invalid block at slot {}", block.slot)); + } + } + } else { + warn!( + log, "BlockProcessingFailure"; + "msg" => "unexpected condition in processing block.", + "outcome" => format!("{:?}", processing_result) + ); + if successful_block_import { + run_fork_choice(chain, log); + } + return Err(format!( + "Unexpected block processing error: {:?}", + processing_result + )); + } + } else { + return Ok(()); // terminate early due to dropped beacon chain + } + } + + // Batch completed successfully, run fork choice. + if let Some(chain) = chain.upgrade() { + run_fork_choice(chain, log); + } + + Ok(()) +} + +/// Runs fork-choice on a given chain. This is used during block processing after one successful +/// block import. 
+fn run_fork_choice(chain: Arc>, log: &slog::Logger) { + match chain.fork_choice() { + Ok(()) => trace!( + log, + "Fork choice success"; + "location" => "batch processing" + ), + Err(e) => error!( + log, + "Fork choice failed"; + "error" => format!("{:?}", e), + "location" => "batch import error" + ), + } +} diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index ddeaf0583e..165d27ab25 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1,28 +1,35 @@ -use crate::message_processor::FUTURE_SLOT_TOLERANCE; +use super::batch::{Batch, BatchId, PendingBatches}; +use super::batch_processing::{spawn_batch_processor, BatchProcessResult}; use crate::sync::network_context::SyncNetworkContext; -use crate::sync::range_sync::batch::{Batch, PendingBatches}; -use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; -use eth2_libp2p::rpc::methods::*; +use crate::sync::SyncMessage; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::rpc::RequestId; use eth2_libp2p::PeerId; -use slog::{crit, debug, error, trace, warn, Logger}; +use rand::prelude::*; +use slog::{crit, debug, warn}; use std::collections::HashSet; -use std::ops::Sub; use std::sync::Weak; -use types::{BeaconBlock, EthSpec, Hash256, Slot}; +use tokio::sync::mpsc; +use types::{BeaconBlock, Hash256, Slot}; /// Blocks are downloaded in batches from peers. This constant specifies how many blocks per batch /// is requested. There is a timeout for each batch request. If this value is too high, we will /// downvote peers with poor bandwidth. This can be set arbitrarily high, in which case the /// responder will fill the response up to the max request size, assuming they have the bandwidth /// to do so. -//TODO: Make this dynamic based on peer's bandwidth -//TODO: This is lower due to current thread design. Modify once rebuilt. 
-const BLOCKS_PER_BATCH: u64 = 25; +pub const BLOCKS_PER_BATCH: u64 = 50; /// The number of times to retry a batch before the chain is considered failed and removed. const MAX_BATCH_RETRIES: u8 = 5; +/// The maximum number of batches to queue before requesting more. +const BATCH_BUFFER_SIZE: u8 = 5; + +/// Invalid batches are attempted to be re-downloaded from other peers. If they cannot be processed +/// after `INVALID_BATCH_LOOKUP_ATTEMPTS` times, the chain is considered faulty and all peers will +/// be downvoted. +const _INVALID_BATCH_LOOKUP_ATTEMPTS: u8 = 3; + /// A return type for functions that act on a `Chain` which informs the caller whether the chain /// has been completed and should be removed or to be kept if further processing is /// required. @@ -31,32 +38,6 @@ pub enum ProcessingResult { RemoveChain, } -impl Eq for Batch {} - -impl Batch { - fn new(id: u64, start_slot: Slot, end_slot: Slot, head_root: Hash256, peer_id: PeerId) -> Self { - Batch { - id, - start_slot, - end_slot, - head_root, - _original_peer: peer_id.clone(), - current_peer: peer_id, - retries: 0, - downloaded_blocks: Vec::new(), - } - } - - fn to_blocks_by_range_request(&self) -> BlocksByRangeRequest { - BlocksByRangeRequest { - head_block_root: self.head_root, - start_slot: self.start_slot.into(), - count: std::cmp::min(BLOCKS_PER_BATCH, self.end_slot.sub(self.start_slot).into()), - step: 1, - } - } -} - /// A chain of blocks that need to be downloaded. Peers who claim to contain the target head /// root are grouped into the peer pool and queried for batches when downloading the /// chain. @@ -77,21 +58,37 @@ pub struct SyncingChain { /// The batches that have been downloaded and are awaiting processing and/or validation. completed_batches: Vec>, + /// Batches that have been processed and awaiting validation before being removed. 
+ processed_batches: Vec>, + /// The peers that agree on the `target_head_slot` and `target_head_root` as a canonical chain /// and thus available to download this chain from. pub peer_pool: HashSet, /// The next batch_id that needs to be downloaded. - to_be_downloaded_id: u64, + to_be_downloaded_id: BatchId, /// The next batch id that needs to be processed. - to_be_processed_id: u64, + to_be_processed_id: BatchId, /// The last batch id that was processed. - last_processed_id: u64, + last_processed_id: BatchId, /// The current state of the chain. pub state: ChainSyncingState, + + /// A random id given to a batch process request. This is None if there is no ongoing batch + /// process. + current_processing_id: Option, + + /// A send channel to the sync manager. This is given to the batch processor thread to report + /// back once batch processing has completed. + sync_send: mpsc::UnboundedSender>, + + chain: Weak>, + + /// A reference to the sync logger. + log: slog::Logger, } #[derive(PartialEq)] @@ -100,8 +97,6 @@ pub enum ChainSyncingState { Stopped, /// The chain is undergoing syncing. Syncing, - /// The chain is temporarily paused whilst an error is rectified. - _Paused, } impl SyncingChain { @@ -110,6 +105,9 @@ impl SyncingChain { target_head_slot: Slot, target_head_root: Hash256, peer_id: PeerId, + sync_send: mpsc::UnboundedSender>, + chain: Weak>, + log: slog::Logger, ) -> Self { let mut peer_pool = HashSet::new(); peer_pool.insert(peer_id); @@ -120,11 +118,16 @@ impl SyncingChain { target_head_root, pending_batches: PendingBatches::new(), completed_batches: Vec::new(), + processed_batches: Vec::new(), peer_pool, - to_be_downloaded_id: 1, - to_be_processed_id: 1, - last_processed_id: 0, + to_be_downloaded_id: BatchId(1), + to_be_processed_id: BatchId(1), + last_processed_id: BatchId(0), state: ChainSyncingState::Stopped, + current_processing_id: None, + sync_send, + chain, + log, } } @@ -136,49 +139,45 @@ impl SyncingChain { /// batch. 
pub fn on_block_response( &mut self, - chain: &Weak>, network: &mut SyncNetworkContext, request_id: RequestId, beacon_block: &Option>, - log: &slog::Logger, - ) -> Option { + ) -> Option<()> { if let Some(block) = beacon_block { // This is not a stream termination, simply add the block to the request - self.pending_batches.add_block(request_id, block.clone())?; - Some(ProcessingResult::KeepChain) + self.pending_batches.add_block(request_id, block.clone()) } else { // A stream termination has been sent. This batch has ended. Process a completed batch. let batch = self.pending_batches.remove(request_id)?; - Some(self.process_completed_batch(chain.clone(), network, batch, log)) + self.handle_completed_batch(network, batch); + Some(()) } } /// A completed batch has been received, process the batch. /// This will return `ProcessingResult::KeepChain` if the chain has not completed or /// failed indicating that further batches are required. - fn process_completed_batch( + fn handle_completed_batch( &mut self, - chain: Weak>, network: &mut SyncNetworkContext, batch: Batch, - log: &slog::Logger, - ) -> ProcessingResult { + ) { // An entire batch of blocks has been received. This functions checks to see if it can be processed, // remove any batches waiting to be verified and if this chain is syncing, request new // blocks for the peer. 
- debug!(log, "Completed batch received"; "id"=>batch.id, "blocks"=>batch.downloaded_blocks.len(), "awaiting_batches" => self.completed_batches.len()); + debug!(self.log, "Completed batch received"; "id"=> *batch.id, "blocks"=>batch.downloaded_blocks.len(), "awaiting_batches" => self.completed_batches.len()); // verify the range of received blocks // Note that the order of blocks is verified in block processing if let Some(last_slot) = batch.downloaded_blocks.last().map(|b| b.slot) { // the batch is non-empty if batch.start_slot > batch.downloaded_blocks[0].slot || batch.end_slot < last_slot { - warn!(log, "BlocksByRange response returned out of range blocks"; + warn!(self.log, "BlocksByRange response returned out of range blocks"; "response_initial_slot" => batch.downloaded_blocks[0].slot, "requested_initial_slot" => batch.start_slot); network.downvote_peer(batch.current_peer); self.to_be_processed_id = batch.id; // reset the id back to here, when incrementing, it will check against completed batches - return ProcessingResult::KeepChain; + return; } } @@ -200,138 +199,138 @@ impl SyncingChain { // already be processed but not verified and therefore have Id's less than // `self.to_be_processed_id`. - //TODO: Run the processing of blocks in a separate thread. Build a queue of completed - //blocks here, manage the queue and process them in another thread as they become - //available. + // pre-emptively request more blocks from peers whilst we process current blocks, + self.request_batches(network); - if self.state == ChainSyncingState::Syncing { - // pre-emptively request more blocks from peers whilst we process current blocks, - if !self.send_range_request(network, log) { - debug!(log, "No peer available for next batch.") - } + // Try and process any completed batches. This will spawn a new task to process any blocks + // that are ready to be processed. 
+ self.process_completed_batches(); + } + + /// Tries to process any batches if there are any available and we are not currently processing + /// other batches. + fn process_completed_batches(&mut self) { + // Only process batches if this chain is Syncing + if self.state != ChainSyncingState::Syncing { + return; } - // Try and process batches sequentially in the ordered list. - let current_process_id = self.to_be_processed_id; - // keep track of the number of successful batches to decide whether to run fork choice - let mut successful_block_process = false; + // Only process one batch at a time + if self.current_processing_id.is_some() { + return; + } - for batch in self - .completed_batches - .iter() - .filter(|batch| batch.id >= current_process_id) + // Check if there is a batch ready to be processed + while !self.completed_batches.is_empty() + && self.completed_batches[0].id == self.to_be_processed_id { - if batch.id != self.to_be_processed_id { - // there are no batches to be processed at the moment - break; - } - + let batch = self.completed_batches.remove(0); if batch.downloaded_blocks.is_empty() { - // the batch was empty, progress to the next block - self.to_be_processed_id += 1; + // The batch was empty, consider this processed and move to the next batch + self.processed_batches.push(batch); + *self.to_be_processed_id += 1; continue; } - // process the batch - // Keep track of successful batches. Run fork choice after all waiting batches have - // been processed. 
- debug!(log, "Processing batch"; "batch_id" => batch.id); - match process_batch(chain.clone(), batch, log) { - Ok(_) => { - // batch was successfully processed - self.last_processed_id = self.to_be_processed_id; - self.to_be_processed_id += 1; - successful_block_process = true; - } - Err(e) => { - warn!(log, "Block processing error"; "error"=> format!("{:?}", e)); - - if successful_block_process { - if let Some(chain) = chain.upgrade() { - match chain.fork_choice() { - Ok(()) => trace!( - log, - "Fork choice success"; - "location" => "batch import error" - ), - Err(e) => error!( - log, - "Fork choice failed"; - "error" => format!("{:?}", e), - "location" => "batch import error" - ), - } - } - } - - // batch processing failed - // this could be because this batch is invalid, or a previous invalidated batch - // is invalid. We need to find out which and downvote the peer that has sent us - // an invalid batch. - - // firstly remove any validated batches - return self.handle_invalid_batch(chain, network); - } - } - } - // If we have processed batches, run fork choice - if successful_block_process { - if let Some(chain) = chain.upgrade() { - match chain.fork_choice() { - Ok(()) => trace!( - log, - "Fork choice success"; - "location" => "batch import success" - ), - Err(e) => error!( - log, - "Fork choice failed"; - "error" => format!("{:?}", e), - "location" => "batch import success" - ), - } - } - } - - // remove any validated batches - let last_processed_id = self.last_processed_id; - self.completed_batches - .retain(|batch| batch.id >= last_processed_id); - - // check if the chain has completed syncing - if self.start_slot + self.last_processed_id * BLOCKS_PER_BATCH >= self.target_head_slot { - // chain is completed - ProcessingResult::RemoveChain - } else { - // chain is not completed - ProcessingResult::KeepChain + // send the batch to the batch processor thread + return self.process_batch(batch); } } - /// An invalid batch has been received that could not be 
processed. - fn handle_invalid_batch( + /// Sends a batch to the batch processor. + fn process_batch(&mut self, batch: Batch) { + // only spawn one instance at a time + let processing_id: u64 = rand::random(); + self.current_processing_id = Some(processing_id); + spawn_batch_processor( + self.chain.clone(), + processing_id, + batch, + self.sync_send.clone(), + self.log.clone(), + ); + } + + /// The block processor has completed processing a batch. This function handles the result + /// of the batch processor. + pub fn on_batch_process_result( &mut self, - _chain: Weak>, network: &mut SyncNetworkContext, - ) -> ProcessingResult { - // The current batch could not be processed, indicating either the current or previous - // batches are invalid - - // The previous batch could be - // incomplete due to the block sizes being too large to fit in a single RPC - // request or there could be consecutive empty batches which are not supposed to be there - - // Address these two cases individually. - // Firstly, check if the past batch is invalid. 
- // - - //TODO: Implement this logic - // Currently just fail the chain, and drop all associated peers, removing them from the - // peer pool, to prevent re-status - for peer_id in self.peer_pool.drain() { - network.downvote_peer(peer_id); + processing_id: u64, + batch: &mut Option>, + result: &BatchProcessResult, + ) -> Option { + if Some(processing_id) != self.current_processing_id { + // batch process doesn't belong to this chain + return None; } - ProcessingResult::RemoveChain + + // Consume the batch option + let batch = batch.take().or_else(|| { + crit!(self.log, "Processed batch taken by another chain"); + None + })?; + + // double check batches are processed in order TODO: Remove for prod + if batch.id != self.to_be_processed_id { + crit!(self.log, "Batch processed out of order"; + "processed_batch_id" => *batch.id, + "expected_id" => *self.to_be_processed_id); + } + + self.current_processing_id = None; + + let res = match result { + BatchProcessResult::Success => { + *self.to_be_processed_id += 1; + // This variable accounts for skip slots and batches that were not actually + // processed due to having no blocks. + self.last_processed_id = batch.id; + + // Remove any validate batches awaiting validation. + // Only batches that have blocks are processed here, therefore all previous batches + // have been correct. + let last_processed_id = self.last_processed_id; + self.processed_batches + .retain(|batch| batch.id.0 >= last_processed_id.0); + + // add the current batch to processed batches to be verified in the future. We are + // only uncertain about this batch, if it has not returned all blocks. 
+ if batch.downloaded_blocks.len() < BLOCKS_PER_BATCH as usize { + self.processed_batches.push(batch); + } + + // check if the chain has completed syncing + if self.start_slot + *self.last_processed_id * BLOCKS_PER_BATCH + >= self.target_head_slot + { + // chain is completed + ProcessingResult::RemoveChain + } else { + // chain is not completed + + // attempt to request more batches + self.request_batches(network); + + // attempt to process more batches + self.process_completed_batches(); + + // keep the chain + ProcessingResult::KeepChain + } + } + BatchProcessResult::Failed => { + // batch processing failed + // this could be because this batch is invalid, or a previous invalidated batch + // is invalid. We need to find out which and downvote the peer that has sent us + // an invalid batch. + + // firstly remove any validated batches + self.handle_invalid_batch(network, batch) + } + }; + + Some(res) } pub fn stop_syncing(&mut self) { @@ -342,154 +341,66 @@ impl SyncingChain { /// This chain has been requested to start syncing. /// /// This could be new chain, or an old chain that is being resumed. - pub fn start_syncing( - &mut self, - network: &mut SyncNetworkContext, - local_finalized_slot: Slot, - log: &slog::Logger, - ) { + pub fn start_syncing(&mut self, network: &mut SyncNetworkContext, local_finalized_slot: Slot) { // A local finalized slot is provided as other chains may have made // progress whilst this chain was Stopped or paused. If so, update the `processed_batch_id` to // accommodate potentially downloaded batches from other chains. 
Also prune any old batches // awaiting processing - // Only important if the local head is more than a batch worth of blocks ahead of - // what this chain believes is downloaded - let batches_ahead = local_finalized_slot - .as_u64() - .saturating_sub(self.start_slot.as_u64() + self.last_processed_id * BLOCKS_PER_BATCH) - / BLOCKS_PER_BATCH; + // If the local finalized epoch is ahead of our current processed chain, update the chain + // to start from this point and re-index all subsequent batches starting from one + // (effectively creating a new chain). - if batches_ahead != 0 { - // there are `batches_ahead` whole batches that have been downloaded by another - // chain. Set the current processed_batch_id to this value. - debug!(log, "Updating chains processed batches"; "old_completed_slot" => self.start_slot + self.last_processed_id*BLOCKS_PER_BATCH, "new_completed_slot" => self.start_slot + (self.last_processed_id + batches_ahead)*BLOCKS_PER_BATCH); - self.last_processed_id += batches_ahead; + if local_finalized_slot.as_u64() + > self + .start_slot + .as_u64() + .saturating_add(*self.last_processed_id * BLOCKS_PER_BATCH) + { + debug!(self.log, "Updating chain's progress"; + "prev_completed_slot" => self.start_slot + *self.last_processed_id*BLOCKS_PER_BATCH, + "new_completed_slot" => local_finalized_slot.as_u64()); + // Re-index batches + *self.last_processed_id = 0; + *self.to_be_downloaded_id = 1; + *self.to_be_processed_id = 1; - if self.start_slot + self.last_processed_id * BLOCKS_PER_BATCH - > self.target_head_slot.as_u64() - { - crit!( - log, - "Current head slot is above the target head"; - "target_head_slot" => self.target_head_slot.as_u64(), - "new_start" => self.start_slot + self.last_processed_id * BLOCKS_PER_BATCH, - ); - return; - } - - // update the `to_be_downloaded_id` - if self.to_be_downloaded_id < self.last_processed_id { - self.to_be_downloaded_id = self.last_processed_id; - } - - let last_processed_id = self.last_processed_id; - 
self.completed_batches - .retain(|batch| batch.id >= last_processed_id.saturating_sub(1)); + // remove any completed or processed batches + self.completed_batches.clear(); + self.processed_batches.clear(); } - // Now begin requesting blocks from the peer pool, until all peers are exhausted. - while self.send_range_request(network, log) {} - self.state = ChainSyncingState::Syncing; + + // start processing batches if needed + self.process_completed_batches(); + + // begin requesting blocks from the peer pool, until all peers are exhausted. + self.request_batches(network); } /// Add a peer to the chain. /// /// If the chain is active, this starts requesting batches from this peer. - pub fn add_peer( - &mut self, - network: &mut SyncNetworkContext, - peer_id: PeerId, - log: &slog::Logger, - ) { + pub fn add_peer(&mut self, network: &mut SyncNetworkContext, peer_id: PeerId) { self.peer_pool.insert(peer_id.clone()); // do not request blocks if the chain is not syncing if let ChainSyncingState::Stopped = self.state { - debug!(log, "Peer added to a non-syncing chain"; "peer_id" => format!("{:?}", peer_id)); + debug!(self.log, "Peer added to a non-syncing chain"; "peer_id" => format!("{:?}", peer_id)); return; } - // find the next batch and request it from the peer - self.send_range_request(network, log); + // find the next batch and request it from any peers if we need to + self.request_batches(network); } /// Sends a STATUS message to all peers in the peer pool. - pub fn status_peers(&self, chain: Weak>, network: &mut SyncNetworkContext) { + pub fn status_peers(&self, network: &mut SyncNetworkContext) { for peer_id in self.peer_pool.iter() { - network.status_peer(chain.clone(), peer_id.clone()); + network.status_peer(self.chain.clone(), peer_id.clone()); } } - /// Requests the next required batch from a peer. Returns true, if there was a peer available - /// to send a request and there are batches to request, false otherwise. 
- fn send_range_request(&mut self, network: &mut SyncNetworkContext, log: &slog::Logger) -> bool { - // find the next pending batch and request it from the peer - if let Some(peer_id) = self.get_next_peer() { - if let Some(batch) = self.get_next_batch(peer_id) { - debug!(log, "Requesting batch"; "start_slot" => batch.start_slot, "end_slot" => batch.end_slot, "id" => batch.id, "peer" => format!("{:?}", batch.current_peer), "head_root"=> format!("{}", batch.head_root)); - // send the batch - self.send_batch(network, batch); - return true; - } - } - false - } - - /// Returns a peer if there exists a peer which does not currently have a pending request. - /// - /// This is used to create the next request. - fn get_next_peer(&self) -> Option { - for peer in self.peer_pool.iter() { - if self.pending_batches.peer_is_idle(peer) { - return Some(peer.clone()); - } - } - None - } - - /// Requests the provided batch from the provided peer. - fn send_batch(&mut self, network: &mut SyncNetworkContext, batch: Batch) { - let request = batch.to_blocks_by_range_request(); - if let Ok(request_id) = network.blocks_by_range_request(batch.current_peer.clone(), request) - { - // add the batch to pending list - self.pending_batches.insert(request_id, batch); - } - } - - /// Returns the next required batch from the chain if it exists. If there are no more batches - /// required, `None` is returned. - fn get_next_batch(&mut self, peer_id: PeerId) -> Option> { - let batch_start_slot = - self.start_slot + self.to_be_downloaded_id.saturating_sub(1) * BLOCKS_PER_BATCH; - if batch_start_slot > self.target_head_slot { - return None; - } - let batch_end_slot = std::cmp::min( - batch_start_slot + BLOCKS_PER_BATCH, - self.target_head_slot.saturating_add(1u64), - ); - - let batch_id = self.to_be_downloaded_id; - // find the next batch id. 
The largest of the next sequential idea, of the next uncompleted - // id - let max_completed_id = - self.completed_batches - .iter() - .fold(0, |max, batch| if batch.id > max { batch.id } else { max }); - self.to_be_downloaded_id = - std::cmp::max(self.to_be_downloaded_id + 1, max_completed_id + 1); - - Some(Batch::new( - batch_id, - batch_start_slot, - batch_end_slot, - self.target_head_root, - peer_id, - )) - } - /// An RPC error has occurred. /// /// Checks if the request_id is associated with this chain. If so, attempts to re-request the @@ -501,18 +412,21 @@ impl SyncingChain { network: &mut SyncNetworkContext, peer_id: &PeerId, request_id: RequestId, - log: &slog::Logger, ) -> Option { if let Some(batch) = self.pending_batches.remove(request_id) { - warn!(log, "Batch failed. RPC Error"; "id" => batch.id, "retries" => batch.retries, "peer" => format!("{:?}", peer_id)); + warn!(self.log, "Batch failed. RPC Error"; + "id" => *batch.id, + "retries" => batch.retries, + "peer" => format!("{:?}", peer_id)); - Some(self.failed_batch(network, batch, log)) + Some(self.failed_batch(network, batch)) } else { None } } - /// A batch has failed. + /// A batch has failed. This occurs when a network timeout happens or the peer didn't respond. + /// These events do not indicate a malicious peer, more likely simple networking issues. 
/// /// Attempts to re-request from another peer in the peer pool (if possible) and returns /// `ProcessingResult::RemoveChain` if the number of retries on the batch exceeds @@ -521,7 +435,6 @@ impl SyncingChain { &mut self, network: &mut SyncNetworkContext, mut batch: Batch, - log: &Logger, ) -> ProcessingResult { batch.retries += 1; @@ -541,116 +454,152 @@ impl SyncingChain { .unwrap_or_else(|| current_peer); batch.current_peer = new_peer.clone(); - debug!(log, "Re-Requesting batch"; "start_slot" => batch.start_slot, "end_slot" => batch.end_slot, "id" => batch.id, "peer" => format!("{:?}", batch.current_peer), "head_root"=> format!("{}", batch.head_root)); + debug!(self.log, "Re-Requesting batch"; + "start_slot" => batch.start_slot, + "end_slot" => batch.end_slot, + "id" => *batch.id, + "peer" => format!("{:?}", batch.current_peer), + "head_root"=> format!("{}", batch.head_root)); self.send_batch(network, batch); ProcessingResult::KeepChain } } -} -// Helper function to process block batches which only consumes the chain and blocks to process -fn process_batch( - chain: Weak>, - batch: &Batch, - log: &Logger, -) -> Result<(), String> { - for block in &batch.downloaded_blocks { - if let Some(chain) = chain.upgrade() { - let processing_result = chain.process_block(block.clone()); + /// An invalid batch has been received that could not be processed. + /// + /// These events occur when a peer as successfully responded with blocks, but the blocks we + /// have received are incorrect or invalid. This indicates the peer has not performed as + /// intended and can result in downvoting a peer. 
+ fn handle_invalid_batch( + &mut self, + network: &mut SyncNetworkContext, + _batch: Batch, + ) -> ProcessingResult { + // The current batch could not be processed, indicating either the current or previous + // batches are invalid - if let Ok(outcome) = processing_result { - match outcome { - BlockProcessingOutcome::Processed { block_root } => { - // The block was valid and we processed it successfully. - trace!( - log, "Imported block from network"; - "slot" => block.slot, - "block_root" => format!("{}", block_root), - ); - } - BlockProcessingOutcome::ParentUnknown { parent } => { - // blocks should be sequential and all parents should exist - trace!( - log, "Parent block is unknown"; - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot, - ); - return Err(format!( - "Block at slot {} has an unknown parent.", - block.slot - )); - } - BlockProcessingOutcome::BlockIsAlreadyKnown => { - // this block is already known to us, move to the next - debug!( - log, "Imported a block that is already known"; - "block_slot" => block.slot, - ); - } - BlockProcessingOutcome::FutureSlot { - present_slot, - block_slot, - } => { - if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { - // The block is too far in the future, drop it. - trace!( - log, "Block is ahead of our slot clock"; - "msg" => "block for future slot rejected, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - ); - return Err(format!( - "Block at slot {} is too far in the future", - block.slot - )); - } else { - // The block is in the future, but not too far. - trace!( - log, "Block is slightly ahead of our slot clock, ignoring."; - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - ); - } - } - BlockProcessingOutcome::WouldRevertFinalizedSlot { .. 
} => { - trace!( - log, "Finalized or earlier block processed"; - "outcome" => format!("{:?}", outcome), - ); - // block reached our finalized slot or was earlier, move to the next block - } - BlockProcessingOutcome::GenesisBlock => { - trace!( - log, "Genesis block was processed"; - "outcome" => format!("{:?}", outcome), - ); - } - _ => { - warn!( - log, "Invalid block received"; - "msg" => "peer sent invalid block", - "outcome" => format!("{:?}", outcome), - ); - return Err(format!("Invalid block at slot {}", block.slot)); - } - } - } else { - warn!( - log, "BlockProcessingFailure"; - "msg" => "unexpected condition in processing block.", - "outcome" => format!("{:?}", processing_result) - ); - return Err(format!( - "Unexpected block processing error: {:?}", - processing_result - )); - } - } else { - return Ok(()); // terminate early due to dropped beacon chain + // The previous batch could be incomplete due to the block sizes being too large to fit in + // a single RPC request or there could be consecutive empty batches which are not supposed + // to be there + + // The current (sub-optimal) strategy is to simply re-request all batches that could + // potentially be faulty. If a batch returns a different result than the original and + // results in successful processing, we downvote the original peer that sent us the batch. + + // If all batches return the same result, we try this process INVALID_BATCH_LOOKUP_ATTEMPTS + // times before considering the entire chain invalid and downvoting all peers. + + // Firstly, check if there are any past batches that could be invalid. 
+ if !self.processed_batches.is_empty() { + // try and re-download this batch from other peers + } + + //TODO: Implement this logic + // Currently just fail the chain, and drop all associated peers, removing them from the + // peer pool, to prevent re-status + for peer_id in self.peer_pool.drain() { + network.downvote_peer(peer_id); + } + ProcessingResult::RemoveChain + } + + /// Attempts to request the next required batches from the peer pool if the chain is syncing. + /// It will exhaust the peer pool and left over batches until the batch buffer is reached or + /// all peers are exhausted. + fn request_batches(&mut self, network: &mut SyncNetworkContext) { + if let ChainSyncingState::Syncing = self.state { + while self.send_range_request(network) {} } } - Ok(()) + /// Requests the next required batch from a peer. Returns true, if there was a peer available + /// to send a request and there are batches to request, false otherwise. + fn send_range_request(&mut self, network: &mut SyncNetworkContext) -> bool { + // find the next pending batch and request it from the peer + if let Some(peer_id) = self.get_next_peer() { + if let Some(batch) = self.get_next_batch(peer_id) { + debug!(self.log, "Requesting batch"; "start_slot" => batch.start_slot, "end_slot" => batch.end_slot, "id" => *batch.id, "peer" => format!("{:?}", batch.current_peer), "head_root"=> format!("{}", batch.head_root)); + // send the batch + self.send_batch(network, batch); + return true; + } + } + false + } + + /// Returns a peer if there exists a peer which does not currently have a pending request. + /// + /// This is used to create the next request. 
+ fn get_next_peer(&self) -> Option { + // randomize the peers for load balancing + let mut rng = rand::thread_rng(); + let mut peers = self.peer_pool.iter().collect::>(); + peers.shuffle(&mut rng); + for peer in peers { + if self.pending_batches.peer_is_idle(peer) { + return Some(peer.clone()); + } + } + None + } + + /// Returns the next required batch from the chain if it exists. If there are no more batches + /// required, `None` is returned. + fn get_next_batch(&mut self, peer_id: PeerId) -> Option> { + // only request batches up to the buffer size limit + if self + .completed_batches + .len() + .saturating_add(self.pending_batches.len()) + > BATCH_BUFFER_SIZE as usize + { + return None; + } + + // don't request batches beyond the target head slot + let batch_start_slot = + self.start_slot + self.to_be_downloaded_id.saturating_sub(1) * BLOCKS_PER_BATCH; + if batch_start_slot > self.target_head_slot { + return None; + } + // truncate the batch to the target head of the chain + let batch_end_slot = std::cmp::min( + batch_start_slot + BLOCKS_PER_BATCH, + self.target_head_slot.saturating_add(1u64), + ); + + let batch_id = self.to_be_downloaded_id; + + // Find the next batch id. The largest of the next sequential id, or the next uncompleted + // id + let max_completed_id = self + .completed_batches + .iter() + .last() + .map(|x| x.id.0) + .unwrap_or_else(|| 0); + // TODO: Check if this is necessary + self.to_be_downloaded_id = BatchId(std::cmp::max( + self.to_be_downloaded_id.0 + 1, + max_completed_id + 1, + )); + + Some(Batch::new( + batch_id, + batch_start_slot, + batch_end_slot, + self.target_head_root, + peer_id, + )) + } + + /// Requests the provided batch from the provided peer. 
+ fn send_batch(&mut self, network: &mut SyncNetworkContext, batch: Batch) { + let request = batch.to_blocks_by_range_request(); + if let Ok(request_id) = network.blocks_by_range_request(batch.current_peer.clone(), request) + { + // add the batch to pending list + self.pending_batches.insert(request_id, batch); + } + } } diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 68c0c9a26c..a42589e9d0 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -3,13 +3,15 @@ //! Each chain type is stored in it's own vector. A variety of helper functions are given along //! with this struct to to simplify the logic of the other layers of sync. -use super::chain::{ChainSyncingState, ProcessingResult, SyncingChain}; +use super::chain::{ChainSyncingState, SyncingChain}; use crate::message_processor::PeerSyncInfo; +use crate::sync::manager::SyncMessage; use crate::sync::network_context::SyncNetworkContext; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::PeerId; use slog::{debug, error, warn}; use std::sync::Weak; +use tokio::sync::mpsc; use types::EthSpec; use types::{Hash256, Slot}; @@ -148,7 +150,7 @@ impl ChainCollection { // Stop the current chain from syncing self.finalized_chains[index].stop_syncing(); // Start the new chain - self.finalized_chains[new_index].start_syncing(network, local_slot, log); + self.finalized_chains[new_index].start_syncing(network, local_slot); self.sync_state = SyncState::Finalized; } } else if let Some(chain) = self @@ -158,7 +160,7 @@ impl ChainCollection { { // There is no currently syncing finalization chain, starting the one with the most peers debug!(log, "New finalized chain started syncing"; "new_target_root" => format!("{}", chain.target_head_root), "new_end_slot" => chain.target_head_slot, "new_start_slot"=> chain.start_slot); - 
chain.start_syncing(network, local_slot, log); + chain.start_syncing(network, local_slot); self.sync_state = SyncState::Finalized; } else { // There are no finalized chains, update the state. @@ -177,16 +179,22 @@ impl ChainCollection { target_head: Hash256, target_slot: Slot, peer_id: PeerId, + sync_send: mpsc::UnboundedSender>, + log: &slog::Logger, ) { self.finalized_chains.push(SyncingChain::new( local_finalized_slot, target_slot, target_head, peer_id, + sync_send, + self.beacon_chain.clone(), + log.clone(), )); } /// Add a new finalized chain to the collection and starts syncing it. + #[allow(clippy::too_many_arguments)] pub fn new_head_chain( &mut self, network: &mut SyncNetworkContext, @@ -194,6 +202,7 @@ impl ChainCollection { target_head: Hash256, target_slot: Slot, peer_id: PeerId, + sync_send: mpsc::UnboundedSender>, log: &slog::Logger, ) { // remove the peer from any other head chains @@ -203,10 +212,17 @@ impl ChainCollection { }); self.head_chains.retain(|chain| !chain.peer_pool.is_empty()); - let mut new_head_chain = - SyncingChain::new(remote_finalized_slot, target_slot, target_head, peer_id); + let mut new_head_chain = SyncingChain::new( + remote_finalized_slot, + target_slot, + target_head, + peer_id, + sync_send, + self.beacon_chain.clone(), + log.clone(), + ); // All head chains can sync simultaneously - new_head_chain.start_syncing(network, remote_finalized_slot, log); + new_head_chain.start_syncing(network, remote_finalized_slot); self.head_chains.push(new_head_chain); } @@ -218,10 +234,10 @@ impl ChainCollection { /// Given a chain iterator, runs a given function on each chain until the function returns /// `Some`. This allows the `RangeSync` struct to loop over chains and optionally remove the /// chain from the collection if the function results in completing the chain. 
- fn request_function<'a, F, I>(chain: I, mut func: F) -> Option<(usize, ProcessingResult)> + fn request_function<'a, F, I, U>(chain: I, mut func: F) -> Option<(usize, U)> where I: Iterator>, - F: FnMut(&'a mut SyncingChain) -> Option, + F: FnMut(&'a mut SyncingChain) -> Option, { chain .enumerate() @@ -229,25 +245,25 @@ impl ChainCollection { } /// Runs a function on all finalized chains. - pub fn finalized_request(&mut self, func: F) -> Option<(usize, ProcessingResult)> + pub fn finalized_request(&mut self, func: F) -> Option<(usize, U)> where - F: FnMut(&mut SyncingChain) -> Option, + F: FnMut(&mut SyncingChain) -> Option, { ChainCollection::request_function(self.finalized_chains.iter_mut(), func) } /// Runs a function on all head chains. - pub fn head_request(&mut self, func: F) -> Option<(usize, ProcessingResult)> + pub fn head_request(&mut self, func: F) -> Option<(usize, U)> where - F: FnMut(&mut SyncingChain) -> Option, + F: FnMut(&mut SyncingChain) -> Option, { ChainCollection::request_function(self.head_chains.iter_mut(), func) } /// Runs a function on all finalized and head chains. 
- pub fn head_finalized_request(&mut self, func: F) -> Option<(usize, ProcessingResult)> + pub fn head_finalized_request(&mut self, func: F) -> Option<(usize, U)> where - F: FnMut(&mut SyncingChain) -> Option, + F: FnMut(&mut SyncingChain) -> Option, { ChainCollection::request_function( self.finalized_chains @@ -267,9 +283,9 @@ impl ChainCollection { .retain(|chain| !chain.peer_pool.is_empty()); self.head_chains.retain(|chain| !chain.peer_pool.is_empty()); - let local_info = match self.beacon_chain.upgrade() { + let (beacon_chain, local_info) = match self.beacon_chain.upgrade() { Some(chain) => match PeerSyncInfo::from_chain(&chain) { - Some(local) => local, + Some(local) => (chain, local), None => { return error!( log, @@ -288,18 +304,25 @@ impl ChainCollection { .start_slot(T::EthSpec::slots_per_epoch()); // Remove chains that are out-dated and re-status their peers - let beacon_chain_clone = self.beacon_chain.clone(); self.finalized_chains.retain(|chain| { - if chain.target_head_slot <= local_finalized_slot { - chain.status_peers(beacon_chain_clone.clone(), network); + if chain.target_head_slot <= local_finalized_slot + || beacon_chain + .block_root_tree + .is_known_block_root(&chain.target_head_root) + { + chain.status_peers(network); false } else { true } }); self.head_chains.retain(|chain| { - if chain.target_head_slot <= local_finalized_slot { - chain.status_peers(beacon_chain_clone.clone(), network); + if chain.target_head_slot <= local_finalized_slot + || beacon_chain + .block_root_tree + .is_known_block_root(&chain.target_head_root) + { + chain.status_peers(network); false } else { true @@ -331,11 +354,11 @@ impl ChainCollection { let chain = if index >= self.finalized_chains.len() { let index = index - self.finalized_chains.len(); let chain = self.head_chains.swap_remove(index); - chain.status_peers(self.beacon_chain.clone(), network); + chain.status_peers(network); chain } else { let chain = self.finalized_chains.swap_remove(index); - 
chain.status_peers(self.beacon_chain.clone(), network); + chain.status_peers(network); chain }; diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index 601840b8f0..28cff24e31 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -2,8 +2,11 @@ //! peers. mod batch; +mod batch_processing; mod chain; mod chain_collection; mod range; +pub use batch::Batch; +pub use batch_processing::BatchProcessResult; pub use range::RangeSync; diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 98ef6e6219..60b9ea18be 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -41,7 +41,9 @@ use super::chain::ProcessingResult; use super::chain_collection::{ChainCollection, SyncState}; +use super::{Batch, BatchProcessResult}; use crate::message_processor::PeerSyncInfo; +use crate::sync::manager::SyncMessage; use crate::sync::network_context::SyncNetworkContext; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::rpc::RequestId; @@ -49,6 +51,7 @@ use eth2_libp2p::PeerId; use slog::{debug, error, trace, warn}; use std::collections::HashSet; use std::sync::Weak; +use tokio::sync::mpsc; use types::{BeaconBlock, EthSpec}; /// The primary object dealing with long range/batch syncing. This contains all the active and @@ -64,16 +67,24 @@ pub struct RangeSync { /// finalized chain(s) complete, these peer's get STATUS'ed to update their head slot before /// the head chains are formed and downloaded. awaiting_head_peers: HashSet, + /// The sync manager channel, allowing the batch processor thread to callback the sync task + /// once complete. + sync_send: mpsc::UnboundedSender>, /// The syncing logger. 
log: slog::Logger, } impl RangeSync { - pub fn new(beacon_chain: Weak>, log: slog::Logger) -> Self { + pub fn new( + beacon_chain: Weak>, + sync_send: mpsc::UnboundedSender>, + log: slog::Logger, + ) -> Self { RangeSync { beacon_chain: beacon_chain.clone(), chains: ChainCollection::new(beacon_chain), awaiting_head_peers: HashSet::new(), + sync_send, log, } } @@ -105,9 +116,10 @@ impl RangeSync { // determine if we need to run a sync to the nearest finalized state or simply sync to // its current head - let local_info = match self.beacon_chain.upgrade() { + + let (chain, local_info) = match self.beacon_chain.upgrade() { Some(chain) => match PeerSyncInfo::from_chain(&chain) { - Some(local) => local, + Some(local) => (chain, local), None => { return error!( self.log, @@ -117,10 +129,9 @@ impl RangeSync { } }, None => { - warn!(self.log, + return warn!(self.log, "Beacon chain dropped. Peer not considered for sync"; "peer_id" => format!("{:?}", peer_id)); - return; } }; @@ -138,7 +149,11 @@ impl RangeSync { // remove any out-of-date chains self.chains.purge_outdated_chains(network, &self.log); - if remote_finalized_slot > local_info.head_slot { + if remote_finalized_slot > local_info.head_slot + && !chain + .block_root_tree + .is_known_block_root(&remote.finalized_root) + { debug!(self.log, "Finalization sync peer joined"; "peer_id" => format!("{:?}", peer_id)); // Finalized chain search @@ -154,7 +169,7 @@ impl RangeSync { debug!(self.log, "Finalized chain exists, adding peer"; "peer_id" => format!("{:?}", peer_id), "target_root" => format!("{}", chain.target_head_root), "end_slot" => chain.target_head_slot, "start_slot"=> chain.start_slot); // add the peer to the chain's peer pool - chain.add_peer(network, peer_id, &self.log); + chain.add_peer(network, peer_id); // check if the new peer's addition will favour a new syncing chain. 
self.chains.update_finalized(network, &self.log); @@ -168,6 +183,8 @@ impl RangeSync { remote.finalized_root, remote_finalized_slot, peer_id, + self.sync_send.clone(), + &self.log, ); self.chains.update_finalized(network, &self.log); } @@ -188,7 +205,7 @@ impl RangeSync { debug!(self.log, "Adding peer to the existing head chain peer pool"; "head_root" => format!("{}",remote.head_root), "head_slot" => remote.head_slot, "peer_id" => format!("{:?}", peer_id)); // add the peer to the head's pool - chain.add_peer(network, peer_id, &self.log); + chain.add_peer(network, peer_id); } else { // There are no other head chains that match this peer's status, create a new one, and let start_slot = std::cmp::min(local_info.head_slot, remote_finalized_slot); @@ -199,6 +216,7 @@ impl RangeSync { remote.head_root, remote.head_slot, peer_id, + self.sync_send.clone(), &self.log, ); } @@ -223,17 +241,37 @@ impl RangeSync { // lookup should not be very expensive. However, we could add an extra index that maps the // request id to index of the vector to avoid O(N) searches and O(N) hash lookups. - let chain_ref = &self.beacon_chain; - let log_ref = &self.log; + let id_not_found = self + .chains + .head_finalized_request(|chain| { + chain.on_block_response(network, request_id, &beacon_block) + }) + .is_none(); + if id_not_found { + // The request didn't exist in any `SyncingChain`. Could have been an old request. 
Log + // and ignore + debug!(self.log, "Range response without matching request"; "peer" => format!("{:?}", peer_id), "request_id" => request_id); + } + } + + pub fn handle_block_process_result( + &mut self, + network: &mut SyncNetworkContext, + processing_id: u64, + batch: Batch, + result: BatchProcessResult, + ) { + // build an option for passing the batch to each chain + let mut batch = Some(batch); + match self.chains.finalized_request(|chain| { - chain.on_block_response(chain_ref, network, request_id, &beacon_block, log_ref) + chain.on_batch_process_result(network, processing_id, &mut batch, &result) }) { - Some((_, ProcessingResult::KeepChain)) => {} // blocks added to the chain Some((index, ProcessingResult::RemoveChain)) => { let chain = self.chains.remove_finalized_chain(index); debug!(self.log, "Finalized chain removed"; "start_slot" => chain.start_slot.as_u64(), "end_slot" => chain.target_head_slot.as_u64()); // the chain is complete, re-status it's peers - chain.status_peers(self.beacon_chain.clone(), network); + chain.status_peers(network); // update the state of the collection self.chains.update_finalized(network, &self.log); @@ -246,32 +284,30 @@ impl RangeSync { // sync match self.chains.sync_state() { SyncState::Idle | SyncState::Head => { - for peer_id in self.awaiting_head_peers.iter() { - network.status_peer(self.beacon_chain.clone(), peer_id.clone()); + for peer_id in self.awaiting_head_peers.drain() { + network.status_peer(self.beacon_chain.clone(), peer_id); } } SyncState::Finalized => {} // Have more finalized chains to complete } } + Some((_, ProcessingResult::KeepChain)) => {} None => { - // The request was not in any finalized chain, search head chains match self.chains.head_request(|chain| { - chain.on_block_response(&chain_ref, network, request_id, &beacon_block, log_ref) + chain.on_batch_process_result(network, processing_id, &mut batch, &result) }) { Some((index, ProcessingResult::RemoveChain)) => { let chain = 
self.chains.remove_head_chain(index); debug!(self.log, "Head chain completed"; "start_slot" => chain.start_slot.as_u64(), "end_slot" => chain.target_head_slot.as_u64()); // the chain is complete, re-status it's peers and remove it - chain.status_peers(self.beacon_chain.clone(), network); + chain.status_peers(network); // update the state of the collection self.chains.update_finalized(network, &self.log); } - Some(_) => {} + Some((_, ProcessingResult::KeepChain)) => {} None => { - // The request didn't exist in any `SyncingChain`. Could have been an old request. Log - // and ignore - debug!(self.log, "Range response without matching request"; "peer" => format!("{:?}", peer_id), "request_id" => request_id); + warn!(self.log, "No chains match the block processing id"; "id" => processing_id); } } } @@ -304,15 +340,12 @@ impl RangeSync { /// for this peer. If so we mark the batch as failed. The batch may then hit it's maximum /// retries. In this case, we need to remove the chain and re-status all the peers. 
fn remove_peer(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) { - let log_ref = &self.log; if let Some((index, ProcessingResult::RemoveChain)) = self.chains.head_finalized_request(|chain| { if chain.peer_pool.remove(peer_id) { // this chain contained the peer while let Some(batch) = chain.pending_batches.remove_batch_by_peer(peer_id) { - if let ProcessingResult::RemoveChain = - chain.failed_batch(network, batch, log_ref) - { + if let ProcessingResult::RemoveChain = chain.failed_batch(network, batch) { // a single batch failed, remove the chain return Some(ProcessingResult::RemoveChain); } @@ -341,10 +374,10 @@ impl RangeSync { request_id: RequestId, ) { // check that this request is pending - let log_ref = &self.log; - match self.chains.head_finalized_request(|chain| { - chain.inject_error(network, &peer_id, request_id, log_ref) - }) { + match self + .chains + .head_finalized_request(|chain| chain.inject_error(network, &peer_id, request_id)) + { Some((_, ProcessingResult::KeepChain)) => {} // error handled chain persists Some((index, ProcessingResult::RemoveChain)) => { debug!(self.log, "Chain being removed due to RPC error"); From 8c96739cab62b28d9646d3eb9f61445e81bc96fd Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 23 Jan 2020 17:31:08 +1100 Subject: [PATCH 11/16] Correct discovery address CLI functionality (#818) * Improve handling of discovery IP address CLI config * Remove excess debug logging * Add reviewers suggestions --- beacon_node/eth2-libp2p/src/config.rs | 7 ++++--- beacon_node/eth2-libp2p/src/discovery.rs | 7 +++++-- beacon_node/src/cli.rs | 3 ++- beacon_node/src/config.rs | 20 +++++++++++--------- 4 files changed, 22 insertions(+), 15 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index 682a648b31..d0ec62df23 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -20,8 +20,9 @@ pub struct Config { /// The TCP port that libp2p 
listens on. pub libp2p_port: u16, - /// The address to broadcast to peers about which address we are listening on. - pub discovery_address: std::net::IpAddr, + /// The address to broadcast to peers about which address we are listening on. None indicates + /// that no discovery address has been set in the CLI args. + pub discovery_address: Option, /// UDP port that discovery listens on. pub discovery_port: u16, @@ -86,7 +87,7 @@ impl Default for Config { network_dir, listen_address: "127.0.0.1".parse().expect("valid ip address"), libp2p_port: 9000, - discovery_address: "127.0.0.1".parse().expect("valid ip address"), + discovery_address: None, discovery_port: 9000, max_peers: 10, secret_key_hex: None, diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index fa3a3b604f..d2c46da1ae 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -310,7 +310,9 @@ fn load_enr( // Note: Discovery should update the ENR record's IP to the external IP as seen by the // majority of our peers. 
let mut local_enr = EnrBuilder::new("v4") - .ip(config.discovery_address) + .ip(config + .discovery_address + .unwrap_or_else(|| "127.0.0.1".parse().expect("valid ip"))) .tcp(config.libp2p_port) .udp(config.discovery_port) .build(&local_key) @@ -325,7 +327,8 @@ fn load_enr( match Enr::from_str(&enr_string) { Ok(enr) => { if enr.node_id() == local_enr.node_id() { - if enr.ip().map(Into::into) == Some(config.discovery_address) + if (config.discovery_address.is_none() + || enr.ip().map(Into::into) == config.discovery_address) && enr.tcp() == Some(config.libp2p_port) && enr.udp() == Some(config.discovery_port) { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index bdd4a1b1a2..741e9045fd 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -90,7 +90,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long("discovery-address") .value_name("ADDRESS") .help("The IP address to broadcast to other peers on how to reach this node. \ - Default is determined automatically.") + Default will load previous values from disk failing this it is set to 127.0.0.1 \ + and will be updated when connecting to other nodes on the network.") .takes_value(true), ) .arg( diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index b68e1841e0..75efeea6c6 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -100,7 +100,6 @@ pub fn get_configs( .parse() .map_err(|_| format!("Invalid listen address: {:?}", listen_address_str))?; client_config.network.listen_address = listen_address; - client_config.network.discovery_address = listen_address; } if let Some(max_peers_str) = cli_args.value_of("maxpeers") { @@ -140,9 +139,11 @@ pub fn get_configs( } if let Some(discovery_address_str) = cli_args.value_of("discovery-address") { - client_config.network.discovery_address = discovery_address_str - .parse() - .map_err(|_| format!("Invalid discovery address: {:?}", discovery_address_str))? 
+ client_config.network.discovery_address = Some( + discovery_address_str + .parse() + .map_err(|_| format!("Invalid discovery address: {:?}", discovery_address_str))?, + ) } if let Some(disc_port_str) = cli_args.value_of("disc-port") { @@ -283,8 +284,8 @@ pub fn get_configs( * Discovery address is set to localhost by default. */ if cli_args.is_present("zero-ports") { - if client_config.network.discovery_address == IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)) { - client_config.network.discovery_address = "127.0.0.1".parse().expect("Valid IP address") + if client_config.network.discovery_address == Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) { + client_config.network.discovery_address = None } client_config.network.libp2p_port = unused_port("tcp").map_err(|e| format!("Failed to get port for libp2p: {}", e))?; @@ -294,13 +295,14 @@ pub fn get_configs( client_config.websocket_server.port = 0; } - // ENR ip needs to be explicit for node to be discoverable - if client_config.network.discovery_address == IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)) { + // ENR IP needs to be explicit for node to be discoverable + if client_config.network.discovery_address == Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) { warn!( log, "Discovery address cannot be 0.0.0.0, Setting to to 127.0.0.1" ); - client_config.network.discovery_address = "127.0.0.1".parse().expect("Valid IP address") + client_config.network.discovery_address = + Some("127.0.0.1".parse().expect("Valid IP address")) } Ok((client_config, eth2_config, log)) } From 89f05e4a4f167312fe057557040d0fed30a601c3 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Thu, 23 Jan 2020 12:37:39 +0530 Subject: [PATCH 12/16] Adds simulator for syncing (#758) * Add CLI for beacon_chain_sim * Rename beacon-chain-sim to simulator * Fix simulator workflow * Push Cargo.lock * WIP syncing simulator * Add cli args * Remove eth1 stuff and deposits * Add syncing strategy simulations * Successful one node sync * Clean up * Rename to avoid confusion * add command 
line args * fix cargo fmt issues * Add additional syncing strategies * Run all syncing strategies one after other; add comments * Improve cli argument parsing * Change `end_after_checks` default to true * Small modifications to syncing-sim * Add `strategy` cli argument * Documented defaults in cli help Co-authored-by: mkinney Co-authored-by: Age Manning --- .github/workflows/test-suite.yml | 2 +- Cargo.lock | 29 +-- Cargo.toml | 2 +- .../Cargo.toml | 3 +- .../src/checks.rs | 4 +- tests/simulator/src/cli.rs | 73 +++++++ .../src/local_network.rs | 16 +- .../src/main.rs | 170 +++++++++++++-- tests/simulator/src/sync_sim.rs | 201 ++++++++++++++++++ 9 files changed, 461 insertions(+), 39 deletions(-) rename tests/{beacon_chain_sim => simulator}/Cargo.toml (93%) rename tests/{beacon_chain_sim => simulator}/src/checks.rs (98%) create mode 100644 tests/simulator/src/cli.rs rename tests/{beacon_chain_sim => simulator}/src/local_network.rs (90%) rename tests/{beacon_chain_sim => simulator}/src/main.rs (64%) create mode 100644 tests/simulator/src/sync_sim.rs diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 5b36f010fa..0f9689ea93 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -57,4 +57,4 @@ jobs: - name: Install ganache-cli run: sudo npm install -g ganache-cli - name: Run the beacon chain sim - run: cargo run --release --bin beacon_chain_sim + run: cargo run --release --bin simulator beacon-chain-sim diff --git a/Cargo.lock b/Cargo.lock index 05787dde38..db034d75fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -240,20 +240,6 @@ dependencies = [ "websocket_server 0.1.0", ] -[[package]] -name = "beacon_chain_sim" -version = "0.1.0" -dependencies = [ - "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "eth1_test_rig 0.1.0", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "node_test_rig 0.1.0", - "parking_lot 0.9.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "types 0.1.0", - "validator_client 0.1.0", -] - [[package]] name = "beacon_node" version = "0.1.0" @@ -3744,6 +3730,21 @@ dependencies = [ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "simulator" +version = "0.1.0" +dependencies = [ + "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "eth1_test_rig 0.1.0", + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "node_test_rig 0.1.0", + "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "types 0.1.0", + "validator_client 0.1.0", +] + [[package]] name = "slab" version = "0.3.0" diff --git a/Cargo.toml b/Cargo.toml index 7bf700ee5f..716c4c5b47 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,7 +37,7 @@ members = [ "beacon_node/eth1", "beacon_node/beacon_chain", "beacon_node/websocket_server", - "tests/beacon_chain_sim", + "tests/simulator", "tests/ef_tests", "tests/eth1_test_rig", "tests/node_test_rig", diff --git a/tests/beacon_chain_sim/Cargo.toml b/tests/simulator/Cargo.toml similarity index 93% rename from tests/beacon_chain_sim/Cargo.toml rename to tests/simulator/Cargo.toml index 3b992c07f0..189c24f637 100644 --- a/tests/beacon_chain_sim/Cargo.toml +++ b/tests/simulator/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "beacon_chain_sim" +name = "simulator" version = "0.1.0" authors = ["Paul Hauner "] edition = "2018" @@ -15,3 +15,4 @@ futures = "0.1.29" tokio = "0.1.22" eth1_test_rig = { path = "../eth1_test_rig" } env_logger = "0.7.1" +clap = "2.33.0" diff --git a/tests/beacon_chain_sim/src/checks.rs b/tests/simulator/src/checks.rs similarity index 98% rename from tests/beacon_chain_sim/src/checks.rs rename to 
tests/simulator/src/checks.rs index adb2854476..8cfb0aaa23 100644 --- a/tests/beacon_chain_sim/src/checks.rs +++ b/tests/simulator/src/checks.rs @@ -41,7 +41,7 @@ pub fn verify_first_finalization( } /// Delays for `epochs`, plus half a slot extra. -fn epoch_delay( +pub fn epoch_delay( epochs: Epoch, slot_duration: Duration, slots_per_epoch: u64, @@ -60,7 +60,7 @@ fn slot_delay(slots: Slot, slot_duration: Duration) -> impl Future( +pub fn verify_all_finalized_at( network: LocalNetwork, epoch: Epoch, ) -> impl Future { diff --git a/tests/simulator/src/cli.rs b/tests/simulator/src/cli.rs new file mode 100644 index 0000000000..93c8cb051b --- /dev/null +++ b/tests/simulator/src/cli.rs @@ -0,0 +1,73 @@ +use clap::{App, Arg, SubCommand}; + +pub fn cli_app<'a, 'b>() -> App<'a, 'b> { + App::new("simulator") + .version(crate_version!()) + .author("Sigma Prime ") + .about("Options for interacting with simulator") + .subcommand( + SubCommand::with_name("beacon-chain-sim") + .about( + "Lighthouse Beacon Chain Simulator creates `n` beacon node and validator clients, \ + each with `v` validators. A deposit contract is deployed at the start of the \ + simulation using a local `ganache-cli` instance (you must have `ganache-cli` \ + installed and avaliable on your path). All beacon nodes independently listen \ + for genesis from the deposit contract, then start operating. \ + + As the simulation runs, there are checks made to ensure that all components \ + are running correctly. 
If any of these checks fail, the simulation will \ + exit immediately.", + ) + .arg(Arg::with_name("nodes") + .short("n") + .long("nodes") + .takes_value(true) + .help("Number of beacon nodes (default 4)")) + .arg(Arg::with_name("validators_per_node") + .short("v") + .long("validators_per_node") + .takes_value(true) + .help("Number of validators (default 20)")) + .arg(Arg::with_name("speed_up_factor") + .short("s") + .long("speed_up_factor") + .takes_value(true) + .help("Speed up factor (default 4)")) + .arg(Arg::with_name("end_after_checks") + .short("e") + .long("end_after_checks") + .takes_value(false) + .help("End after checks (default true)")) + ) + .subcommand( + SubCommand::with_name("syncing-sim") + .about("Run the syncing simulation") + .arg( + Arg::with_name("speedup") + .short("s") + .long("speedup") + .takes_value(true) + .help("Speed up factor for eth1 blocks and slot production (default 15)"), + ) + .arg( + Arg::with_name("initial_delay") + .short("i") + .long("initial_delay") + .takes_value(true) + .help("Epoch delay for new beacon node to start syncing (default 50)"), + ) + .arg( + Arg::with_name("sync_delay") + .long("sync_delay") + .takes_value(true) + .help("Epoch delay for newly added beacon nodes get synced (default 10)"), + ) + .arg( + Arg::with_name("strategy") + .long("strategy") + .takes_value(true) + .possible_values(&["one-node", "two-nodes", "mixed", "all"]) + .help("Sync strategy to run. 
(default all)"), + ), + ) +} diff --git a/tests/beacon_chain_sim/src/local_network.rs b/tests/simulator/src/local_network.rs similarity index 90% rename from tests/beacon_chain_sim/src/local_network.rs rename to tests/simulator/src/local_network.rs index fe9b0d5b81..c659b2f65e 100644 --- a/tests/beacon_chain_sim/src/local_network.rs +++ b/tests/simulator/src/local_network.rs @@ -6,7 +6,7 @@ use node_test_rig::{ use parking_lot::RwLock; use std::ops::Deref; use std::sync::Arc; -use types::EthSpec; +use types::{Epoch, EthSpec}; const BOOTNODE_PORT: u16 = 42424; @@ -82,7 +82,7 @@ impl LocalNetwork { mut beacon_config: ClientConfig, ) -> impl Future { let self_1 = self.clone(); - + println!("Adding beacon node.."); self.beacon_nodes .read() .first() @@ -154,4 +154,16 @@ impl LocalNetwork { .map(|beacon_node| beacon_node.remote_node()) .collect() } + + /// Return current epoch of bootnode. + pub fn bootnode_epoch(&self) -> impl Future { + let nodes = self.remote_nodes().expect("Failed to get remote nodes"); + let bootnode = nodes.first().expect("Should contain bootnode"); + bootnode + .http + .beacon() + .get_head() + .map_err(|e| format!("Cannot get head: {:?}", e)) + .map(|head| head.finalized_slot.epoch(E::slots_per_epoch())) + } } diff --git a/tests/beacon_chain_sim/src/main.rs b/tests/simulator/src/main.rs similarity index 64% rename from tests/beacon_chain_sim/src/main.rs rename to tests/simulator/src/main.rs index e03232e010..299779fe10 100644 --- a/tests/beacon_chain_sim/src/main.rs +++ b/tests/simulator/src/main.rs @@ -6,21 +6,23 @@ //! As the simulation runs, there are checks made to ensure that all components are running //! correctly. If any of these checks fail, the simulation will exit immediately. //! -//! By default, the simulation will end as soon as all checks have finished. It may be configured -//! to run indefinitely by setting `end_after_checks = false`. -//! //! ## Future works //! //! 
Presently all the beacon nodes and validator clients all log to stdout. Additionally, the //! simulation uses `println` to communicate some info. It might be nice if the nodes logged to //! easy-to-find files and stdout only contained info from the simulation. //! -//! It would also be nice to add a CLI using `clap` so that the variables in `main()` can be -//! changed without a recompile. + +#[macro_use] +extern crate clap; mod checks; +mod cli; mod local_network; +mod sync_sim; +use clap::ArgMatches; +use cli::cli_app; use env_logger::{Builder, Env}; use eth1_test_rig::GanacheEth1Instance; use futures::{future, stream, Future, Stream}; @@ -29,6 +31,7 @@ use node_test_rig::{ environment::EnvironmentBuilder, testing_client_config, ClientGenesis, ValidatorConfig, }; use std::time::{Duration, Instant}; +use sync_sim::*; use tokio::timer::Interval; use types::MinimalEthSpec; @@ -38,30 +41,161 @@ fn main() { // Debugging output for libp2p and external crates. Builder::from_env(Env::default()).init(); - let nodes = 4; - let validators_per_node = 20; + let matches = cli_app().get_matches(); + match matches.subcommand() { + ("beacon-chain-sim", Some(matches)) => match run_beacon_chain_sim(matches) { + Ok(()) => println!("Simulation exited successfully"), + Err(e) => { + eprintln!("Simulation exited with error: {}", e); + std::process::exit(1) + } + }, + ("syncing-sim", Some(matches)) => match run_syncing_sim(matches) { + Ok(()) => println!("Simulation exited successfully"), + Err(e) => { + eprintln!("Simulation exited with error: {}", e); + std::process::exit(1) + } + }, + _ => { + eprintln!("Invalid subcommand. 
Use --help to see available options"); + std::process::exit(1) + } + } +} + +fn run_beacon_chain_sim(matches: &ArgMatches) -> Result<(), String> { + let nodes = value_t!(matches, "nodes", usize).unwrap_or(4); + let validators_per_node = value_t!(matches, "validators_per_node", usize).unwrap_or(20); + let speed_up_factor = value_t!(matches, "nodes", u64).unwrap_or(4); + let mut end_after_checks = true; + if matches.is_present("end_after_checks") { + end_after_checks = false; + } + + println!("Beacon Chain Simulator:"); + println!(" nodes:{}", nodes); + println!(" validators_per_node:{}", validators_per_node); + println!(" end_after_checks:{}", end_after_checks); + let log_level = "debug"; let log_format = None; - let speed_up_factor = 4; - let end_after_checks = true; - match async_sim( + beacon_chain_sim( nodes, validators_per_node, speed_up_factor, log_level, log_format, end_after_checks, - ) { - Ok(()) => println!("Simulation exited successfully"), - Err(e) => { - eprintln!("Simulation exited with error: {}", e); - std::process::exit(1) - } - } + ) } -fn async_sim( +fn run_syncing_sim(matches: &ArgMatches) -> Result<(), String> { + let initial_delay = value_t!(matches, "initial_delay", u64).unwrap_or(50); + let sync_delay = value_t!(matches, "sync_delay", u64).unwrap_or(10); + let speed_up_factor = value_t!(matches, "speedup", u64).unwrap_or(15); + let strategy = value_t!(matches, "strategy", String).unwrap_or("all".into()); + + println!("Syncing Simulator:"); + println!(" initial_delay:{}", initial_delay); + println!(" sync delay:{}", sync_delay); + println!(" speed up factor:{}", speed_up_factor); + println!(" strategy:{}", strategy); + + let log_level = "debug"; + let log_format = None; + + syncing_sim( + speed_up_factor, + initial_delay, + sync_delay, + strategy, + log_level, + log_format, + ) +} + +fn syncing_sim( + speed_up_factor: u64, + initial_delay: u64, + sync_delay: u64, + strategy: String, + log_level: &str, + log_format: Option<&str>, +) -> 
Result<(), String> { + let mut env = EnvironmentBuilder::minimal() + .async_logger(log_level, log_format)? + .multi_threaded_tokio_runtime()? + .build()?; + + let spec = &mut env.eth2_config.spec; + let end_after_checks = true; + + spec.milliseconds_per_slot = spec.milliseconds_per_slot / speed_up_factor; + spec.min_genesis_time = 0; + spec.min_genesis_active_validator_count = 16; + + let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); + + let context = env.core_context(); + let beacon_config = testing_client_config(); + let num_validators = 8; + let future = LocalNetwork::new(context, beacon_config.clone()) + /* + * Add a validator client which handles all validators from the genesis state. + */ + .and_then(move |network| { + network + .add_validator_client(ValidatorConfig::default(), 0, (0..num_validators).collect()) + .map(|_| network) + }) + /* + * Start the processes that will run checks on the network as it runs. + */ + .and_then(move |network| { + // The `final_future` either completes immediately or never completes, depending on the value + // of `end_after_checks`. + let final_future: Box + Send> = + if end_after_checks { + Box::new(future::ok(()).map_err(|()| "".to_string())) + } else { + Box::new(future::empty().map_err(|()| "".to_string())) + }; + + future::ok(()) + // Check all syncing strategies one after other. + .join(pick_strategy( + &strategy, + network.clone(), + beacon_config.clone(), + slot_duration, + initial_delay, + sync_delay, + )) + .join(final_future) + .map(|_| network) + }) + /* + * End the simulation by dropping the network. This will kill all running beacon nodes and + * validator clients. + */ + .map(|network| { + println!( + "Simulation complete. Finished with {} beacon nodes and {} validator clients", + network.beacon_node_count(), + network.validator_client_count() + ); + + // Be explicit about dropping the network, as this kills all the nodes. This ensures + // all the checks have adequate time to pass. 
+ drop(network) + }); + + env.runtime().block_on(future) +} + +fn beacon_chain_sim( node_count: usize, validators_per_node: usize, speed_up_factor: u64, diff --git a/tests/simulator/src/sync_sim.rs b/tests/simulator/src/sync_sim.rs new file mode 100644 index 0000000000..6c18406b6e --- /dev/null +++ b/tests/simulator/src/sync_sim.rs @@ -0,0 +1,201 @@ +use crate::checks::{epoch_delay, verify_all_finalized_at}; +use crate::local_network::LocalNetwork; +use futures::{Future, IntoFuture}; +use node_test_rig::ClientConfig; +use std::time::Duration; +use types::{Epoch, EthSpec}; + +pub fn pick_strategy( + strategy: &str, + network: LocalNetwork, + beacon_config: ClientConfig, + slot_duration: Duration, + initial_delay: u64, + sync_delay: u64, +) -> Box + Send + 'static> { + match strategy { + "one-node" => Box::new(verify_one_node_sync( + network, + beacon_config, + slot_duration, + initial_delay, + sync_delay, + )), + "two-nodes" => Box::new(verify_two_nodes_sync( + network, + beacon_config, + slot_duration, + initial_delay, + sync_delay, + )), + "mixed" => Box::new(verify_in_between_sync( + network, + beacon_config, + slot_duration, + initial_delay, + sync_delay, + )), + "all" => Box::new(verify_syncing( + network, + beacon_config, + slot_duration, + initial_delay, + sync_delay, + )), + _ => Box::new(Err("Invalid strategy".into()).into_future()), + } +} + +/// Verify one node added after `initial_delay` epochs is in sync +/// after `sync_delay` epochs. 
+pub fn verify_one_node_sync( + network: LocalNetwork, + beacon_config: ClientConfig, + slot_duration: Duration, + initial_delay: u64, + sync_delay: u64, +) -> impl Future { + // Delay for `initial_delay` epochs before adding another node to start syncing + epoch_delay( + Epoch::new(initial_delay), + slot_duration, + E::slots_per_epoch(), + ) + .and_then(move |_| { + // Add a beacon node + network.add_beacon_node(beacon_config).map(|_| network) + }) + .and_then(move |network| { + // Delay for `sync_delay` epochs before verifying synced state. + epoch_delay(Epoch::new(sync_delay), slot_duration, E::slots_per_epoch()).map(|_| network) + }) + .and_then(move |network| network.bootnode_epoch().map(|e| (e, network))) + .and_then(move |(epoch, network)| { + verify_all_finalized_at(network, epoch).map_err(|e| format!("One node sync error: {}", e)) + }) +} + +/// Verify two nodes added after `initial_delay` epochs are in sync +/// after `sync_delay` epochs. +pub fn verify_two_nodes_sync( + network: LocalNetwork, + beacon_config: ClientConfig, + slot_duration: Duration, + initial_delay: u64, + sync_delay: u64, +) -> impl Future { + // Delay for `initial_delay` epochs before adding another node to start syncing + epoch_delay( + Epoch::new(initial_delay), + slot_duration, + E::slots_per_epoch(), + ) + .and_then(move |_| { + // Add beacon nodes + network + .add_beacon_node(beacon_config.clone()) + .join(network.add_beacon_node(beacon_config.clone())) + .map(|_| network) + }) + .and_then(move |network| { + // Delay for `sync_delay` epochs before verifying synced state. 
+ epoch_delay(Epoch::new(sync_delay), slot_duration, E::slots_per_epoch()).map(|_| network) + }) + .and_then(move |network| network.bootnode_epoch().map(|e| (e, network))) + .and_then(move |(epoch, network)| { + verify_all_finalized_at(network, epoch).map_err(|e| format!("Two node sync error: {}", e)) + }) +} + +/// Add 2 syncing nodes after `initial_delay` epochs, +/// Add another node after `sync_delay - 5` epochs and verify all are +/// in sync after `sync_delay + 5` epochs. +pub fn verify_in_between_sync( + network: LocalNetwork, + beacon_config: ClientConfig, + slot_duration: Duration, + initial_delay: u64, + sync_delay: u64, +) -> impl Future { + // Delay for `initial_delay` epochs before adding another node to start syncing + let config1 = beacon_config.clone(); + epoch_delay( + Epoch::new(initial_delay), + slot_duration, + E::slots_per_epoch(), + ) + .and_then(move |_| { + // Add a beacon node + network + .add_beacon_node(beacon_config.clone()) + .join(network.add_beacon_node(beacon_config.clone())) + .map(|_| network) + }) + .and_then(move |network| { + // Delay before adding additional syncing nodes. + epoch_delay( + Epoch::new(sync_delay - 5), + slot_duration, + E::slots_per_epoch(), + ) + .map(|_| network) + }) + .and_then(move |network| { + // Add a beacon node + network.add_beacon_node(config1.clone()).map(|_| network) + }) + .and_then(move |network| { + // Delay for `sync_delay` epochs before verifying synced state. + epoch_delay( + Epoch::new(sync_delay + 5), + slot_duration, + E::slots_per_epoch(), + ) + .map(|_| network) + }) + .and_then(move |network| network.bootnode_epoch().map(|e| (e, network))) + .and_then(move |(epoch, network)| { + verify_all_finalized_at(network, epoch).map_err(|e| format!("In between sync error: {}", e)) + }) +} + +/// Run syncing strategies one after other. 
+pub fn verify_syncing( + network: LocalNetwork, + beacon_config: ClientConfig, + slot_duration: Duration, + initial_delay: u64, + sync_delay: u64, +) -> impl Future { + verify_one_node_sync( + network.clone(), + beacon_config.clone(), + slot_duration, + initial_delay, + sync_delay, + ) + .map(|_| println!("Completed one node sync")) + .and_then(move |_| { + verify_two_nodes_sync( + network.clone(), + beacon_config.clone(), + slot_duration, + initial_delay, + sync_delay, + ) + .map(|_| { + println!("Completed two node sync"); + (network, beacon_config) + }) + }) + .and_then(move |(network, beacon_config)| { + verify_in_between_sync( + network, + beacon_config, + slot_duration, + initial_delay, + sync_delay, + ) + .map(|_| println!("Completed in between sync")) + }) +} From 23a35c37672a05fb9267a0af66cc027a2e945460 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Thu, 23 Jan 2020 12:46:11 +0530 Subject: [PATCH 13/16] Persist/load DHT on shutdown/startup (#659) * Store dht enrs on shutdown * Load enrs on startup and add tests * Remove enr_entries from behavior * Move all dht persisting logic to `NetworkService` * Move `PersistedDht` from eth2-libp2p to network crate * Add test to confirm dht persistence * Add logging * Remove extra call to beacon_chain persist * Expose only mutable `add_enr` method from behaviour * Fix tests * Fix merge errors --- Cargo.lock | 2 + beacon_node/client/src/lib.rs | 8 -- beacon_node/eth2-libp2p/src/behaviour.rs | 11 ++ beacon_node/eth2-libp2p/src/discovery.rs | 5 + beacon_node/network/Cargo.toml | 2 + beacon_node/network/src/lib.rs | 1 + beacon_node/network/src/persisted_dht.rs | 51 +++++++ beacon_node/network/src/service.rs | 169 ++++++++++++++++++++++- beacon_node/store/src/errors.rs | 1 + beacon_node/store/src/lib.rs | 2 + 10 files changed, 242 insertions(+), 10 deletions(-) create mode 100644 beacon_node/network/src/persisted_dht.rs diff --git a/Cargo.lock b/Cargo.lock index db034d75fd..ee72d36d06 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -2665,9 +2665,11 @@ dependencies = [ "eth2_ssz 0.1.2", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "genesis 0.1.0", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rlp 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "sloggers 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 2e20f1e671..a396e8646a 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -61,11 +61,3 @@ impl Client { self.libp2p_network.as_ref().map(|n| n.local_enr()) } } - -impl Drop for Client { - fn drop(&mut self) { - if let Some(beacon_chain) = &self.beacon_chain { - let _result = beacon_chain.persist(); - } - } -} diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index 608cccf7dd..ddc9292014 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -3,6 +3,7 @@ use crate::rpc::{RPCEvent, RPCMessage, RPC}; use crate::GossipTopic; use crate::{error, NetworkConfig}; use crate::{Topic, TopicHash}; +use enr::Enr; use futures::prelude::*; use libp2p::{ core::identity::Keypair, @@ -254,6 +255,16 @@ impl Behaviour { pub fn peer_unbanned(&mut self, peer_id: &PeerId) { self.discovery.peer_unbanned(peer_id); } + + /// Returns an iterator over all enr entries in the DHT. + pub fn enr_entries(&mut self) -> impl Iterator { + self.discovery.enr_entries() + } + + /// Add an ENR to the routing table of the discovery mechanism. 
+ pub fn add_enr(&mut self, enr: Enr) { + self.discovery.add_enr(enr); + } } /// The types of events than can be obtained from polling the behaviour. diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index d2c46da1ae..a1d1fa5c9b 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -148,6 +148,11 @@ impl Discovery { self.banned_peers.remove(peer_id); } + /// Returns an iterator over all enr entries in the DHT. + pub fn enr_entries(&mut self) -> impl Iterator { + self.discovery.enr_entries() + } + /// Search for new peers using the underlying discovery mechanism. fn find_peers(&mut self) { // pick a random NodeId diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 906c4f938d..da8c7712c2 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -6,6 +6,7 @@ edition = "2018" [dev-dependencies] sloggers = "0.3.4" +genesis = { path = "../genesis" } [dependencies] beacon_chain = { path = "../beacon_chain" } @@ -24,3 +25,4 @@ smallvec = "1.0.0" # TODO: Remove rand crate for mainnet rand = "0.7.2" fnv = "1.0.6" +rlp = "0.4.3" diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 83d4935cab..fddca767bd 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -2,6 +2,7 @@ pub mod error; pub mod message_handler; pub mod message_processor; +pub mod persisted_dht; pub mod service; pub mod sync; diff --git a/beacon_node/network/src/persisted_dht.rs b/beacon_node/network/src/persisted_dht.rs new file mode 100644 index 0000000000..dd894742b5 --- /dev/null +++ b/beacon_node/network/src/persisted_dht.rs @@ -0,0 +1,51 @@ +use eth2_libp2p::Enr; +use rlp; +use store::{DBColumn, Error as StoreError, SimpleStoreItem}; + +/// 32-byte key for accessing the `DhtEnrs`. +pub const DHT_DB_KEY: &str = "PERSISTEDDHTPERSISTEDDHTPERSISTE"; + +/// Wrapper around dht for persistence to disk. 
+pub struct PersistedDht { + pub enrs: Vec, +} + +impl SimpleStoreItem for PersistedDht { + fn db_column() -> DBColumn { + DBColumn::DhtEnrs + } + + fn as_store_bytes(&self) -> Vec { + rlp::encode_list(&self.enrs) + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + let rlp = rlp::Rlp::new(bytes); + let enrs: Vec = rlp + .as_list() + .map_err(|e| StoreError::RlpError(format!("{}", e)))?; + Ok(PersistedDht { enrs }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use eth2_libp2p::Enr; + use std::str::FromStr; + use std::sync::Arc; + use store::{MemoryStore, Store}; + use types::Hash256; + use types::MinimalEthSpec; + #[test] + fn test_persisted_dht() { + let store = Arc::new(MemoryStore::::open()); + let enrs = vec![Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap()]; + let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes()); + store + .put(&key, &PersistedDht { enrs: enrs.clone() }) + .unwrap(); + let dht: PersistedDht = store.get(&key).unwrap().unwrap(); + assert_eq!(dht.enrs, enrs); + } +} diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 7936e23d61..e10ab4a45f 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,5 +1,6 @@ use crate::error; use crate::message_handler::{HandlerMessage, MessageHandler}; +use crate::persisted_dht::{PersistedDht, DHT_DB_KEY}; use crate::NetworkConfig; use beacon_chain::{BeaconChain, BeaconChainTypes}; use core::marker::PhantomData; @@ -9,10 +10,12 @@ use eth2_libp2p::{PubsubMessage, RPCEvent}; use futures::prelude::*; use futures::Stream; use parking_lot::Mutex; -use slog::{debug, info, trace}; +use slog::{debug, error, info, trace}; use std::sync::Arc; +use store::Store; use tokio::runtime::TaskExecutor; use tokio::sync::{mpsc, oneshot}; +use types::Hash256; /// The time in seconds that a 
peer will be banned and prevented from reconnecting. const BAN_PEER_TIMEOUT: u64 = 30; @@ -21,6 +24,8 @@ const BAN_PEER_TIMEOUT: u64 = 30; pub struct Service { libp2p_service: Arc>, libp2p_port: u16, + store: Arc, + log: slog::Logger, _libp2p_exit: oneshot::Sender<()>, _network_send: mpsc::UnboundedSender, _phantom: PhantomData, @@ -35,6 +40,8 @@ impl Service { ) -> error::Result<(Arc, mpsc::UnboundedSender)> { // build the network channel let (network_send, network_recv) = mpsc::unbounded_channel::(); + // Get a reference to the beacon chain store + let store = beacon_chain.store.clone(); // launch message handler thread let message_handler_send = MessageHandler::spawn( beacon_chain, @@ -49,17 +56,32 @@ impl Service { network_log.clone(), )?)); + // Load DHT from store + let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes()); + let enrs: Vec = match store.get(&key) { + Ok(Some(p)) => { + let p: PersistedDht = p; + p.enrs + } + _ => Vec::new(), + }; + for enr in enrs { + libp2p_service.lock().swarm.add_enr(enr); + } + let libp2p_exit = spawn_service( libp2p_service.clone(), network_recv, message_handler_send, executor, - network_log, + network_log.clone(), config.propagation_percentage, )?; let network_service = Service { libp2p_service, libp2p_port: config.libp2p_port, + store, + log: network_log, _libp2p_exit: libp2p_exit, _network_send: network_send.clone(), _phantom: PhantomData, @@ -117,6 +139,25 @@ impl Service { pub fn libp2p_service(&self) -> Arc> { self.libp2p_service.clone() } + + /// Attempt to persist the enrs in the DHT to `self.store`. 
+ pub fn persist_dht(&self) -> Result<(), store::Error> { + let enrs: Vec = self + .libp2p_service() + .lock() + .swarm + .enr_entries() + .map(|x| x.clone()) + .collect(); + info!( + self.log, + "Persisting DHT to store"; + "Number of peers" => format!("{}", enrs.len()), + ); + let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes()); + self.store.put(&key, &PersistedDht { enrs })?; + Ok(()) + } } fn spawn_service( @@ -307,3 +348,127 @@ pub enum NetworkMessage { /// Disconnect and bans a peer id. Disconnect { peer_id: PeerId }, } + +impl Drop for Service { + fn drop(&mut self) { + if let Err(e) = self.persist_dht() { + error!( + self.log, + "Failed to persist DHT on drop"; + "error" => format!("{:?}", e) + ) + } else { + info!( + self.log, + "Saved DHT state"; + ) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use beacon_chain::builder::BeaconChainBuilder; + use eth2_libp2p::Enr; + use genesis::{generate_deterministic_keypairs, interop_genesis_state}; + use slog::Logger; + use sloggers::{null::NullLoggerBuilder, Build}; + use std::str::FromStr; + use store::{migrate::NullMigrator, SimpleDiskStore}; + use tokio::runtime::Runtime; + use types::{EthSpec, MinimalEthSpec}; + + fn get_logger() -> Logger { + let builder = NullLoggerBuilder; + builder.build().expect("should build logger") + } + + #[test] + fn test_dht_persistence() { + // Create new LevelDB store + let path = "/tmp"; + let store = Arc::new(SimpleDiskStore::open(&std::path::PathBuf::from(path)).unwrap()); + // Create a `BeaconChain` object to pass to `Service` + let validator_count = 8; + let genesis_time = 13371337; + + let log = get_logger(); + let spec = MinimalEthSpec::default_spec(); + + let genesis_state = interop_genesis_state( + &generate_deterministic_keypairs(validator_count), + genesis_time, + &spec, + ) + .expect("should create interop genesis state"); + let chain = BeaconChainBuilder::new(MinimalEthSpec) + .logger(log.clone()) + .store(store) + .store_migrator(NullMigrator) + 
.genesis_state(genesis_state) + .expect("should build state using recent genesis") + .dummy_eth1_backend() + .expect("should build the dummy eth1 backend") + .null_event_handler() + .testing_slot_clock(std::time::Duration::from_secs(1)) + .expect("should configure testing slot clock") + .reduced_tree_fork_choice() + .expect("should add fork choice to builder") + .build() + .expect("should build"); + let beacon_chain = Arc::new(chain); + let enr1 = Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap(); + let enr2 = Enr::from_str("enr:-IS4QJ2d11eu6dC7E7LoXeLMgMP3kom1u3SE8esFSWvaHoo0dP1jg8O3-nx9ht-EO3CmG7L6OkHcMmoIh00IYWB92QABgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQIB_c-jQMOXsbjWkbN-Oj99H57gfId5pfb4wa1qxwV4CIN1ZHCCIyk").unwrap(); + let enrs = vec![enr1, enr2]; + + let runtime = Runtime::new().unwrap(); + + // Create new network service + let (service, _) = Service::new( + beacon_chain.clone(), + &NetworkConfig::default(), + &runtime.executor(), + log.clone(), + ) + .unwrap(); + + // Add enrs manually to dht + for enr in enrs.iter() { + service.libp2p_service().lock().swarm.add_enr(enr.clone()); + } + assert_eq!( + enrs.len(), + service + .libp2p_service() + .lock() + .swarm + .enr_entries() + .collect::>() + .len(), + "DHT should have 2 enrs" + ); + // Drop the service value + std::mem::drop(service); + + // Recover the network service from beacon chain store and fresh network config + let (recovered_service, _) = Service::new( + beacon_chain, + &NetworkConfig::default(), + &runtime.executor(), + log.clone(), + ) + .unwrap(); + assert_eq!( + enrs.len(), + recovered_service + .libp2p_service() + .lock() + .swarm + .enr_entries() + .collect::>() + .len(), + "Recovered DHT should have 2 enrs" + ); + } +} diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index 8a03d00c3c..03424c08e0 100644 --- 
a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -11,6 +11,7 @@ pub enum Error { PartialBeaconStateError, HotColdDBError(HotColdDBError), DBError { message: String }, + RlpError(String), } impl From for Error { diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index b15477e0ef..c4f26704df 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -171,6 +171,7 @@ pub enum DBColumn { BeaconStateRoots, BeaconHistoricalRoots, BeaconRandaoMixes, + DhtEnrs, } impl Into<&'static str> for DBColumn { @@ -187,6 +188,7 @@ impl Into<&'static str> for DBColumn { DBColumn::BeaconStateRoots => "bsr", DBColumn::BeaconHistoricalRoots => "bhr", DBColumn::BeaconRandaoMixes => "brm", + DBColumn::DhtEnrs => "dht", } } } From 81b028b805e4d5b9329f2b28b3fa19ac1997f54b Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 23 Jan 2020 19:25:13 +1100 Subject: [PATCH 14/16] Advanced error handling for syncing (#819) * Initial block processing thread design * Correct compilation issues * Increase logging and request from all given peers * Patch peer request bug * Adds fork choice to block processing * Adds logging for bug isolation * Patch syncing for chains with skip-slots * Bump block processing error logs * Improve logging for attestation processing * Randomize peer selection during sync * Resuming chains restarts from local finalized slot * Downgrades Arc batches to Rc batches * Add clippy fixes * Add advanced error handling for invalid/malicious batches * Downgrade Rc to Option to pass processed batches to chains * Squash edge case rpc and syncing bugs * Process empty batches which could end chains * Removes last_processed_id concept to account for ending skip-slot batches * Add logging for chain purges * Adds retries to re-request batch logging * Remove bug finding log * Add reviewers suggestions * Revert to master modifications * Line wrapping * Revert to master --- beacon_node/eth2-libp2p/src/rpc/handler.rs | 16 +- 
.../network/src/sync/range_sync/batch.rs | 24 +- .../network/src/sync/range_sync/chain.rs | 237 ++++++++++++------ .../src/sync/range_sync/chain_collection.rs | 2 + .../network/src/sync/range_sync/range.rs | 9 +- 5 files changed, 195 insertions(+), 93 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index 9d90203865..d6424f0afe 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -163,11 +163,16 @@ where *self = InboundSubstreamState::ResponsePendingSend { substream, closing } } - InboundSubstreamState::ResponseIdle(substream) => { - *self = InboundSubstreamState::ResponsePendingSend { - substream: substream.send(error), - closing: true, - }; + InboundSubstreamState::ResponseIdle(mut substream) => { + // check if the stream is already closed + if let Ok(Async::Ready(None)) = substream.poll() { + *self = InboundSubstreamState::Closing(substream); + } else { + *self = InboundSubstreamState::ResponsePendingSend { + substream: substream.send(error), + closing: true, + }; + } } InboundSubstreamState::Closing(substream) => { // let the stream close @@ -314,7 +319,6 @@ where substream: out, request, }; - debug!(self.log, "Added outbound substream id"; "substream_id" => id); self.outbound_substreams .insert(id, (awaiting_stream, delay_key)); } diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index e487e795b0..a606149699 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -3,9 +3,11 @@ use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::RequestId; use eth2_libp2p::PeerId; use fnv::FnvHashMap; +use ssz::Encode; use std::cmp::Ordering; use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; +use std::hash::{Hash, Hasher}; use std::ops::Sub; use types::{BeaconBlock, EthSpec, Hash256, Slot}; @@ -42,11 
+44,16 @@ pub struct Batch { /// The hash of the chain root to requested from the peer. pub head_root: Hash256, /// The peer that was originally assigned to the batch. - pub _original_peer: PeerId, + pub original_peer: PeerId, /// The peer that is currently assigned to the batch. pub current_peer: PeerId, - /// The number of retries this batch has undergone. + /// The number of retries this batch has undergone due to a failed request. pub retries: u8, + /// The number of times this batch has attempted to be re-downloaded and re-processed. This + /// occurs when a batch has been received but cannot be processed. + pub reprocess_retries: u8, + /// Marks the batch as undergoing a re-process, with a hash of the original blocks it received. + pub original_hash: Option, /// The blocks that have been downloaded. pub downloaded_blocks: Vec>, } @@ -66,9 +73,11 @@ impl Batch { start_slot, end_slot, head_root, - _original_peer: peer_id.clone(), + original_peer: peer_id.clone(), current_peer: peer_id, retries: 0, + reprocess_retries: 0, + original_hash: None, downloaded_blocks: Vec::new(), } } @@ -81,6 +90,15 @@ impl Batch { step: 1, } } + + /// This gets a hash that represents the blocks currently downloaded. This allows comparing a + /// previously downloaded batch of blocks with a new downloaded batch of blocks. + pub fn hash(&self) -> u64 { + // the hash used is the ssz-encoded list of blocks + let mut hasher = std::collections::hash_map::DefaultHasher::new(); + self.downloaded_blocks.as_ssz_bytes().hash(&mut hasher); + hasher.finish() + } } impl Ord for Batch { diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 165d27ab25..d378184c91 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -28,7 +28,7 @@ const BATCH_BUFFER_SIZE: u8 = 5; /// Invalid batches are attempted to be re-downloaded from other peers. 
If they cannot be processed /// after `INVALID_BATCH_LOOKUP_ATTEMPTS` times, the chain is considered faulty and all peers will /// be downvoted. -const _INVALID_BATCH_LOOKUP_ATTEMPTS: u8 = 3; +const INVALID_BATCH_LOOKUP_ATTEMPTS: u8 = 3; /// A return type for functions that act on a `Chain` which informs the caller whether the chain /// has been completed and should be removed or to be kept if further processing is @@ -71,9 +71,6 @@ pub struct SyncingChain { /// The next batch id that needs to be processed. to_be_processed_id: BatchId, - /// The last batch id that was processed. - last_processed_id: BatchId, - /// The current state of the chain. pub state: ChainSyncingState, @@ -122,7 +119,6 @@ impl SyncingChain { peer_pool, to_be_downloaded_id: BatchId(1), to_be_processed_id: BatchId(1), - last_processed_id: BatchId(0), state: ChainSyncingState::Stopped, current_processing_id: None, sync_send, @@ -131,6 +127,12 @@ impl SyncingChain { } } + /// Returns the latest slot number that has been processed. + fn current_processed_slot(&self) -> Slot { + self.start_slot + .saturating_add(self.to_be_processed_id.saturating_sub(1u64) * BLOCKS_PER_BATCH) + } + /// A batch of blocks has been received. This function gets run on all chains and should /// return Some if the request id matches a pending request on this chain, or None if it does /// not. @@ -221,16 +223,14 @@ impl SyncingChain { } // Check if there is a batch ready to be processed - while !self.completed_batches.is_empty() + if !self.completed_batches.is_empty() && self.completed_batches[0].id == self.to_be_processed_id { let batch = self.completed_batches.remove(0); - if batch.downloaded_blocks.is_empty() { - // The batch was empty, consider this processed and move to the next batch - self.processed_batches.push(batch); - *self.to_be_processed_id += 1; - continue; - } + + // Note: We now send empty batches to the processor in order to trigger the block + // processor result callback. 
This is done, because an empty batch could end a chain + // and the logic for removing chains and checking completion is in the callback. // send the batch to the batch processor thread return self.process_batch(batch); @@ -283,27 +283,56 @@ impl SyncingChain { let res = match result { BatchProcessResult::Success => { *self.to_be_processed_id += 1; - // This variable accounts for skip slots and batches that were not actually - // processed due to having no blocks. - self.last_processed_id = batch.id; - // Remove any validate batches awaiting validation. - // Only batches that have blocks are processed here, therefore all previous batches - // have been correct. - let last_processed_id = self.last_processed_id; - self.processed_batches - .retain(|batch| batch.id.0 >= last_processed_id.0); + // If the processed batch was not empty, we can validate previous invalidated + // blocks + if !batch.downloaded_blocks.is_empty() { + // Remove any batches awaiting validation. + // + // All blocks in processed_batches should be prior batches. As the current + // batch has been processed with blocks in it, all previous batches are valid. + // + // If a previous batch has been validated and it had been re-processed, downvote + // the original peer. + while !self.processed_batches.is_empty() { + let processed_batch = self.processed_batches.remove(0); + if *processed_batch.id >= *batch.id { + crit!(self.log, "A processed batch had a greater id than the current process id"; + "processed_id" => *processed_batch.id, + "current_id" => *batch.id); + } - // add the current batch to processed batches to be verified in the future. 
We are + if let Some(prev_hash) = processed_batch.original_hash { + // The validated batch has been re-processed + if prev_hash != processed_batch.hash() { + // The re-downloaded version was different + if processed_batch.current_peer != processed_batch.original_peer { + // A new peer sent the correct batch, the previous peer did not + // downvote the original peer + // + // If the same peer corrected it's mistake, we allow it.... for + // now. + debug!(self.log, "Re-processed batch validated. Downvoting original peer"; + "batch_id" => *processed_batch.id, + "original_peer" => format!("{}",processed_batch.original_peer), + "new_peer" => format!("{}", processed_batch.current_peer)); + network.downvote_peer(processed_batch.original_peer); + } + } + } + } + } + + // Add the current batch to processed batches to be verified in the future. We are // only uncertain about this batch, if it has not returned all blocks. - if batch.downloaded_blocks.len() < BLOCKS_PER_BATCH as usize { + if batch.downloaded_blocks.last().map(|block| block.slot) + != Some(batch.end_slot.saturating_sub(1u64)) + { self.processed_batches.push(batch); } // check if the chain has completed syncing - if self.start_slot + *self.last_processed_id * BLOCKS_PER_BATCH - >= self.target_head_slot - { + if self.current_processed_slot() >= self.target_head_slot { // chain is completed ProcessingResult::RemoveChain } else { @@ -320,19 +349,104 @@ impl SyncingChain { } } BatchProcessResult::Failed => { - // batch processing failed - // this could be because this batch is invalid, or a previous invalidated batch + warn!(self.log, "Batch processing failed"; "id" => *batch.id, "peer" => format!("{}", batch.current_peer)); + // The batch processing failed + // This could be because this batch is invalid, or a previous invalidated batch // is invalid. We need to find out which and downvote the peer that has sent us // an invalid batch. 
- // firstly remove any validated batches - self.handle_invalid_batch(network, batch) + // check that we have no exceeded the re-process retry counter + if batch.reprocess_retries > INVALID_BATCH_LOOKUP_ATTEMPTS { + // if a batch has exceeded the invalid batch lookup attempts limit, it means + // that it is likely all peers in this chain are are sending invalid batches + // repeatedly and are either malicious or faulty. We drop the chain and + // downvote all peers. + warn!(self.log, "Batch failed to download. Dropping chain and downvoting peers"; "id"=> *batch.id); + for peer_id in self.peer_pool.drain() { + network.downvote_peer(peer_id); + } + ProcessingResult::RemoveChain + } else { + // Handle this invalid batch, that is within the re-process retries limit. + self.handle_invalid_batch(network, batch); + ProcessingResult::KeepChain + } } }; Some(res) } + /// An invalid batch has been received that could not be processed. + /// + /// These events occur when a peer as successfully responded with blocks, but the blocks we + /// have received are incorrect or invalid. This indicates the peer has not performed as + /// intended and can result in downvoting a peer. + // TODO: Batches could have been partially downloaded due to RPC size-limit restrictions. We + // need to add logic for partial batch downloads. Potentially, if another peer returns the same + // batch, we try a partial download. + fn handle_invalid_batch(&mut self, network: &mut SyncNetworkContext, batch: Batch) { + // The current batch could not be processed, indicating either the current or previous + // batches are invalid + + // The previous batch could be incomplete due to the block sizes being too large to fit in + // a single RPC request or there could be consecutive empty batches which are not supposed + // to be there + + // The current (sub-optimal) strategy is to simply re-request all batches that could + // potentially be faulty. 
If a batch returns a different result than the original and + // results in successful processing, we downvote the original peer that sent us the batch. + + // If all batches return the same result, we try this process INVALID_BATCH_LOOKUP_ATTEMPTS + // times before considering the entire chain invalid and downvoting all peers. + + // Find any pre-processed batches awaiting validation + while !self.processed_batches.is_empty() { + let past_batch = self.processed_batches.remove(0); + *self.to_be_processed_id = std::cmp::min(*self.to_be_processed_id, *past_batch.id); + self.reprocess_batch(network, past_batch); + } + + // re-process the current batch + self.reprocess_batch(network, batch); + } + + /// This re-downloads and marks the batch as being re-processed. + /// + /// If the re-downloaded batch is different to the original and can be processed, the original + /// peer will be downvoted. + fn reprocess_batch(&mut self, network: &mut SyncNetworkContext, mut batch: Batch) { + // marks the batch as attempting to be reprocessed by hashing the downloaded blocks + batch.original_hash = Some(batch.hash()); + + // remove previously downloaded blocks + batch.downloaded_blocks.clear(); + + // increment the re-process counter + batch.reprocess_retries += 1; + + // attempt to find another peer to download the batch from (this potentially doubles up + // requests on a single peer) + let current_peer = &batch.current_peer; + let new_peer = self + .peer_pool + .iter() + .find(|peer| *peer != current_peer) + .unwrap_or_else(|| current_peer); + + batch.current_peer = new_peer.clone(); + + debug!(self.log, "Re-requesting batch"; + "start_slot" => batch.start_slot, + "end_slot" => batch.end_slot, + "id" => *batch.id, + "peer" => format!("{}", batch.current_peer), + "head_root"=> format!("{}", batch.head_root), + "retries" => batch.retries, + "re-processes" => batch.reprocess_retries); + self.send_batch(network, batch); + } + pub fn stop_syncing(&mut self) { self.state = 
ChainSyncingState::Stopped; } @@ -351,17 +465,11 @@ impl SyncingChain { // to start from this point and re-index all subsequent batches starting from one // (effectively creating a new chain). - if local_finalized_slot.as_u64() - > self - .start_slot - .as_u64() - .saturating_add(*self.last_processed_id * BLOCKS_PER_BATCH) - { + if local_finalized_slot > self.current_processed_slot() { debug!(self.log, "Updating chain's progress"; - "prev_completed_slot" => self.start_slot + *self.last_processed_id*BLOCKS_PER_BATCH, + "prev_completed_slot" => self.current_processed_slot(), "new_completed_slot" => local_finalized_slot.as_u64()); // Re-index batches - *self.last_processed_id = 0; *self.to_be_downloaded_id = 1; *self.to_be_processed_id = 1; @@ -386,7 +494,7 @@ impl SyncingChain { self.peer_pool.insert(peer_id.clone()); // do not request blocks if the chain is not syncing if let ChainSyncingState::Stopped = self.state { - debug!(self.log, "Peer added to a non-syncing chain"; "peer_id" => format!("{:?}", peer_id)); + debug!(self.log, "Peer added to a non-syncing chain"; "peer_id" => format!("{}", peer_id)); return; } @@ -465,47 +573,8 @@ impl SyncingChain { } } - /// An invalid batch has been received that could not be processed. - /// - /// These events occur when a peer as successfully responded with blocks, but the blocks we - /// have received are incorrect or invalid. This indicates the peer has not performed as - /// intended and can result in downvoting a peer. 
- fn handle_invalid_batch( - &mut self, - network: &mut SyncNetworkContext, - _batch: Batch, - ) -> ProcessingResult { - // The current batch could not be processed, indicating either the current or previous - // batches are invalid - - // The previous batch could be incomplete due to the block sizes being too large to fit in - // a single RPC request or there could be consecutive empty batches which are not supposed - // to be there - - // The current (sub-optimal) strategy is to simply re-request all batches that could - // potentially be faulty. If a batch returns a different result than the original and - // results in successful processing, we downvote the original peer that sent us the batch. - - // If all batches return the same result, we try this process INVALID_BATCH_LOOKUP_ATTEMPTS - // times before considering the entire chain invalid and downvoting all peers. - - // Firstly, check if there are any past batches that could be invalid. - if !self.processed_batches.is_empty() { - // try and re-download this batch from other peers - } - - //TODO: Implement this logic - // Currently just fail the chain, and drop all associated peers, removing them from the - // peer pool, to prevent re-status - for peer_id in self.peer_pool.drain() { - network.downvote_peer(peer_id); - } - ProcessingResult::RemoveChain - } - - /// Attempts to request the next required batches from the peer pool if the chain is syncing. - /// It will exhaust the peer pool and left over batches until the batch buffer is reached or - /// all peers are exhausted. + /// Attempts to request the next required batches from the peer pool if the chain is syncing. It will exhaust the peer + /// pool and left over batches until the batch buffer is reached or all peers are exhausted. 
fn request_batches(&mut self, network: &mut SyncNetworkContext) { if let ChainSyncingState::Syncing = self.state { while self.send_range_request(network) {} @@ -518,7 +587,12 @@ impl SyncingChain { // find the next pending batch and request it from the peer if let Some(peer_id) = self.get_next_peer() { if let Some(batch) = self.get_next_batch(peer_id) { - debug!(self.log, "Requesting batch"; "start_slot" => batch.start_slot, "end_slot" => batch.end_slot, "id" => *batch.id, "peer" => format!("{:?}", batch.current_peer), "head_root"=> format!("{}", batch.head_root)); + debug!(self.log, "Requesting batch"; + "start_slot" => batch.start_slot, + "end_slot" => batch.end_slot, + "id" => *batch.id, + "peer" => format!("{}", batch.current_peer), + "head_root"=> format!("{}", batch.head_root)); // send the batch self.send_batch(network, batch); return true; @@ -531,6 +605,7 @@ impl SyncingChain { /// /// This is used to create the next request. fn get_next_peer(&self) -> Option { + // TODO: Optimize this by combining with above two functions. 
// randomize the peers for load balancing let mut rng = rand::thread_rng(); let mut peers = self.peer_pool.iter().collect::>(); diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index a42589e9d0..2964ba0b69 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -310,6 +310,7 @@ impl ChainCollection { .block_root_tree .is_known_block_root(&chain.target_head_root) { + debug!(log, "Purging out of finalized chain"; "start_slot" => chain.start_slot, "end_slot" => chain.target_head_slot); chain.status_peers(network); false } else { @@ -322,6 +323,7 @@ impl ChainCollection { .block_root_tree .is_known_block_root(&chain.target_head_root) { + debug!(log, "Purging out of date head chain"; "start_slot" => chain.start_slot, "end_slot" => chain.target_head_slot); chain.status_peers(network); false } else { diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 60b9ea18be..ee7fb8ae72 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -248,8 +248,9 @@ impl RangeSync { }) .is_none(); if id_not_found { - // The request didn't exist in any `SyncingChain`. Could have been an old request. Log - // and ignore + // The request didn't exist in any `SyncingChain`. Could have been an old request or + // the chain was purged due to being out of date whilst a request was pending. Log + // and ignore. 
debug!(self.log, "Range response without matching request"; "peer" => format!("{:?}", peer_id), "request_id" => request_id); } } @@ -307,7 +308,9 @@ impl RangeSync { } Some((_, ProcessingResult::KeepChain)) => {} None => { - warn!(self.log, "No chains match the block processing id"; "id" => processing_id); + // This can happen if a chain gets purged due to being out of date whilst a + // batch process is in progress. + debug!(self.log, "No chains match the block processing id"; "id" => processing_id); } } } From 4a963423ca681b5041ad94cea2844dde3762de3b Mon Sep 17 00:00:00 2001 From: pscott <30843220+pscott@users.noreply.github.com> Date: Thu, 23 Jan 2020 12:27:38 +0400 Subject: [PATCH 15/16] Make docs clearer regarding local vs public testnets (#823) * Clear docs regarding local vs public testnets * Rename private to local --- book/src/SUMMARY.md | 2 +- book/src/{testnets.md => local-testnets.md} | 5 ++++- book/src/simple-testnet.md | 5 +++++ 3 files changed, 10 insertions(+), 2 deletions(-) rename book/src/{testnets.md => local-testnets.md} (95%) diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index df027e1b01..0126fe714d 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -7,8 +7,8 @@ * [Installation](./installation.md) * [Docker](./docker.md) * [CLI](./cli.md) - * [Testnets](./testnets.md) * [Simple Local Testnet](./simple-testnet.md) + * [Local Testnets](./local-testnets.md) * [API](./api.md) * [HTTP (RESTful JSON)](./http.md) * [/beacon](./http_beacon.md) diff --git a/book/src/testnets.md b/book/src/local-testnets.md similarity index 95% rename from book/src/testnets.md rename to book/src/local-testnets.md index a46016be4c..39a5d024b7 100644 --- a/book/src/testnets.md +++ b/book/src/local-testnets.md @@ -1,4 +1,7 @@ -# Testnets +# Local Testnets + +> This section is about running your own private local testnets. +> - If you wish to join the ongoing public testnet, please read [become a validator](./become-a-validator.md). 
The `beacon_node` and `validator` commands have a `testnet` sub-command to allow creating or connecting to Eth2 beacon chain testnets. diff --git a/book/src/simple-testnet.md b/book/src/simple-testnet.md index 540c81ffd2..06cc744b53 100644 --- a/book/src/simple-testnet.md +++ b/book/src/simple-testnet.md @@ -1,5 +1,10 @@ # Simple Local Testnet +> This guide is about running your own private local testnet. +> - If you wish to join the ongoing public testnet, please read [become a validator](./become-a-validator.md). + +This guide will help you setup your own private local testnet. + First, [install Lighthouse](./installation.md). Then, get the current unix time in seconds; you can use From d98c00389a8f01e27f85ab3b4ae3aae2aa4898fa Mon Sep 17 00:00:00 2001 From: pscott <30843220+pscott@users.noreply.github.com> Date: Thu, 23 Jan 2020 12:28:23 +0400 Subject: [PATCH 16/16] Fix typo in test name; fix clippy warning (#826) --- beacon_node/eth1/tests/test.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index 32f793aadd..2d44916c5a 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -807,7 +807,7 @@ mod fast { mod persist { use super::*; #[test] - fn test_persisit_caches() { + fn test_persist_caches() { let mut env = new_env(); let log = env.core_context().log; let runtime = env.runtime(); @@ -830,7 +830,7 @@ mod persist { }; let service = Service::new(config.clone(), log.clone()); let n = 10; - let deposits: Vec<_> = (0..n).into_iter().map(|_| random_deposit_data()).collect(); + let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); for deposit in &deposits { deposit_contract .deposit(runtime, deposit.clone())