diff --git a/Cargo.toml b/Cargo.toml index f087539e6a..d081ee74f7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,6 +12,7 @@ members = [ "eth2/utils/logging", "eth2/utils/eth2_hashing", "eth2/utils/lighthouse_metrics", + "eth2/utils/lighthouse_bootstrap", "eth2/utils/merkle_proof", "eth2/utils/int_to_bytes", "eth2/utils/serde_hex", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 32b7e92110..0e42990182 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -6,11 +6,14 @@ edition = "2018" [dependencies] eth2_config = { path = "../eth2/utils/eth2_config" } +lighthouse_bootstrap = { path = "../eth2/utils/lighthouse_bootstrap" } +beacon_chain = { path = "beacon_chain" } types = { path = "../eth2/types" } store = { path = "./store" } client = { path = "client" } version = { path = "version" } clap = "2.32.0" +rand = "0.7" slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_trace"] } slog-term = "^2.4.0" slog-async = "^2.3.0" diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 14b072a233..ae89ac1e13 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -5,18 +5,24 @@ authors = ["Paul Hauner ", "Age Manning `, however when it is `Borrowed` it holds a `RwLockReadGuard` (a +/// read-lock on some read/write-locked state). +/// +/// Only has a small subset of the functionality of a `std::borrow::Cow`. 
+pub enum BeaconStateCow<'a, T: EthSpec> { + Borrowed(RwLockReadGuard<'a, CheckPoint>), + Owned(BeaconState), +} + +impl<'a, T: EthSpec> BeaconStateCow<'a, T> { + pub fn maybe_as_mut_ref(&mut self) -> Option<&mut BeaconState> { + match self { + BeaconStateCow::Borrowed(_) => None, + BeaconStateCow::Owned(ref mut state) => Some(state), + } + } +} + +impl<'a, T: EthSpec> std::ops::Deref for BeaconStateCow<'a, T> { + type Target = BeaconState; + + fn deref(&self) -> &BeaconState { + match self { + BeaconStateCow::Borrowed(checkpoint) => &checkpoint.beacon_state, + BeaconStateCow::Owned(state) => &state, + } + } +} + pub trait BeaconChainTypes: Send + Sync + 'static { type Store: store::Store; type SlotClock: slot_clock::SlotClock; type LmdGhost: LmdGhost; + type Eth1Chain: Eth1ChainBackend; type EthSpec: types::EthSpec; } @@ -94,12 +128,10 @@ pub struct BeaconChain { /// Stores all operations (e.g., `Attestation`, `Deposit`, etc) that are candidates for /// inclusion in a block. pub op_pool: OperationPool, + /// Provides information from the Ethereum 1 (PoW) chain. + pub eth1_chain: Eth1Chain, /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. canonical_head: RwLock>, - /// The same state from `self.canonical_head`, but updated at the start of each slot with a - /// skip slot if no block is received. This is effectively a cache that avoids repeating calls - /// to `per_slot_processing`. - state: RwLock>, /// The root of the genesis block. pub genesis_block_root: Hash256, /// A state-machine that is updated with information from the network and chooses a canonical @@ -113,7 +145,7 @@ impl BeaconChain { /// Instantiate a new Beacon Chain, from genesis. 
pub fn from_genesis( store: Arc, - slot_clock: T::SlotClock, + eth1_backend: T::Eth1Chain, mut genesis_state: BeaconState, mut genesis_block: BeaconBlock, spec: ChainSpec, @@ -140,17 +172,25 @@ impl BeaconChain { genesis_state_root, )); - info!(log, "BeaconChain init"; - "genesis_validator_count" => genesis_state.validators.len(), - "genesis_state_root" => format!("{}", genesis_state_root), - "genesis_block_root" => format!("{}", genesis_block_root), + // Slot clock + let slot_clock = T::SlotClock::from_eth2_genesis( + spec.genesis_slot, + genesis_state.genesis_time, + Duration::from_millis(spec.milliseconds_per_slot), + ) + .ok_or_else(|| Error::SlotClockDidNotStart)?; + + info!(log, "Beacon chain initialized from genesis"; + "validator_count" => genesis_state.validators.len(), + "state_root" => format!("{}", genesis_state_root), + "block_root" => format!("{}", genesis_block_root), ); Ok(Self { spec, slot_clock, op_pool: OperationPool::new(), - state: RwLock::new(genesis_state), + eth1_chain: Eth1Chain::new(eth1_backend), canonical_head, genesis_block_root, fork_choice: ForkChoice::new(store.clone(), &genesis_block, genesis_block_root), @@ -162,6 +202,7 @@ impl BeaconChain { /// Attempt to load an existing instance from the given `store`. 
pub fn from_store( store: Arc, + eth1_backend: T::Eth1Chain, spec: ChainSpec, log: Logger, ) -> Result>, Error> { @@ -172,24 +213,34 @@ impl BeaconChain { Ok(Some(p)) => p, }; - let slot_clock = T::SlotClock::new( + let state = &p.canonical_head.beacon_state; + + let slot_clock = T::SlotClock::from_eth2_genesis( spec.genesis_slot, - p.state.genesis_time, - spec.seconds_per_slot, - ); + state.genesis_time, + Duration::from_millis(spec.milliseconds_per_slot), + ) + .ok_or_else(|| Error::SlotClockDidNotStart)?; let last_finalized_root = p.canonical_head.beacon_state.finalized_checkpoint.root; let last_finalized_block = &p.canonical_head.beacon_block; - let op_pool = p.op_pool.into_operation_pool(&p.state, &spec); + let op_pool = p.op_pool.into_operation_pool(state, &spec); + + info!(log, "Beacon chain initialized from store"; + "head_root" => format!("{}", p.canonical_head.beacon_block_root), + "head_epoch" => format!("{}", p.canonical_head.beacon_block.slot.epoch(T::EthSpec::slots_per_epoch())), + "finalized_root" => format!("{}", last_finalized_root), + "finalized_epoch" => format!("{}", last_finalized_block.slot.epoch(T::EthSpec::slots_per_epoch())), + ); Ok(Some(BeaconChain { spec, slot_clock, fork_choice: ForkChoice::new(store.clone(), last_finalized_block, last_finalized_root), op_pool, + eth1_chain: Eth1Chain::new(eth1_backend), canonical_head: RwLock::new(p.canonical_head), - state: RwLock::new(p.state), genesis_block_root: p.genesis_block_root, store, log, @@ -204,7 +255,6 @@ impl BeaconChain { canonical_head: self.canonical_head.read().clone(), op_pool: PersistedOperationPool::from_operation_pool(&self.op_pool), genesis_block_root: self.genesis_block_root, - state: self.state.read().clone(), }; let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes()); @@ -215,6 +265,25 @@ impl BeaconChain { Ok(()) } + /// Returns the slot _right now_ according to `self.slot_clock`. Returns `Err` if the slot is + /// unavailable. 
+ /// + /// The slot might be unavailable due to an error with the system clock, or if the present time + /// is before genesis (i.e., a negative slot). + pub fn slot(&self) -> Result { + self.slot_clock.now().ok_or_else(|| Error::UnableToReadSlot) + } + + /// Returns the epoch _right now_ according to `self.slot_clock`. Returns `Err` if the epoch is + /// unavailable. + /// + /// The epoch might be unavailable due to an error with the system clock, or if the present time + /// is before genesis (i.e., a negative epoch). + pub fn epoch(&self) -> Result { + self.slot() + .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) + } + /// Returns the beacon block body for each beacon block root in `roots`. /// /// Fails if any root in `roots` does not have a corresponding block. @@ -300,12 +369,6 @@ impl BeaconChain { Ok(self.store.get(block_root)?) } - /// Returns a read-lock guarded `BeaconState` which is the `canonical_head` that has been - /// updated to match the current slot clock. - pub fn speculative_state(&self) -> Result>, Error> { - Ok(self.state.read()) - } - /// Returns a read-lock guarded `CheckPoint` struct for reading the head (as chosen by the /// fork-choice rule). /// @@ -316,46 +379,74 @@ impl BeaconChain { self.canonical_head.read() } + /// Returns the `BeaconState` at the given slot. + /// + /// May return: + /// + /// - A new state loaded from the database (for states prior to the head) + /// - A reference to the head state (note: this keeps a read lock on the head, try to use + /// sparingly). + /// - The head state, but with skipped slots (for states later than the head). + /// + /// Returns `None` when the state is not found in the database or there is an error skipping + /// to a future state. 
+ pub fn state_at_slot(&self, slot: Slot) -> Result, Error> { + let head_state = &self.head().beacon_state; + + if slot == head_state.slot { + Ok(BeaconStateCow::Borrowed(self.head())) + } else if slot > head_state.slot { + let head_state_slot = head_state.slot; + let mut state = head_state.clone(); + drop(head_state); + while state.slot < slot { + match per_slot_processing(&mut state, &self.spec) { + Ok(()) => (), + Err(e) => { + warn!( + self.log, + "Unable to load state at slot"; + "error" => format!("{:?}", e), + "head_slot" => head_state_slot, + "requested_slot" => slot + ); + return Err(Error::NoStateForSlot(slot)); + } + }; + } + Ok(BeaconStateCow::Owned(state)) + } else { + let state_root = self + .rev_iter_state_roots() + .find(|(_root, s)| *s == slot) + .map(|(root, _slot)| root) + .ok_or_else(|| Error::NoStateForSlot(slot))?; + + Ok(BeaconStateCow::Owned( + self.store + .get(&state_root)? + .ok_or_else(|| Error::NoStateForSlot(slot))?, + )) + } + } + + /// Returns the `BeaconState` the current slot (viz., `self.slot()`). + /// + /// - A reference to the head state (note: this keeps a read lock on the head, try to use + /// sparingly). + /// - The head state, but with skipped slots (for states later than the head). + /// + /// Returns `None` when there is an error skipping to a future state or the slot clock cannot + /// be read. + pub fn state_now(&self) -> Result, Error> { + self.state_at_slot(self.slot()?) + } + /// Returns the slot of the highest block in the canonical chain. pub fn best_slot(&self) -> Slot { self.canonical_head.read().beacon_block.slot } - /// Ensures the current canonical `BeaconState` has been transitioned to match the `slot_clock`. 
- pub fn catchup_state(&self) -> Result<(), Error> { - let spec = &self.spec; - - let present_slot = match self.slot_clock.present_slot() { - Ok(Some(slot)) => slot, - _ => return Err(Error::UnableToReadSlot), - }; - - if self.state.read().slot < present_slot { - let mut state = self.state.write(); - - // If required, transition the new state to the present slot. - for _ in state.slot.as_u64()..present_slot.as_u64() { - // Ensure the next epoch state caches are built in case of an epoch transition. - state.build_committee_cache(RelativeEpoch::Next, spec)?; - - per_slot_processing(&mut *state, spec)?; - } - - state.build_all_caches(spec)?; - } - - Ok(()) - } - - /// Build all of the caches on the current state. - /// - /// Ideally this shouldn't be required, however we leave it here for testing. - pub fn ensure_state_caches_are_built(&self) -> Result<(), Error> { - self.state.write().build_all_caches(&self.spec)?; - - Ok(()) - } - /// Returns the validator index (if any) for the given public key. /// /// Information is retrieved from the present `beacon_state.validators`. @@ -368,26 +459,10 @@ impl BeaconChain { None } - /// Reads the slot clock, returns `None` if the slot is unavailable. - /// - /// The slot might be unavailable due to an error with the system clock, or if the present time - /// is before genesis (i.e., a negative slot). - /// - /// This is distinct to `present_slot`, which simply reads the latest state. If a - /// call to `read_slot_clock` results in a higher slot than a call to `present_slot`, - /// `self.state` should undergo per slot processing. - pub fn read_slot_clock(&self) -> Option { - match self.slot_clock.present_slot() { - Ok(Some(some_slot)) => Some(some_slot), - Ok(None) => None, - _ => None, - } - } - /// Reads the slot clock (see `self.read_slot_clock()` and returns the number of slots since /// genesis. 
pub fn slots_since_genesis(&self) -> Option { - let now = self.read_slot_clock()?; + let now = self.slot().ok()?; let genesis_slot = self.spec.genesis_slot; if now < genesis_slot { @@ -397,32 +472,35 @@ impl BeaconChain { } } - /// Returns slot of the present state. - /// - /// This is distinct to `read_slot_clock`, which reads from the actual system clock. If - /// `self.state` has not been transitioned it is possible for the system clock to be on a - /// different slot to what is returned from this call. - pub fn present_slot(&self) -> Slot { - self.state.read().slot - } - /// Returns the block proposer for a given slot. /// /// Information is read from the present `beacon_state` shuffling, only information from the /// present epoch is available. pub fn block_proposer(&self, slot: Slot) -> Result { - // Ensures that the present state has been advanced to the present slot, skipping slots if - // blocks are not present. - self.catchup_state()?; + let epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch()); + let head_state = &self.head().beacon_state; - // TODO: permit lookups of the proposer at any slot. - let index = self.state.read().get_beacon_proposer_index( - slot, - RelativeEpoch::Current, - &self.spec, - )?; + let mut state = if epoch(slot) == epoch(head_state.slot) { + BeaconStateCow::Borrowed(self.head()) + } else { + self.state_at_slot(slot)? + }; - Ok(index) + if let Some(state) = state.maybe_as_mut_ref() { + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + } + + if epoch(state.slot) != epoch(slot) { + return Err(Error::InvariantViolated(format!( + "Epochs in consistent in proposer lookup: state: {}, requested: {}", + epoch(state.slot), + epoch(slot) + ))); + } + + state + .get_beacon_proposer_index(slot, RelativeEpoch::Current, &self.spec) + .map_err(Into::into) } /// Returns the attestation slot and shard for a given validator index. 
@@ -432,15 +510,31 @@ impl BeaconChain { pub fn validator_attestation_slot_and_shard( &self, validator_index: usize, - ) -> Result, BeaconStateError> { - trace!( - "BeaconChain::validator_attestation_slot_and_shard: validator_index: {}", - validator_index - ); - if let Some(attestation_duty) = self - .state - .read() - .get_attestation_duties(validator_index, RelativeEpoch::Current)? + epoch: Epoch, + ) -> Result, Error> { + let as_epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch()); + let head_state = &self.head().beacon_state; + + let mut state = if epoch == as_epoch(head_state.slot) { + BeaconStateCow::Borrowed(self.head()) + } else { + self.state_at_slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))? + }; + + if let Some(state) = state.maybe_as_mut_ref() { + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + } + + if as_epoch(state.slot) != epoch { + return Err(Error::InvariantViolated(format!( + "Epochs in consistent in attestation duties lookup: state: {}, requested: {}", + as_epoch(state.slot), + epoch + ))); + } + + if let Some(attestation_duty) = + state.get_attestation_duties(validator_index, RelativeEpoch::Current)? { Ok(Some((attestation_duty.slot, attestation_duty.shard))) } else { @@ -448,11 +542,16 @@ impl BeaconChain { } } - /// Produce an `AttestationData` that is valid for the present `slot` and given `shard`. + /// Produce an `AttestationData` that is valid for the given `slot` `shard`. /// - /// Attests to the canonical chain. - pub fn produce_attestation_data(&self, shard: u64) -> Result { - let state = self.state.read(); + /// Always attests to the canonical chain. 
+ pub fn produce_attestation_data( + &self, + shard: u64, + slot: Slot, + ) -> Result { + let state = self.state_at_slot(slot)?; + let head_block_root = self.head().beacon_block_root; let head_block_slot = self.head().beacon_block.slot; @@ -738,8 +837,19 @@ impl BeaconChain { } else { // Provide the attestation to fork choice, updating the validator latest messages but // _without_ finding and updating the head. - self.fork_choice - .process_attestation(&state, &attestation, block)?; + if let Err(e) = self + .fork_choice + .process_attestation(&state, &attestation, block) + { + error!( + self.log, + "Add attestation to fork choice failed"; + "fork_choice_integrity" => format!("{:?}", self.fork_choice.verify_integrity()), + "beacon_block_root" => format!("{}", attestation.data.beacon_block_root), + "error" => format!("{:?}", e) + ); + return Err(e.into()); + } // Provide the valid attestation to op pool, which may choose to retain the // attestation for inclusion in a future block. @@ -764,14 +874,36 @@ impl BeaconChain { /// Accept some exit and queue it for inclusion in an appropriate block. pub fn process_voluntary_exit(&self, exit: VoluntaryExit) -> Result<(), ExitValidationError> { - self.op_pool - .insert_voluntary_exit(exit, &*self.state.read(), &self.spec) + match self.state_now() { + Ok(state) => self + .op_pool + .insert_voluntary_exit(exit, &*state, &self.spec), + Err(e) => { + error!( + &self.log, + "Unable to process voluntary exit"; + "error" => format!("{:?}", e), + "reason" => "no state" + ); + Ok(()) + } + } } /// Accept some transfer and queue it for inclusion in an appropriate block. 
pub fn process_transfer(&self, transfer: Transfer) -> Result<(), TransferValidationError> { - self.op_pool - .insert_transfer(transfer, &*self.state.read(), &self.spec) + match self.state_now() { + Ok(state) => self.op_pool.insert_transfer(transfer, &*state, &self.spec), + Err(e) => { + error!( + &self.log, + "Unable to process transfer"; + "error" => format!("{:?}", e), + "reason" => "no state" + ); + Ok(()) + } + } } /// Accept some proposer slashing and queue it for inclusion in an appropriate block. @@ -779,8 +911,21 @@ impl BeaconChain { &self, proposer_slashing: ProposerSlashing, ) -> Result<(), ProposerSlashingValidationError> { - self.op_pool - .insert_proposer_slashing(proposer_slashing, &*self.state.read(), &self.spec) + match self.state_now() { + Ok(state) => { + self.op_pool + .insert_proposer_slashing(proposer_slashing, &*state, &self.spec) + } + Err(e) => { + error!( + &self.log, + "Unable to process proposer slashing"; + "error" => format!("{:?}", e), + "reason" => "no state" + ); + Ok(()) + } + } } /// Accept some attester slashing and queue it for inclusion in an appropriate block. @@ -788,8 +933,21 @@ impl BeaconChain { &self, attester_slashing: AttesterSlashing, ) -> Result<(), AttesterSlashingValidationError> { - self.op_pool - .insert_attester_slashing(attester_slashing, &*self.state.read(), &self.spec) + match self.state_now() { + Ok(state) => { + self.op_pool + .insert_attester_slashing(attester_slashing, &*state, &self.spec) + } + Err(e) => { + error!( + &self.log, + "Unable to process attester slashing"; + "error" => format!("{:?}", e), + "reason" => "no state" + ); + Ok(()) + } + } } /// Accept some block and attempt to add it to block DAG. 
@@ -803,20 +961,23 @@ impl BeaconChain { let full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); let finalized_slot = self - .state - .read() + .head() + .beacon_state .finalized_checkpoint .epoch .start_slot(T::EthSpec::slots_per_epoch()); - if block.slot <= finalized_slot { - return Ok(BlockProcessingOutcome::FinalizedSlot); - } - if block.slot == 0 { return Ok(BlockProcessingOutcome::GenesisBlock); } + if block.slot <= finalized_slot { + return Ok(BlockProcessingOutcome::WouldRevertFinalizedSlot { + block_slot: block.slot, + finalized_slot: finalized_slot, + }); + } + let block_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOCK_ROOT); let block_root = block.canonical_root(); @@ -827,9 +988,7 @@ impl BeaconChain { return Ok(BlockProcessingOutcome::GenesisBlock); } - let present_slot = self - .read_slot_clock() - .ok_or_else(|| Error::UnableToReadSlot)?; + let present_slot = self.slot()?; if block.slot > present_slot { return Ok(BlockProcessingOutcome::FutureSlot { @@ -916,7 +1075,10 @@ impl BeaconChain { let state_root = state.canonical_root(); if block.state_root != state_root { - return Ok(BlockProcessingOutcome::StateRootMismatch); + return Ok(BlockProcessingOutcome::StateRootMismatch { + block: block.state_root, + local: state_root, + }); } metrics::stop_timer(state_root_timer); @@ -952,10 +1114,10 @@ impl BeaconChain { if let Err(e) = self.fork_choice.process_block(&state, &block, block_root) { error!( self.log, - "fork choice failed to process_block"; - "error" => format!("{:?}", e), + "Add block to fork choice failed"; + "fork_choice_integrity" => format!("{:?}", self.fork_choice.verify_integrity()), "block_root" => format!("{}", block_root), - "block_slot" => format!("{}", block.slot) + "error" => format!("{:?}", e), ) } @@ -988,7 +1150,7 @@ impl BeaconChain { Ok(BlockProcessingOutcome::Processed { block_root }) } - /// Produce a new block at the specified slot. + /// Produce a new block at the given `slot`. 
/// /// The produced block will not be inherently valid, it must be signed by a block producer. /// Block signing is out of the scope of this function and should be done by a separate program. @@ -997,25 +1159,11 @@ impl BeaconChain { randao_reveal: Signature, slot: Slot, ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { - let state = self.state.read().clone(); + let state = self + .state_at_slot(slot - 1) + .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?; - self.produce_block_on_state(state, slot, randao_reveal) - } - - /// Produce a new block at the current slot - /// - /// Calls `produce_block`, with the slot parameter set as the current. - /// - /// ** This function is probably obsolete (was for previous RPC), and can probably be removed ** - pub fn produce_current_block( - &self, - randao_reveal: Signature, - ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { - let slot = self - .read_slot_clock() - .ok_or_else(|| BlockProductionError::UnableToReadSlot)?; - - self.produce_block(randao_reveal, slot) + self.produce_block_on_state(state.clone(), slot, randao_reveal) } /// Produce a block for some `slot` upon the given `state`. @@ -1064,16 +1212,12 @@ impl BeaconChain { body: BeaconBlockBody { randao_reveal, // TODO: replace with real data. 
- eth1_data: Eth1Data { - deposit_count: state.eth1_data.deposit_count, - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - }, + eth1_data: self.eth1_chain.eth1_data_for_block_production(&state)?, graffiti, proposer_slashings: proposer_slashings.into(), attester_slashings: attester_slashings.into(), attestations: self.op_pool.get_attestations(&state, &self.spec).into(), - deposits: self.op_pool.get_deposits(&state).into(), + deposits: self.eth1_chain.deposits_for_block_inclusion(&state)?.into(), voluntary_exits: self.op_pool.get_voluntary_exits(&state, &self.spec).into(), transfers: self.op_pool.get_transfers(&state, &self.spec).into(), }, @@ -1184,32 +1328,15 @@ impl BeaconChain { } /// Update the canonical head to `new_head`. - fn update_canonical_head(&self, new_head: CheckPoint) -> Result<(), Error> { + fn update_canonical_head(&self, mut new_head: CheckPoint) -> Result<(), Error> { let timer = metrics::start_timer(&metrics::UPDATE_HEAD_TIMES); + new_head.beacon_state.build_all_caches(&self.spec)?; + // Update the checkpoint that stores the head of the chain at the time it received the // block. *self.canonical_head.write() = new_head; - // Update the always-at-the-present-slot state we keep around for performance gains. - *self.state.write() = { - let mut state = self.canonical_head.read().beacon_state.clone(); - - let present_slot = match self.slot_clock.present_slot() { - Ok(Some(slot)) => slot, - _ => return Err(Error::UnableToReadSlot), - }; - - // If required, transition the new state to the present slot. - for _ in state.slot.as_u64()..present_slot.as_u64() { - per_slot_processing(&mut state, &self.spec)?; - } - - state.build_all_caches(&self.spec)?; - - state - }; - // Save `self` to `self.store`. 
self.persist()?; diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs new file mode 100644 index 0000000000..2a3537020a --- /dev/null +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -0,0 +1,332 @@ +use crate::{BeaconChain, BeaconChainTypes}; +use eth2_hashing::hash; +use lighthouse_bootstrap::Bootstrapper; +use merkle_proof::MerkleTree; +use rayon::prelude::*; +use slog::Logger; +use ssz::{Decode, Encode}; +use state_processing::initialize_beacon_state_from_eth1; +use std::fs::File; +use std::io::prelude::*; +use std::path::PathBuf; +use std::sync::Arc; +use std::time::SystemTime; +use tree_hash::{SignedRoot, TreeHash}; +use types::{ + BeaconBlock, BeaconState, ChainSpec, Deposit, DepositData, Domain, EthSpec, Fork, Hash256, + Keypair, PublicKey, Signature, +}; + +enum BuildStrategy { + FromGenesis { + genesis_state: Box>, + genesis_block: Box>, + }, + LoadFromStore, +} + +pub struct BeaconChainBuilder { + build_strategy: BuildStrategy, + spec: ChainSpec, + log: Logger, +} + +impl BeaconChainBuilder { + pub fn recent_genesis( + keypairs: &[Keypair], + minutes: u64, + spec: ChainSpec, + log: Logger, + ) -> Result { + Self::quick_start(recent_genesis_time(minutes), keypairs, spec, log) + } + + pub fn quick_start( + genesis_time: u64, + keypairs: &[Keypair], + spec: ChainSpec, + log: Logger, + ) -> Result { + let genesis_state = interop_genesis_state(keypairs, genesis_time, &spec)?; + + Ok(Self::from_genesis_state(genesis_state, spec, log)) + } + + pub fn yaml_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result { + let file = File::open(file.clone()) + .map_err(|e| format!("Unable to open YAML genesis state file {:?}: {:?}", file, e))?; + + let genesis_state = serde_yaml::from_reader(file) + .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))?; + + Ok(Self::from_genesis_state(genesis_state, spec, log)) + } + + pub fn ssz_state(file: &PathBuf, spec: 
ChainSpec, log: Logger) -> Result { + let mut file = File::open(file.clone()) + .map_err(|e| format!("Unable to open SSZ genesis state file {:?}: {:?}", file, e))?; + + let mut bytes = vec![]; + file.read_to_end(&mut bytes) + .map_err(|e| format!("Failed to read SSZ file: {:?}", e))?; + + let genesis_state = BeaconState::from_ssz_bytes(&bytes) + .map_err(|e| format!("Unable to parse SSZ genesis state file: {:?}", e))?; + + Ok(Self::from_genesis_state(genesis_state, spec, log)) + } + + pub fn json_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result { + let file = File::open(file.clone()) + .map_err(|e| format!("Unable to open JSON genesis state file {:?}: {:?}", file, e))?; + + let genesis_state = serde_json::from_reader(file) + .map_err(|e| format!("Unable to parse JSON genesis state file: {:?}", e))?; + + Ok(Self::from_genesis_state(genesis_state, spec, log)) + } + + pub fn http_bootstrap(server: &str, spec: ChainSpec, log: Logger) -> Result { + let bootstrapper = Bootstrapper::connect(server.to_string(), &log) + .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; + + let (genesis_state, genesis_block) = bootstrapper + .genesis() + .map_err(|e| format!("Failed to bootstrap genesis state: {}", e))?; + + Ok(Self { + build_strategy: BuildStrategy::FromGenesis { + genesis_block: Box::new(genesis_block), + genesis_state: Box::new(genesis_state), + }, + spec, + log, + }) + } + + fn from_genesis_state( + genesis_state: BeaconState, + spec: ChainSpec, + log: Logger, + ) -> Self { + Self { + build_strategy: BuildStrategy::FromGenesis { + genesis_block: Box::new(genesis_block(&genesis_state, &spec)), + genesis_state: Box::new(genesis_state), + }, + spec, + log, + } + } + + pub fn from_store(spec: ChainSpec, log: Logger) -> Self { + Self { + build_strategy: BuildStrategy::LoadFromStore, + spec, + log, + } + } + + pub fn build( + self, + store: Arc, + eth1_backend: T::Eth1Chain, + ) -> Result, String> { + Ok(match self.build_strategy { + 
BuildStrategy::LoadFromStore => { + BeaconChain::from_store(store, eth1_backend, self.spec, self.log) + .map_err(|e| format!("Error loading BeaconChain from database: {:?}", e))? + .ok_or_else(|| format!("Unable to find exising BeaconChain in database."))? + } + BuildStrategy::FromGenesis { + genesis_block, + genesis_state, + } => BeaconChain::from_genesis( + store, + eth1_backend, + genesis_state.as_ref().clone(), + genesis_block.as_ref().clone(), + self.spec, + self.log, + ) + .map_err(|e| format!("Failed to initialize new beacon chain: {:?}", e))?, + }) + } +} + +fn genesis_block(genesis_state: &BeaconState, spec: &ChainSpec) -> BeaconBlock { + let mut genesis_block = BeaconBlock::empty(&spec); + + genesis_block.state_root = genesis_state.canonical_root(); + + genesis_block +} + +/// Builds a genesis state as defined by the Eth2 interop procedure (see below). +/// +/// Reference: +/// https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start +fn interop_genesis_state( + keypairs: &[Keypair], + genesis_time: u64, + spec: &ChainSpec, +) -> Result, String> { + let eth1_block_hash = Hash256::from_slice(&[0x42; 32]); + let eth1_timestamp = 2_u64.pow(40); + let amount = spec.max_effective_balance; + + let withdrawal_credentials = |pubkey: &PublicKey| { + let mut credentials = hash(&pubkey.as_ssz_bytes()); + credentials[0] = spec.bls_withdrawal_prefix_byte; + Hash256::from_slice(&credentials) + }; + + let datas = keypairs + .into_par_iter() + .map(|keypair| { + let mut data = DepositData { + withdrawal_credentials: withdrawal_credentials(&keypair.pk), + pubkey: keypair.pk.clone().into(), + amount, + signature: Signature::empty_signature().into(), + }; + + let domain = spec.get_domain( + spec.genesis_slot.epoch(T::slots_per_epoch()), + Domain::Deposit, + &Fork::default(), + ); + data.signature = Signature::new(&data.signed_root()[..], domain, &keypair.sk).into(); + + data + }) + .collect::>(); + + let deposit_root_leaves = 
datas + .par_iter() + .map(|data| Hash256::from_slice(&data.tree_hash_root())) + .collect::>(); + + let mut proofs = vec![]; + for i in 1..=deposit_root_leaves.len() { + // Note: this implementation is not so efficient. + // + // If `MerkleTree` had a push method, we could just build one tree and sample it instead of + // rebuilding the tree for each deposit. + let tree = MerkleTree::create( + &deposit_root_leaves[0..i], + spec.deposit_contract_tree_depth as usize, + ); + + let (_, mut proof) = tree.generate_proof(i - 1, spec.deposit_contract_tree_depth as usize); + proof.push(Hash256::from_slice(&int_to_bytes32(i))); + + assert_eq!( + proof.len(), + spec.deposit_contract_tree_depth as usize + 1, + "Deposit proof should be correct len" + ); + + proofs.push(proof); + } + + let deposits = datas + .into_par_iter() + .zip(proofs.into_par_iter()) + .map(|(data, proof)| (data, proof.into())) + .map(|(data, proof)| Deposit { proof, data }) + .collect::>(); + + let mut state = + initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits, spec) + .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; + + state.genesis_time = genesis_time; + + // Invalid all the caches after all the manual state surgery. + state.drop_all_caches(); + + Ok(state) +} + +/// Returns `int` as little-endian bytes with a length of 32. +fn int_to_bytes32(int: usize) -> Vec { + let mut vec = int.to_le_bytes().to_vec(); + vec.resize(32, 0); + vec +} + +/// Returns the system time, mod 30 minutes. +/// +/// Used for easily creating testnets. 
+fn recent_genesis_time(minutes: u64) -> u64 { + let now = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(); + let secs_after_last_period = now.checked_rem(minutes * 60).unwrap_or(0); + now - secs_after_last_period +} + +#[cfg(test)] +mod test { + use super::*; + use types::{test_utils::generate_deterministic_keypairs, EthSpec, MinimalEthSpec}; + + type TestEthSpec = MinimalEthSpec; + + #[test] + fn interop_state() { + let validator_count = 16; + let genesis_time = 42; + let spec = &TestEthSpec::default_spec(); + + let keypairs = generate_deterministic_keypairs(validator_count); + + let state = interop_genesis_state::(&keypairs, genesis_time, spec) + .expect("should build state"); + + assert_eq!( + state.eth1_data.block_hash, + Hash256::from_slice(&[0x42; 32]), + "eth1 block hash should be co-ordinated junk" + ); + + assert_eq!( + state.genesis_time, genesis_time, + "genesis time should be as specified" + ); + + for b in &state.balances { + assert_eq!( + *b, spec.max_effective_balance, + "validator balances should be max effective balance" + ); + } + + for v in &state.validators { + let creds = v.withdrawal_credentials.as_bytes(); + assert_eq!( + creds[0], spec.bls_withdrawal_prefix_byte, + "first byte of withdrawal creds should be bls prefix" + ); + assert_eq!( + &creds[1..], + &hash(&v.pubkey.as_ssz_bytes())[1..], + "rest of withdrawal creds should be pubkey hash" + ) + } + + assert_eq!( + state.balances.len(), + validator_count, + "validator balances len should be correct" + ); + + assert_eq!( + state.validators.len(), + validator_count, + "validator count should be correct" + ); + } +} diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index cd3a07bcd0..0306899288 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -1,3 +1,4 @@ +use crate::eth1_chain::Error as Eth1ChainError; use crate::fork_choice::Error as ForkChoiceError; use 
state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::BlockProcessingError; @@ -23,6 +24,8 @@ pub enum BeaconChainError { previous_epoch: Epoch, new_epoch: Epoch, }, + SlotClockDidNotStart, + NoStateForSlot(Slot), UnableToFindTargetRoot(Slot), BeaconStateError(BeaconStateError), DBInconsistent(String), @@ -31,24 +34,30 @@ pub enum BeaconChainError { MissingBeaconBlock(Hash256), MissingBeaconState(Hash256), SlotProcessingError(SlotProcessingError), + UnableToAdvanceState(String), NoStateForAttestation { beacon_block_root: Hash256, }, AttestationValidationError(AttestationValidationError), + /// Returned when an internal check fails, indicating corrupt data. + InvariantViolated(String), } easy_from_to!(SlotProcessingError, BeaconChainError); +easy_from_to!(AttestationValidationError, BeaconChainError); #[derive(Debug, PartialEq)] pub enum BlockProductionError { UnableToGetBlockRootFromState, UnableToReadSlot, + UnableToProduceAtSlot(Slot), SlotProcessingError(SlotProcessingError), BlockProcessingError(BlockProcessingError), + Eth1ChainError(Eth1ChainError), BeaconStateError(BeaconStateError), } easy_from_to!(BlockProcessingError, BlockProductionError); easy_from_to!(BeaconStateError, BlockProductionError); easy_from_to!(SlotProcessingError, BlockProductionError); -easy_from_to!(AttestationValidationError, BeaconChainError); +easy_from_to!(Eth1ChainError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs new file mode 100644 index 0000000000..e4ccee3ba4 --- /dev/null +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -0,0 +1,110 @@ +use crate::BeaconChainTypes; +use eth2_hashing::hash; +use std::marker::PhantomData; +use types::{BeaconState, Deposit, Eth1Data, EthSpec, Hash256}; + +type Result = std::result::Result; + +/// Holds an `Eth1ChainBackend` and serves requests from the `BeaconChain`. 
+pub struct Eth1Chain { + backend: T::Eth1Chain, +} + +impl Eth1Chain { + pub fn new(backend: T::Eth1Chain) -> Self { + Self { backend } + } + + /// Returns the `Eth1Data` that should be included in a block being produced for the given + /// `state`. + pub fn eth1_data_for_block_production( + &self, + state: &BeaconState, + ) -> Result { + self.backend.eth1_data(state) + } + + /// Returns a list of `Deposits` that may be included in a block. + /// + /// Including all of the returned `Deposits` in a block should _not_ cause it to become + /// invalid. + pub fn deposits_for_block_inclusion( + &self, + state: &BeaconState, + ) -> Result> { + let deposits = self.backend.queued_deposits(state)?; + + // TODO: truncate deposits if required. + + Ok(deposits) + } +} + +#[derive(Debug, PartialEq)] +pub enum Error { + /// Unable to return an Eth1Data for the given epoch. + EpochUnavailable, + /// An error from the backend service (e.g., the web3 data fetcher). + BackendError(String), +} + +pub trait Eth1ChainBackend: Sized + Send + Sync { + fn new(server: String) -> Result; + + /// Returns the `Eth1Data` that should be included in a block being produced for the given + /// `state`. + fn eth1_data(&self, beacon_state: &BeaconState) -> Result; + + /// Returns all `Deposits` between `state.eth1_deposit_index` and + /// `state.eth1_data.deposit_count`. + /// + /// # Note: + /// + /// It is possible that not all returned `Deposits` can be included in a block. E.g., there may + /// be more than `MAX_DEPOSIT_COUNT` or the churn may be too high. 
+    fn queued_deposits(&self, beacon_state: &BeaconState) -> Result>;
+}
+
+pub struct InteropEth1ChainBackend {
+    _phantom: PhantomData,
+}
+
+impl Eth1ChainBackend for InteropEth1ChainBackend {
+    fn new(_server: String) -> Result {
+        Ok(Self::default())
+    }
+
+    fn eth1_data(&self, state: &BeaconState) -> Result {
+        let current_epoch = state.current_epoch();
+        let slots_per_voting_period = T::slots_per_eth1_voting_period() as u64;
+        let current_voting_period: u64 = current_epoch.as_u64() / slots_per_voting_period;
+
+        let deposit_root = hash(&int_to_bytes32(current_voting_period));
+        let block_hash = hash(&deposit_root);
+
+        Ok(Eth1Data {
+            deposit_root: Hash256::from_slice(&deposit_root),
+            deposit_count: state.eth1_deposit_index,
+            block_hash: Hash256::from_slice(&block_hash),
+        })
+    }
+
+    fn queued_deposits(&self, _: &BeaconState) -> Result> {
+        Ok(vec![])
+    }
+}
+
+impl Default for InteropEth1ChainBackend {
+    fn default() -> Self {
+        Self {
+            _phantom: PhantomData,
+        }
+    }
+}
+
+/// Returns `int` as little-endian bytes with a length of 32.
+fn int_to_bytes32(int: u64) -> Vec {
+    let mut vec = int.to_le_bytes().to_vec();
+    vec.resize(32, 0);
+    vec
+}
diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs
index 77fdaacdc5..26084e04a7 100644
--- a/beacon_node/beacon_chain/src/fork_choice.rs
+++ b/beacon_node/beacon_chain/src/fork_choice.rs
@@ -199,6 +199,14 @@ impl ForkChoice {
         self.backend.latest_message(validator_index)
     }
 
+    /// Runs an integrity verification function on the underlying fork choice algorithm.
+    ///
+    /// Returns `Ok(())` if the underlying fork choice has maintained its integrity,
+    /// `Err(description)` otherwise.
+    pub fn verify_integrity(&self) -> core::result::Result<(), String> {
+        self.backend.verify_integrity()
+    }
+
     /// Inform the fork choice that the given block (and corresponding root) have been finalized so
     /// it may prune it's storage.
/// diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index cc7725dd83..0361723489 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -3,8 +3,10 @@ extern crate lazy_static; mod beacon_chain; +mod beacon_chain_builder; mod checkpoint; mod errors; +mod eth1_chain; mod fork_choice; mod iter; mod metrics; @@ -16,6 +18,8 @@ pub use self::beacon_chain::{ }; pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; +pub use beacon_chain_builder::BeaconChainBuilder; +pub use eth1_chain::{Eth1ChainBackend, InteropEth1ChainBackend}; pub use lmd_ghost; pub use metrics::scrape_for_metrics; pub use parking_lot; diff --git a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs index 8b9f78dc5b..a85f78ac82 100644 --- a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs +++ b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs @@ -3,7 +3,7 @@ use operation_pool::PersistedOperationPool; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use store::{DBColumn, Error as StoreError, StoreItem}; -use types::{BeaconState, Hash256}; +use types::Hash256; /// 32-byte key for accessing the `PersistedBeaconChain`. 
pub const BEACON_CHAIN_DB_KEY: &str = "PERSISTEDBEACONCHAINPERSISTEDBEA"; @@ -13,7 +13,6 @@ pub struct PersistedBeaconChain { pub canonical_head: CheckPoint, pub op_pool: PersistedOperationPool, pub genesis_block_root: Hash256, - pub state: BeaconState, } impl StoreItem for PersistedBeaconChain { diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index a10a892a75..7670ac74e4 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,23 +1,28 @@ -use crate::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; +use crate::{ + AttestationProcessingOutcome, BeaconChain, BeaconChainBuilder, BeaconChainTypes, + BlockProcessingOutcome, InteropEth1ChainBackend, +}; use lmd_ghost::LmdGhost; use rayon::prelude::*; -use sloggers::{null::NullLoggerBuilder, Build}; -use slot_clock::SlotClock; +use sloggers::{terminal::TerminalLoggerBuilder, types::Severity, Build}; use slot_clock::TestingSlotClock; use state_processing::per_slot_processing; use std::marker::PhantomData; use std::sync::Arc; use store::MemoryStore; -use store::Store; use tree_hash::{SignedRoot, TreeHash}; use types::{ - test_utils::TestingBeaconStateBuilder, AggregateSignature, Attestation, - AttestationDataAndCustodyBit, BeaconBlock, BeaconState, BitList, ChainSpec, Domain, EthSpec, - Hash256, Keypair, RelativeEpoch, SecretKey, Signature, Slot, + AggregateSignature, Attestation, AttestationDataAndCustodyBit, BeaconBlock, BeaconState, + BitList, ChainSpec, Domain, EthSpec, Hash256, Keypair, RelativeEpoch, SecretKey, Signature, + Slot, }; +pub use types::test_utils::generate_deterministic_keypairs; + pub use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; +pub const HARNESS_GENESIS_TIME: u64 = 1567552690; // 4th September 2019 + /// Indicates how the `BeaconChainHarness` should produce blocks. 
#[derive(Clone, Copy, Debug)] pub enum BlockStrategy { @@ -61,6 +66,7 @@ where type Store = MemoryStore; type SlotClock = TestingSlotClock; type LmdGhost = L; + type Eth1Chain = InteropEth1ChainBackend; type EthSpec = E; } @@ -84,53 +90,21 @@ where E: EthSpec, { /// Instantiate a new harness with `validator_count` initial validators. - pub fn new(validator_count: usize) -> Self { - let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists( - validator_count, - &E::default_spec(), - ); - let (genesis_state, keypairs) = state_builder.build(); - - Self::from_state_and_keypairs(genesis_state, keypairs) - } - - /// Instantiate a new harness with an initial validator for each key supplied. - pub fn from_keypairs(keypairs: Vec) -> Self { - let state_builder = TestingBeaconStateBuilder::from_keypairs(keypairs, &E::default_spec()); - let (genesis_state, keypairs) = state_builder.build(); - - Self::from_state_and_keypairs(genesis_state, keypairs) - } - - /// Instantiate a new harness with the given genesis state and a keypair for each of the - /// initial validators in the given state. 
- pub fn from_state_and_keypairs(genesis_state: BeaconState, keypairs: Vec) -> Self { + pub fn new(keypairs: Vec) -> Self { let spec = E::default_spec(); + let log = TerminalLoggerBuilder::new() + .level(Severity::Warning) + .build() + .expect("logger should build"); + let store = Arc::new(MemoryStore::open()); - let mut genesis_block = BeaconBlock::empty(&spec); - genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); - - let builder = NullLoggerBuilder; - let log = builder.build().expect("logger should build"); - - // Slot clock - let slot_clock = TestingSlotClock::new( - spec.genesis_slot, - genesis_state.genesis_time, - spec.seconds_per_slot, - ); - - let chain = BeaconChain::from_genesis( - store, - slot_clock, - genesis_state, - genesis_block, - spec.clone(), - log, - ) - .expect("Terminate if beacon chain generation fails"); + let chain = + BeaconChainBuilder::quick_start(HARNESS_GENESIS_TIME, &keypairs, spec.clone(), log) + .unwrap_or_else(|e| panic!("Failed to create beacon chain builder: {}", e)) + .build(store.clone(), InteropEth1ChainBackend::default()) + .unwrap_or_else(|e| panic!("Failed to build beacon chain: {}", e)); Self { chain, @@ -144,7 +118,6 @@ where /// Does not produce blocks or attestations. pub fn advance_slot(&self) { self.chain.slot_clock.advance_slot(); - self.chain.catchup_state().expect("should catchup state"); } /// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the @@ -166,26 +139,27 @@ where // Determine the slot for the first block (or skipped block). let state_slot = match block_strategy { BlockStrategy::OnCanonicalHead => { - self.chain.read_slot_clock().expect("should know slot") - 1 + self.chain.slot().expect("should have a slot") - 1 } BlockStrategy::ForkCanonicalChainAt { previous_slot, .. 
} => previous_slot, }; - self.get_state_at_slot(state_slot) + self.chain + .state_at_slot(state_slot) + .expect("should find state for slot") + .clone() }; // Determine the first slot where a block should be built. let mut slot = match block_strategy { - BlockStrategy::OnCanonicalHead => { - self.chain.read_slot_clock().expect("should know slot") - } + BlockStrategy::OnCanonicalHead => self.chain.slot().expect("should have a slot"), BlockStrategy::ForkCanonicalChainAt { first_slot, .. } => first_slot, }; let mut head_block_root = None; for _ in 0..num_blocks { - while self.chain.read_slot_clock().expect("should have a slot") < slot { + while self.chain.slot().expect("should have a slot") < slot { self.advance_slot(); } @@ -211,21 +185,6 @@ where head_block_root.expect("did not produce any blocks") } - fn get_state_at_slot(&self, state_slot: Slot) -> BeaconState { - let state_root = self - .chain - .rev_iter_state_roots() - .find(|(_hash, slot)| *slot == state_slot) - .map(|(hash, _slot)| hash) - .expect("could not find state root"); - - self.chain - .store - .get(&state_root) - .expect("should read db") - .expect("should find state root") - } - /// Returns a newly created block, signed by the proposer for the given slot. 
fn build_block( &self, @@ -299,9 +258,14 @@ where ) .into_iter() .for_each(|attestation| { - self.chain + match self + .chain .process_attestation(attestation) - .expect("should process attestation"); + .expect("should not error during attestation processing") + { + AttestationProcessingOutcome::Processed => (), + other => panic!("did not successfully process attestation: {:?}", other), + } }); } diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 22b667f159..82fc882168 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -3,11 +3,14 @@ #[macro_use] extern crate lazy_static; -use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain, - BEACON_CHAIN_DB_KEY, -}; use beacon_chain::AttestationProcessingOutcome; +use beacon_chain::{ + test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain, + BEACON_CHAIN_DB_KEY, + }, + BlockProcessingOutcome, +}; use lmd_ghost::ThreadSafeReducedTree; use rand::Rng; use store::{MemoryStore, Store}; @@ -25,7 +28,7 @@ lazy_static! 
{ type TestForkChoice = ThreadSafeReducedTree; fn get_harness(validator_count: usize) -> BeaconChainHarness { - let harness = BeaconChainHarness::from_keypairs(KEYPAIRS[0..validator_count].to_vec()); + let harness = BeaconChainHarness::new(KEYPAIRS[0..validator_count].to_vec()); harness.advance_slot(); @@ -322,7 +325,9 @@ fn roundtrip_operation_pool() { let p: PersistedBeaconChain> = harness.chain.store.get(&key).unwrap().unwrap(); - let restored_op_pool = p.op_pool.into_operation_pool(&p.state, &harness.spec); + let restored_op_pool = p + .op_pool + .into_operation_pool(&p.canonical_head.beacon_state, &harness.spec); assert_eq!(harness.chain.op_pool, restored_op_pool); } @@ -459,3 +464,48 @@ fn free_attestations_added_to_fork_choice_all_updated() { } } } + +fn run_skip_slot_test(skip_slots: u64) { + let num_validators = 8; + let harness_a = get_harness(num_validators); + let harness_b = get_harness(num_validators); + + for _ in 0..skip_slots { + harness_a.advance_slot(); + harness_b.advance_slot(); + } + + harness_a.extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + // No attestation required for test. 
+ AttestationStrategy::SomeValidators(vec![]), + ); + + assert_eq!( + harness_a.chain.head().beacon_block.slot, + Slot::new(skip_slots + 1) + ); + assert_eq!(harness_b.chain.head().beacon_block.slot, Slot::new(0)); + + assert_eq!( + harness_b + .chain + .process_block(harness_a.chain.head().beacon_block.clone()), + Ok(BlockProcessingOutcome::Processed { + block_root: harness_a.chain.head().beacon_block_root + }) + ); + + assert_eq!( + harness_b.chain.head().beacon_block.slot, + Slot::new(skip_slots + 1) + ); +} + +#[test] +fn produces_and_processes_with_genesis_skip_slots() { + for i in 0..MinimalEthSpec::slots_per_epoch() * 4 { + run_skip_slot_test(i) + } +} diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs deleted file mode 100644 index 5168c067a9..0000000000 --- a/beacon_node/client/src/beacon_chain_types.rs +++ /dev/null @@ -1,169 +0,0 @@ -use crate::bootstrapper::Bootstrapper; -use crate::error::Result; -use crate::{config::GenesisState, ClientConfig}; -use beacon_chain::{ - lmd_ghost::{LmdGhost, ThreadSafeReducedTree}, - slot_clock::SystemTimeSlotClock, - store::Store, - BeaconChain, BeaconChainTypes, -}; -use slog::{crit, info, Logger}; -use slot_clock::SlotClock; -use std::fs::File; -use std::marker::PhantomData; -use std::sync::Arc; -use std::time::SystemTime; -use tree_hash::TreeHash; -use types::{ - test_utils::TestingBeaconStateBuilder, BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, -}; - -/// Provides a new, initialized `BeaconChain` -pub trait InitialiseBeaconChain { - fn initialise_beacon_chain( - store: Arc, - config: &ClientConfig, - spec: ChainSpec, - log: Logger, - ) -> Result> { - maybe_load_from_store_for_testnet::<_, T::Store, T::EthSpec>(store, config, spec, log) - } -} - -#[derive(Clone)] -pub struct ClientType { - _phantom_t: PhantomData, - _phantom_u: PhantomData, -} - -impl BeaconChainTypes for ClientType -where - S: Store + 'static, - E: EthSpec, -{ - type Store = S; - type 
SlotClock = SystemTimeSlotClock; - type LmdGhost = ThreadSafeReducedTree; - type EthSpec = E; -} -impl InitialiseBeaconChain for ClientType {} - -/// Loads a `BeaconChain` from `store`, if it exists. Otherwise, create a new chain from genesis. -fn maybe_load_from_store_for_testnet( - store: Arc, - config: &ClientConfig, - spec: ChainSpec, - log: Logger, -) -> Result> -where - T: BeaconChainTypes, - T::LmdGhost: LmdGhost, -{ - let genesis_state = match &config.genesis_state { - GenesisState::Mainnet => { - crit!(log, "This release does not support mainnet genesis state."); - return Err("Mainnet is unsupported".into()); - } - GenesisState::RecentGenesis { validator_count } => { - generate_testnet_genesis_state(*validator_count, recent_genesis_time(), &spec) - } - GenesisState::Generated { - validator_count, - genesis_time, - } => generate_testnet_genesis_state(*validator_count, *genesis_time, &spec), - GenesisState::Yaml { file } => { - let file = File::open(file).map_err(|e| { - format!("Unable to open YAML genesis state file {:?}: {:?}", file, e) - })?; - - serde_yaml::from_reader(file) - .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))? - } - GenesisState::HttpBootstrap { server } => { - let bootstrapper = Bootstrapper::from_server_string(server.to_string()) - .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; - - let (state, _block) = bootstrapper - .genesis() - .map_err(|e| format!("Failed to bootstrap genesis state: {}", e))?; - - state - } - }; - - let mut genesis_block = BeaconBlock::empty(&spec); - genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); - let genesis_block_root = genesis_block.canonical_root(); - - // Slot clock - let slot_clock = T::SlotClock::new( - spec.genesis_slot, - genesis_state.genesis_time, - spec.seconds_per_slot, - ); - - // Try load an existing `BeaconChain` from the store. If unable, create a new one. 
- if let Ok(Some(beacon_chain)) = - BeaconChain::from_store(store.clone(), spec.clone(), log.clone()) - { - // Here we check to ensure that the `BeaconChain` loaded from store has the expected - // genesis block. - // - // Without this check, it's possible that there will be an existing DB with a `BeaconChain` - // that has different parameters than provided to this executable. - if beacon_chain.genesis_block_root == genesis_block_root { - info!( - log, - "Loaded BeaconChain from store"; - "slot" => beacon_chain.head().beacon_state.slot, - "best_slot" => beacon_chain.best_slot(), - ); - - Ok(beacon_chain) - } else { - crit!( - log, - "The BeaconChain loaded from disk has an incorrect genesis root. \ - This may be caused by an old database in located in datadir." - ); - Err("Incorrect genesis root".into()) - } - } else { - BeaconChain::from_genesis( - store, - slot_clock, - genesis_state, - genesis_block, - spec, - log.clone(), - ) - .map_err(|e| format!("Failed to initialize new beacon chain: {:?}", e).into()) - } -} - -fn generate_testnet_genesis_state( - validator_count: usize, - genesis_time: u64, - spec: &ChainSpec, -) -> BeaconState { - let (mut genesis_state, _keypairs) = - TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, spec) - .build(); - - genesis_state.genesis_time = genesis_time; - - genesis_state -} - -/// Returns the system time, mod 30 minutes. -/// -/// Used for easily creating testnets. -fn recent_genesis_time() -> u64 { - let now = SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs(); - let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0); - // genesis is now the last 30 minute block. 
-    now - secs_after_last_period
-}
diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs
index ea8186dbc9..5b0553c5bd 100644
--- a/beacon_node/client/src/config.rs
+++ b/beacon_node/client/src/config.rs
@@ -1,15 +1,11 @@
-use crate::{Bootstrapper, Eth2Config};
 use clap::ArgMatches;
 use network::NetworkConfig;
 use serde_derive::{Deserialize, Serialize};
-use slog::{info, o, warn, Drain};
+use slog::{info, o, Drain};
 use std::fs::{self, OpenOptions};
 use std::path::PathBuf;
 use std::sync::Mutex;
 
-/// The number initial validators when starting the `Minimal`.
-const TESTNET_VALIDATOR_COUNT: usize = 16;
-
 /// The number initial validators when starting the `Minimal`.
 const TESTNET_SPEC_CONSTANTS: &str = "minimal";
 
@@ -21,33 +17,73 @@ pub struct Config {
     db_name: String,
     pub log_file: PathBuf,
     pub spec_constants: String,
-    pub genesis_state: GenesisState,
+    /// Defines how we should initialize a BeaconChain instance.
+    ///
+    /// This field is not serialized, therefore it will not be written to (or loaded from) config
+    /// files. It can only be configured via the CLI.
+    #[serde(skip)]
+    pub beacon_chain_start_method: BeaconChainStartMethod,
+    pub eth1_backend_method: Eth1BackendMethod,
     pub network: network::NetworkConfig,
     pub rpc: rpc::RPCConfig,
     pub rest_api: rest_api::ApiConfig,
 }
 
-#[derive(Debug, Clone, Serialize, Deserialize)]
-#[serde(tag = "type")]
-pub enum GenesisState {
-    /// Use the mainnet genesis state.
-    ///
-    /// Mainnet genesis state is not presently known, so this is a place-holder.
+/// Defines how the client should initialize a BeaconChain.
+///
+/// In general, there are two methods:
+///  - resuming an existing chain, or
+///  - initializing a new one.
+#[derive(Debug, Clone)]
+pub enum BeaconChainStartMethod {
+    /// Resume from an existing BeaconChain, loaded from the existing local database.
+    Resume,
+    /// Create a new beacon chain that can connect to mainnet (not yet supported).
Mainnet, - /// Generate a state with `validator_count` validators, all with well-known secret keys. + /// Create a new beacon chain that can connect to mainnet. /// /// Set the genesis time to be the start of the previous 30-minute window. - RecentGenesis { validator_count: usize }, - /// Generate a state with `genesis_time` and `validator_count` validators, all with well-known + RecentGenesis { + validator_count: usize, + minutes: u64, + }, + /// Create a new beacon chain with `genesis_time` and `validator_count` validators, all with well-known /// secret keys. Generated { validator_count: usize, genesis_time: u64, }, - /// Load a YAML-encoded genesis state from a file. + /// Create a new beacon chain by loading a YAML-encoded genesis state from a file. Yaml { file: PathBuf }, - /// Use a HTTP server (running our REST-API) to load genesis and finalized states and blocks. - HttpBootstrap { server: String }, + /// Create a new beacon chain by loading a SSZ-encoded genesis state from a file. + Ssz { file: PathBuf }, + /// Create a new beacon chain by loading a JSON-encoded genesis state from a file. + Json { file: PathBuf }, + /// Create a new beacon chain by using a HTTP server (running our REST-API) to load genesis and + /// finalized states and blocks. + HttpBootstrap { server: String, port: Option }, +} + +impl Default for BeaconChainStartMethod { + fn default() -> Self { + BeaconChainStartMethod::Resume + } +} + +/// Defines which Eth1 backend the client should use. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "type")] +pub enum Eth1BackendMethod { + /// Use the mocked eth1 backend used in interop testing + Interop, + /// Use a web3 connection to a running Eth1 node. 
+ Web3 { server: String }, +} + +impl Default for Eth1BackendMethod { + fn default() -> Self { + Eth1BackendMethod::Interop + } } impl Default for Config { @@ -58,12 +94,11 @@ impl Default for Config { db_type: "disk".to_string(), db_name: "chain_db".to_string(), network: NetworkConfig::new(), - rpc: rpc::RPCConfig::default(), - rest_api: rest_api::ApiConfig::default(), + rpc: <_>::default(), + rest_api: <_>::default(), spec_constants: TESTNET_SPEC_CONSTANTS.into(), - genesis_state: GenesisState::RecentGenesis { - validator_count: TESTNET_VALIDATOR_COUNT, - }, + beacon_chain_start_method: <_>::default(), + eth1_backend_method: <_>::default(), } } } @@ -76,6 +111,8 @@ impl Config { } /// Returns the core path for the client. + /// + /// Creates the directory if it does not exist. pub fn data_dir(&self) -> Option { let path = dirs::home_dir()?.join(&self.data_dir); fs::create_dir_all(&path).ok()?; @@ -127,15 +164,6 @@ impl Config { self.data_dir = PathBuf::from(dir); }; - if let Some(default_spec) = args.value_of("default-spec") { - match default_spec { - "mainnet" => self.spec_constants = Eth2Config::mainnet().spec_constants, - "minimal" => self.spec_constants = Eth2Config::minimal().spec_constants, - "interop" => self.spec_constants = Eth2Config::interop().spec_constants, - _ => {} // not supported - } - } - if let Some(dir) = args.value_of("db") { self.db_type = dir.to_string(); }; @@ -149,40 +177,6 @@ impl Config { self.update_logger(log)?; }; - // If the `--bootstrap` flag is provided, overwrite the default configuration. - if let Some(server) = args.value_of("bootstrap") { - do_bootstrapping(self, server.to_string(), &log)?; - } - Ok(()) } } - -/// Perform the HTTP bootstrapping procedure, reading an ENR and multiaddr from the HTTP server and -/// adding them to the `config`. -fn do_bootstrapping(config: &mut Config, server: String, log: &slog::Logger) -> Result<(), String> { - // Set the genesis state source. 
- config.genesis_state = GenesisState::HttpBootstrap { - server: server.to_string(), - }; - - let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; - - config.network.boot_nodes.push(bootstrapper.enr()?); - - if let Some(server_multiaddr) = bootstrapper.best_effort_multiaddr() { - info!( - log, - "Estimated bootstrapper libp2p address"; - "multiaddr" => format!("{:?}", server_multiaddr) - ); - config.network.libp2p_nodes.push(server_multiaddr); - } else { - warn!( - log, - "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." - ); - } - - Ok(()) -} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 7dd7118a78..1d3cb40ecf 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,31 +1,48 @@ extern crate slog; -mod beacon_chain_types; -mod bootstrapper; mod config; pub mod error; pub mod notifier; -use beacon_chain::BeaconChain; +use beacon_chain::{ + lmd_ghost::ThreadSafeReducedTree, slot_clock::SystemTimeSlotClock, store::Store, + test_utils::generate_deterministic_keypairs, BeaconChain, BeaconChainBuilder, +}; use exit_future::Signal; use futures::{future::Future, Stream}; use network::Service as NetworkService; -use slog::{error, info, o}; +use slog::{crit, error, info, o}; use slot_clock::SlotClock; use std::marker::PhantomData; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::runtime::TaskExecutor; use tokio::timer::Interval; +use types::EthSpec; -pub use beacon_chain::BeaconChainTypes; -pub use beacon_chain_types::ClientType; -pub use beacon_chain_types::InitialiseBeaconChain; -pub use bootstrapper::Bootstrapper; -pub use config::{Config as ClientConfig, GenesisState}; +pub use beacon_chain::{BeaconChainTypes, Eth1ChainBackend, InteropEth1ChainBackend}; +pub use config::{BeaconChainStartMethod, Config as ClientConfig, Eth1BackendMethod}; pub use eth2_config::Eth2Config; +#[derive(Clone)] +pub struct ClientType { + _phantom_s: 
PhantomData, + _phantom_e: PhantomData, +} + +impl BeaconChainTypes for ClientType +where + S: Store + 'static, + E: EthSpec, +{ + type Store = S; + type SlotClock = SystemTimeSlotClock; + type LmdGhost = ThreadSafeReducedTree; + type Eth1Chain = InteropEth1ChainBackend; + type EthSpec = E; +} + /// Main beacon node client service. This provides the connection and initialisation of the clients /// sub-services in multiple threads. pub struct Client { @@ -49,7 +66,7 @@ pub struct Client { impl Client where - T: BeaconChainTypes + InitialiseBeaconChain + Clone, + T: BeaconChainTypes + Clone, { /// Generate an instance of the client. Spawn and link all internal sub-processes. pub fn new( @@ -60,39 +77,110 @@ where executor: &TaskExecutor, ) -> error::Result { let store = Arc::new(store); - let seconds_per_slot = eth2_config.spec.seconds_per_slot; + let milliseconds_per_slot = eth2_config.spec.milliseconds_per_slot; - // Load a `BeaconChain` from the store, or create a new one if it does not exist. 
- let beacon_chain = Arc::new(T::initialise_beacon_chain( - store, - &client_config, - eth2_config.spec.clone(), - log.clone(), - )?); + let spec = ð2_config.spec.clone(); - if beacon_chain.read_slot_clock().is_none() { + let beacon_chain_builder = match &client_config.beacon_chain_start_method { + BeaconChainStartMethod::Resume => { + info!( + log, + "Starting beacon chain"; + "method" => "resume" + ); + BeaconChainBuilder::from_store(spec.clone(), log.clone()) + } + BeaconChainStartMethod::Mainnet => { + crit!(log, "No mainnet beacon chain startup specification."); + return Err("Mainnet launch is not yet announced.".into()); + } + BeaconChainStartMethod::RecentGenesis { + validator_count, + minutes, + } => { + info!( + log, + "Starting beacon chain"; + "validator_count" => validator_count, + "minutes" => minutes, + "method" => "recent" + ); + BeaconChainBuilder::recent_genesis( + &generate_deterministic_keypairs(*validator_count), + *minutes, + spec.clone(), + log.clone(), + )? + } + BeaconChainStartMethod::Generated { + validator_count, + genesis_time, + } => { + info!( + log, + "Starting beacon chain"; + "validator_count" => validator_count, + "genesis_time" => genesis_time, + "method" => "quick" + ); + BeaconChainBuilder::quick_start( + *genesis_time, + &generate_deterministic_keypairs(*validator_count), + spec.clone(), + log.clone(), + )? + } + BeaconChainStartMethod::Yaml { file } => { + info!( + log, + "Starting beacon chain"; + "file" => format!("{:?}", file), + "method" => "yaml" + ); + BeaconChainBuilder::yaml_state(file, spec.clone(), log.clone())? + } + BeaconChainStartMethod::Ssz { file } => { + info!( + log, + "Starting beacon chain"; + "file" => format!("{:?}", file), + "method" => "ssz" + ); + BeaconChainBuilder::ssz_state(file, spec.clone(), log.clone())? 
+ } + BeaconChainStartMethod::Json { file } => { + info!( + log, + "Starting beacon chain"; + "file" => format!("{:?}", file), + "method" => "json" + ); + BeaconChainBuilder::json_state(file, spec.clone(), log.clone())? + } + BeaconChainStartMethod::HttpBootstrap { server, port } => { + info!( + log, + "Starting beacon chain"; + "port" => port, + "server" => server, + "method" => "bootstrap" + ); + BeaconChainBuilder::http_bootstrap(server, spec.clone(), log.clone())? + } + }; + + let eth1_backend = T::Eth1Chain::new(String::new()).map_err(|e| format!("{:?}", e))?; + + let beacon_chain: Arc> = Arc::new( + beacon_chain_builder + .build(store, eth1_backend) + .map_err(error::Error::from)?, + ); + + if beacon_chain.slot().is_err() { panic!("Cannot start client before genesis!") } - // Block starting the client until we have caught the state up to the current slot. - // - // If we don't block here we create an initial scenario where we're unable to process any - // blocks and we're basically useless. 
- { - let state_slot = beacon_chain.head().beacon_state.slot; - let wall_clock_slot = beacon_chain.read_slot_clock().unwrap(); - let slots_since_genesis = beacon_chain.slots_since_genesis().unwrap(); - info!( - log, - "BeaconState cache init"; - "state_slot" => state_slot, - "wall_clock_slot" => wall_clock_slot, - "slots_since_genesis" => slots_since_genesis, - "catchup_distance" => wall_clock_slot - state_slot, - ); - } - do_state_catchup(&beacon_chain, &log); - let network_config = &client_config.network; let (network, network_send) = NetworkService::new(beacon_chain.clone(), network_config, executor, log.clone())?; @@ -118,6 +206,7 @@ where beacon_chain.clone(), network.clone(), client_config.db_path().expect("unable to read datadir"), + eth2_config.clone(), &log, ) { Ok(s) => Some(s), @@ -131,11 +220,11 @@ where }; let (slot_timer_exit_signal, exit) = exit_future::signal(); - if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() { + if let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot() { // set up the validator work interval - start at next slot and proceed every slot let interval = { // Set the interval to start at the next slot, and every slot after - let slot_duration = Duration::from_secs(seconds_per_slot); + let slot_duration = Duration::from_millis(milliseconds_per_slot); //TODO: Handle checked add correctly Interval::new(Instant::now() + duration_to_next_slot, slot_duration) }; @@ -146,7 +235,7 @@ where exit.until( interval .for_each(move |_| { - do_state_catchup(&chain, &log); + log_new_slot(&chain, &log); Ok(()) }) @@ -176,35 +265,19 @@ impl Drop for Client { } } -fn do_state_catchup(chain: &Arc>, log: &slog::Logger) { - // Only attempt to `catchup_state` if we can read the slot clock. 
- if let Some(current_slot) = chain.read_slot_clock() { - let state_catchup_result = chain.catchup_state(); +fn log_new_slot(chain: &Arc>, log: &slog::Logger) { + let best_slot = chain.head().beacon_block.slot; + let latest_block_root = chain.head().beacon_block_root; - let best_slot = chain.head().beacon_block.slot; - let latest_block_root = chain.head().beacon_block_root; - - let common = o!( + if let Ok(current_slot) = chain.slot() { + info!( + log, + "Slot start"; "skip_slots" => current_slot.saturating_sub(best_slot), "best_block_root" => format!("{}", latest_block_root), "best_block_slot" => best_slot, "slot" => current_slot, - ); - - if let Err(e) = state_catchup_result { - error!( - log, - "State catchup failed"; - "error" => format!("{:?}", e), - common - ) - } else { - info!( - log, - "Slot start"; - common - ) - } + ) } else { error!( log, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index b81da0991f..9cce6300df 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -682,13 +682,19 @@ impl ImportManager { ); } } - BlockProcessingOutcome::FinalizedSlot => { + BlockProcessingOutcome::WouldRevertFinalizedSlot { .. 
} => { trace!( self.log, "Finalized or earlier block processed"; "outcome" => format!("{:?}", outcome), ); // block reached our finalized slot or was earlier, move to the next block } + BlockProcessingOutcome::GenesisBlock => { + trace!( + self.log, "Genesis block was processed"; + "outcome" => format!("{:?}", outcome), + ); + } _ => { trace!( self.log, "InvalidBlock"; diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 573ac9dd1f..6745ceb628 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -387,7 +387,7 @@ impl SimpleSync { "peer" => format!("{:?}", peer_id), "msg" => "Failed to return all requested hashes", "start_slot" => req.start_slot, - "current_slot" => self.chain.present_slot(), + "current_slot" => format!("{:?}", self.chain.slot()), "requested" => req.count, "returned" => blocks.len(), ); @@ -525,6 +525,12 @@ impl NetworkContext { } pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) { + warn!( + &self.log, + "Disconnecting peer"; + "reason" => format!("{:?}", reason), + "peer_id" => format!("{:?}", peer_id), + ); self.send_rpc_request(None, peer_id, RPCRequest::Goodbye(reason)) // TODO: disconnect peers. 
} diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index cc69faec90..863ea04da4 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -14,9 +14,12 @@ store = { path = "../store" } version = { path = "../version" } serde = { version = "1.0", features = ["derive"] } serde_json = "^1.0" +serde_yaml = "0.8" slog = "^2.2.3" slog-term = "^2.4.0" slog-async = "^2.3.0" +eth2_ssz = { path = "../../eth2/utils/ssz" } +eth2_ssz_derive = { path = "../../eth2/utils/ssz_derive" } state_processing = { path = "../../eth2/state_processing" } types = { path = "../../eth2/types" } clap = "2.32.0" @@ -28,6 +31,7 @@ exit-future = "0.1.3" tokio = "0.1.17" url = "2.0" lazy_static = "1.3.0" +eth2_config = { path = "../../eth2/utils/eth2_config" } lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } slot_clock = { path = "../../eth2/utils/slot_clock" } hex = "0.3.2" diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 11e1446d99..1da640baf1 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -1,8 +1,9 @@ -use super::{success_response, ApiResult}; +use super::{success_response, ApiResult, ResponseBuilder}; use crate::{helpers::*, ApiError, UrlQuery}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use hyper::{Body, Request}; use serde::Serialize; +use ssz_derive::Encode; use std::sync::Arc; use store::Store; use types::{BeaconBlock, BeaconState, EthSpec, Hash256, Slot}; @@ -59,7 +60,7 @@ pub fn get_head(req: Request) -> ApiResult Ok(success_response(Body::from(json))) } -#[derive(Serialize)] +#[derive(Serialize, Encode)] #[serde(bound = "T: EthSpec")] pub struct BlockResponse { pub root: Hash256, @@ -103,11 +104,7 @@ pub fn get_block(req: Request) -> ApiResult beacon_block: block, }; - let json: String = serde_json::to_string(&response).map_err(|e| { - ApiError::ServerError(format!("Unable to serialize BlockResponse: {:?}", e)) - })?; 
- - Ok(success_response(Body::from(json))) + ResponseBuilder::new(&req).body(&response) } /// HTTP handler to return a `BeaconBlock` root at a given `slot`. @@ -132,11 +129,7 @@ pub fn get_block_root(req: Request) -> ApiR /// HTTP handler to return the `Fork` of the current head. pub fn get_fork(req: Request) -> ApiResult { - let beacon_chain = req - .extensions() - .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; - + let beacon_chain = get_beacon_chain_from_request::(&req)?; let chain_head = beacon_chain.head(); let json: String = serde_json::to_string(&chain_head.beacon_state.fork).map_err(|e| { @@ -146,7 +139,19 @@ pub fn get_fork(req: Request) -> ApiResult Ok(success_response(Body::from(json))) } -#[derive(Serialize)] +/// HTTP handler to return a `BeaconState` at a given `root` or `slot`. +/// +/// Will not return a state if the request slot is in the future. Will return states higher than +/// the current head by skipping slots. +pub fn get_genesis_state(req: Request) -> ApiResult { + let beacon_chain = get_beacon_chain_from_request::(&req)?; + + let (_root, state) = state_at_slot(&beacon_chain, Slot::new(0))?; + + ResponseBuilder::new(&req).body(&state) +} + +#[derive(Serialize, Encode)] #[serde(bound = "T: EthSpec")] pub struct StateResponse { pub root: Hash256, @@ -207,11 +212,7 @@ pub fn get_state(req: Request) -> ApiResult beacon_state: state, }; - let json: String = serde_json::to_string(&response).map_err(|e| { - ApiError::ServerError(format!("Unable to serialize StateResponse: {:?}", e)) - })?; - - Ok(success_response(Body::from(json))) + ResponseBuilder::new(&req).body(&response) } /// HTTP handler to return a `BeaconState` root at a given `slot`. 
diff --git a/beacon_node/rest_api/src/config.rs b/beacon_node/rest_api/src/config.rs index 90ac0821b1..c262a128a1 100644 --- a/beacon_node/rest_api/src/config.rs +++ b/beacon_node/rest_api/src/config.rs @@ -16,7 +16,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { Config { - enabled: true, // rest_api enabled by default + enabled: true, listen_address: Ipv4Addr::new(127, 0, 0, 1), port: 5052, } @@ -25,8 +25,8 @@ impl Default for Config { impl Config { pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { - if args.is_present("api") { - self.enabled = true; + if args.is_present("no-api") { + self.enabled = false; } if let Some(rpc_address) = args.value_of("api-address") { diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index d47afc02cd..e15c27df5e 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -109,8 +109,8 @@ pub fn state_root_at_slot( ) -> Result { let head_state = &beacon_chain.head().beacon_state; let current_slot = beacon_chain - .read_slot_clock() - .ok_or_else(|| ApiError::ServerError("Unable to read slot clock".to_string()))?; + .slot() + .map_err(|_| ApiError::ServerError("Unable to read slot clock".to_string()))?; // There are four scenarios when obtaining a state for a given slot: // @@ -177,12 +177,18 @@ pub fn get_beacon_chain_from_request( let beacon_chain = req .extensions() .get::>>() - .ok_or_else(|| { - ApiError::ServerError("Request is missing the beacon chain extension".into()) - })?; - let _ = beacon_chain - .ensure_state_caches_are_built() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".into()))?; + + let _state_now = beacon_chain + .state_now() + .map_err(|e| ApiError::ServerError(format!("Unable to get current BeaconState {:?}", e)))? + .maybe_as_mut_ref() + .ok_or(ApiError::ServerError( + "Unable to get mutable BeaconState".into(), + ))? 
+ .build_all_caches(&beacon_chain.spec) .map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?; + Ok(beacon_chain.clone()) } diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 2c7b90e3f0..4dbbdda515 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -8,15 +8,18 @@ mod helpers; mod metrics; mod network; mod node; +mod response_builder; mod spec; mod url_query; mod validator; use beacon_chain::{BeaconChain, BeaconChainTypes}; use client_network::Service as NetworkService; +use eth2_config::Eth2Config; use hyper::rt::Future; use hyper::service::service_fn_ok; use hyper::{Body, Method, Response, Server, StatusCode}; +use response_builder::ResponseBuilder; use slog::{info, o, warn}; use std::ops::Deref; use std::path::PathBuf; @@ -80,6 +83,7 @@ pub fn start_server( beacon_chain: Arc>, network_service: Arc>, db_path: PathBuf, + eth2_config: Eth2Config, log: &slog::Logger, ) -> Result { let log = log.new(o!("Service" => "Api")); @@ -101,12 +105,14 @@ pub fn start_server( // Clone our stateful objects, for use in service closure. let server_log = log.clone(); let server_bc = beacon_chain.clone(); + let eth2_config = Arc::new(eth2_config); let service = move || { let log = server_log.clone(); let beacon_chain = server_bc.clone(); let db_path = db_path.clone(); let network_service = network_service.clone(); + let eth2_config = eth2_config.clone(); // Create a simple handler for the router, inject our stateful objects into the request. 
service_fn_ok(move |mut req| { @@ -119,6 +125,8 @@ pub fn start_server( req.extensions_mut().insert::(db_path.clone()); req.extensions_mut() .insert::>>(network_service.clone()); + req.extensions_mut() + .insert::>(eth2_config.clone()); let path = req.uri().path().to_string(); @@ -184,6 +192,7 @@ pub fn start_server( (&Method::GET, "/beacon/state/current_finalized_checkpoint") => { beacon::get_current_finalized_checkpoint::(req) } + (&Method::GET, "/beacon/state/genesis") => beacon::get_genesis_state::(req), //TODO: Add aggreggate/filtered state lookups here, e.g. /beacon/validators/balances // Methods for bootstrap and checking configuration @@ -192,6 +201,7 @@ pub fn start_server( (&Method::GET, "/spec/deposit_contract") => { helpers::implementation_pending_response(req) } + (&Method::GET, "/spec/eth2_config") => spec::get_eth2_config::(req), (&Method::GET, "/metrics") => metrics::get_prometheus::(req), diff --git a/beacon_node/rest_api/src/response_builder.rs b/beacon_node/rest_api/src/response_builder.rs new file mode 100644 index 0000000000..9b8819996c --- /dev/null +++ b/beacon_node/rest_api/src/response_builder.rs @@ -0,0 +1,50 @@ +use super::{ApiError, ApiResult}; +use http::header; +use hyper::{Body, Request, Response, StatusCode}; +use serde::Serialize; +use ssz::Encode; + +pub enum Encoding { + JSON, + SSZ, + YAML, +} + +pub struct ResponseBuilder { + encoding: Encoding, +} + +impl ResponseBuilder { + pub fn new(req: &Request) -> Self { + let encoding = match req.headers().get(header::CONTENT_TYPE) { + Some(h) if h == "application/ssz" => Encoding::SSZ, + Some(h) if h == "application/yaml" => Encoding::YAML, + _ => Encoding::JSON, + }; + + Self { encoding } + } + + pub fn body(self, item: &T) -> ApiResult { + let body: Body = match self.encoding { + Encoding::JSON => Body::from(serde_json::to_string(&item).map_err(|e| { + ApiError::ServerError(format!( + "Unable to serialize response body as JSON: {:?}", + e + )) + })?), + Encoding::SSZ => 
Body::from(item.as_ssz_bytes()), + Encoding::YAML => Body::from(serde_yaml::to_string(&item).map_err(|e| { + ApiError::ServerError(format!( + "Unable to serialize response body as YAML: {:?}", + e + )) + })?), + }; + + Response::builder() + .status(StatusCode::OK) + .body(Body::from(body)) + .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) + } +} diff --git a/beacon_node/rest_api/src/spec.rs b/beacon_node/rest_api/src/spec.rs index d0c8e4368d..86d1c227d3 100644 --- a/beacon_node/rest_api/src/spec.rs +++ b/beacon_node/rest_api/src/spec.rs @@ -1,6 +1,7 @@ use super::{success_response, ApiResult}; use crate::ApiError; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2_config::Eth2Config; use hyper::{Body, Request}; use std::sync::Arc; use types::EthSpec; @@ -18,6 +19,19 @@ pub fn get_spec(req: Request) -> ApiResult Ok(success_response(Body::from(json))) } +/// HTTP handler to return the full Eth2Config object. +pub fn get_eth2_config(req: Request) -> ApiResult { + let eth2_config = req + .extensions() + .get::>() + .ok_or_else(|| ApiError::ServerError("Eth2Config extension missing".to_string()))?; + + let json: String = serde_json::to_string(eth2_config.as_ref()) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize Eth2Config: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} + /// HTTP handler to return the full spec object. pub fn get_slots_per_epoch(_req: Request) -> ApiResult { let json: String = serde_json::to_string(&T::EthSpec::slots_per_epoch()) diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 229d846749..010e49305b 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -206,8 +206,8 @@ pub fn get_new_attestation(req: Request) -> .ok_or(ApiError::InvalidQueryParams("No validator duties could be found for the requested validator. 
Cannot provide valid attestation.".into()))?; // Check that we are requesting an attestation during the slot where it is relevant. - let present_slot = beacon_chain.read_slot_clock().ok_or(ApiError::ServerError( - "Beacon node is unable to determine present slot, either the state isn't generated or the chain hasn't begun.".into() + let present_slot = beacon_chain.slot().map_err(|e| ApiError::ServerError( + format!("Beacon node is unable to determine present slot, either the state isn't generated or the chain hasn't begun. {:?}", e) ))?; if val_duty.slot != present_slot { return Err(ApiError::InvalidQueryParams(format!("Validator is only able to request an attestation during the slot they are allocated. Current slot: {:?}, allocated slot: {:?}", head_state.slot, val_duty.slot))); @@ -257,7 +257,7 @@ pub fn get_new_attestation(req: Request) -> })?; let attestation_data = beacon_chain - .produce_attestation_data(shard) + .produce_attestation_data(shard, current_slot.into()) .map_err(|e| ApiError::ServerError(format!("Could not produce an attestation: {:?}", e)))?; let attestation: Attestation = Attestation { diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index 68d3829ee7..f4b49049ae 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -14,7 +14,7 @@ use slog::{error, info, trace, warn}; use ssz::{ssz_encode, Decode, Encode}; use std::sync::Arc; use tokio::sync::mpsc; -use types::Attestation; +use types::{Attestation, Slot}; #[derive(Clone)] pub struct AttestationServiceInstance { @@ -37,49 +37,13 @@ impl AttestationService for AttestationServiceInstance { req.get_slot() ); - // verify the slot, drop lock on state afterwards - { - let slot_requested = req.get_slot(); - // TODO: this whole module is legacy and not maintained well. 
- let state = &self - .chain - .speculative_state() - .expect("This is legacy code and should be removed"); - - // Start by performing some checks - // Check that the AttestationData is for the current slot (otherwise it will not be valid) - if slot_requested > state.slot.as_u64() { - let log_clone = self.log.clone(); - let f = sink - .fail(RpcStatus::new( - RpcStatusCode::OutOfRange, - Some( - "AttestationData request for a slot that is in the future.".to_string(), - ), - )) - .map_err(move |e| { - error!(log_clone, "Failed to reply with failure {:?}: {:?}", req, e) - }); - return ctx.spawn(f); - } - // currently cannot handle past slots. TODO: Handle this case - else if slot_requested < state.slot.as_u64() { - let log_clone = self.log.clone(); - let f = sink - .fail(RpcStatus::new( - RpcStatusCode::InvalidArgument, - Some("AttestationData request for a slot that is in the past.".to_string()), - )) - .map_err(move |e| { - error!(log_clone, "Failed to reply with failure {:?}: {:?}", req, e) - }); - return ctx.spawn(f); - } - } - // Then get the AttestationData from the beacon chain let shard = req.get_shard(); - let attestation_data = match self.chain.produce_attestation_data(shard) { + let slot_requested = req.get_slot(); + let attestation_data = match self + .chain + .produce_attestation_data(shard, Slot::from(slot_requested)) + { Ok(v) => v, Err(e) => { // Could not produce an attestation diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index ed02cfc177..346d7e263e 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -34,8 +34,7 @@ impl BeaconBlockService for BeaconBlockServiceInstance { trace!(self.log, "Generating a beacon block"; "req" => format!("{:?}", req)); // decode the request - // TODO: requested slot currently unused, see: https://github.com/sigp/lighthouse/issues/336 - let _requested_slot = Slot::from(req.get_slot()); + let requested_slot = Slot::from(req.get_slot()); let 
randao_reveal = match Signature::from_ssz_bytes(req.get_randao_reveal()) { Ok(reveal) => reveal, Err(_) => { @@ -51,7 +50,7 @@ impl BeaconBlockService for BeaconBlockServiceInstance { } }; - let produced_block = match self.chain.produce_current_block(randao_reveal) { + let produced_block = match self.chain.produce_block(randao_reveal, requested_slot) { Ok((block, _state)) => block, Err(e) => { // could not produce a block @@ -67,6 +66,11 @@ impl BeaconBlockService for BeaconBlockServiceInstance { } }; + assert_eq!( + produced_block.slot, requested_slot, + "should produce at the requested slot" + ); + let mut block = BeaconBlockProto::new(); block.set_ssz(ssz_encode(&produced_block)); diff --git a/beacon_node/rpc/src/config.rs b/beacon_node/rpc/src/config.rs index 0f031ddc60..47eff6824a 100644 --- a/beacon_node/rpc/src/config.rs +++ b/beacon_node/rpc/src/config.rs @@ -16,7 +16,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { Config { - enabled: false, // rpc disabled by default + enabled: true, listen_address: Ipv4Addr::new(127, 0, 0, 1), port: 5051, } @@ -25,8 +25,8 @@ impl Default for Config { impl Config { pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { - if args.is_present("rpc") { - self.enabled = true; + if args.is_present("no-grpc") { + self.enabled = false; } if let Some(rpc_address) = args.value_of("rpc-address") { diff --git a/beacon_node/rpc/src/lib.rs b/beacon_node/rpc/src/lib.rs index eef0092921..59902ff43f 100644 --- a/beacon_node/rpc/src/lib.rs +++ b/beacon_node/rpc/src/lib.rs @@ -80,7 +80,12 @@ pub fn start_server( let spawn_rpc = { server.start(); for &(ref host, port) in server.bind_addrs() { - info!(log, "gRPC listening on {}:{}", host, port); + info!( + log, + "gRPC API started"; + "port" => port, + "host" => host, + ); } rpc_exit.and_then(move |_| { info!(log, "RPC Server shutting down"); diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index 
080c828a78..abc1cffc55 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -28,36 +28,38 @@ impl ValidatorService for ValidatorServiceInstance { let validators = req.get_validators(); trace!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch()); - let spec = &self.chain.spec; - // TODO: this whole module is legacy and not maintained well. - let state = &self - .chain - .speculative_state() - .expect("This is legacy code and should be removed"); let epoch = Epoch::from(req.get_epoch()); + let slot = epoch.start_slot(T::EthSpec::slots_per_epoch()); + + let mut state = if let Ok(state) = self.chain.state_at_slot(slot) { + state.clone() + } else { + let log_clone = self.log.clone(); + let f = sink + .fail(RpcStatus::new( + RpcStatusCode::FailedPrecondition, + Some("No state".to_string()), + )) + .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); + return ctx.spawn(f); + }; + + let _ = state.build_all_caches(&self.chain.spec); + + assert_eq!( + state.current_epoch(), + epoch, + "Retrieved state should be from the same epoch" + ); + let mut resp = GetDutiesResponse::new(); let resp_validators = resp.mut_active_validators(); - let relative_epoch = - match RelativeEpoch::from_epoch(state.slot.epoch(T::EthSpec::slots_per_epoch()), epoch) - { - Ok(v) => v, - Err(e) => { - // incorrect epoch - let log_clone = self.log.clone(); - let f = sink - .fail(RpcStatus::new( - RpcStatusCode::FailedPrecondition, - Some(format!("Invalid epoch: {:?}", e)), - )) - .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); - return ctx.spawn(f); - } - }; - let validator_proposers: Result, _> = epoch .slot_iter(T::EthSpec::slots_per_epoch()) - .map(|slot| state.get_beacon_proposer_index(slot, relative_epoch, &spec)) + .map(|slot| { + state.get_beacon_proposer_index(slot, RelativeEpoch::Current, &self.chain.spec) + }) .collect(); let validator_proposers = match validator_proposers { 
Ok(v) => v, diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs new file mode 100644 index 0000000000..6a13a9aaec --- /dev/null +++ b/beacon_node/src/config.rs @@ -0,0 +1,550 @@ +use clap::ArgMatches; +use client::{BeaconChainStartMethod, ClientConfig, Eth1BackendMethod, Eth2Config}; +use eth2_config::{read_from_file, write_to_file}; +use lighthouse_bootstrap::Bootstrapper; +use rand::{distributions::Alphanumeric, Rng}; +use slog::{crit, info, warn, Logger}; +use std::fs; +use std::net::Ipv4Addr; +use std::path::{Path, PathBuf}; + +pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; +pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; +pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; + +type Result = std::result::Result; +type Config = (ClientConfig, Eth2Config); + +/// Gets the fully-initialized global client and eth2 configuration objects. +/// +/// The top-level `clap` arguments should be provided as `cli_args`. +/// +/// The output of this function depends primarily upon the given `cli_args`, however its behaviour +/// may be influenced by other external services like the contents of the file system or the +/// response of some remote server. +pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { + let mut builder = ConfigBuilder::new(cli_args, log)?; + + if let Some(server) = cli_args.value_of("eth1-server") { + builder.set_eth1_backend_method(Eth1BackendMethod::Web3 { + server: server.into(), + }) + } else { + builder.set_eth1_backend_method(Eth1BackendMethod::Interop) + } + + match cli_args.subcommand() { + ("testnet", Some(sub_cmd_args)) => { + process_testnet_subcommand(&mut builder, sub_cmd_args, log)? + } + // No sub-command assumes a resume operation. + _ => { + info!( + log, + "Resuming from existing datadir"; + "path" => format!("{:?}", builder.client_config.data_dir) + ); + + // If no primary subcommand was given, start the beacon chain from an existing + // database. 
+ builder.set_beacon_chain_start_method(BeaconChainStartMethod::Resume); + + // Whilst there is no large testnet or mainnet force the user to specify how they want + // to start a new chain (e.g., from a genesis YAML file, another node, etc). + if !builder.client_config.data_dir.exists() { + return Err( + "No datadir found. To start a new beacon chain, see `testnet --help`. \ + Use `--datadir` to specify a different directory" + .into(), + ); + } + + // If the `testnet` command was not provided, attempt to load an existing datadir and + // continue with an existing chain. + builder.load_from_datadir()?; + } + }; + + builder.build(cli_args) +} + +/// Process the `testnet` CLI subcommand arguments, updating the `builder`. +fn process_testnet_subcommand( + builder: &mut ConfigBuilder, + cli_args: &ArgMatches, + log: &Logger, +) -> Result<()> { + if cli_args.is_present("random-datadir") { + builder.set_random_datadir()?; + } + + if cli_args.is_present("force") { + builder.clean_datadir()?; + } + + let is_bootstrap = cli_args.subcommand_name() == Some("bootstrap"); + + if let Some(path_string) = cli_args.value_of("eth2-config") { + if is_bootstrap { + return Err("Cannot supply --eth2-config when using bootstrap".to_string()); + } + + let path = path_string + .parse::() + .map_err(|e| format!("Unable to parse eth2-config path: {:?}", e))?; + builder.load_eth2_config(path)?; + } else { + builder.update_spec_from_subcommand(&cli_args)?; + } + + if let Some(slot_time) = cli_args.value_of("slot-time") { + if is_bootstrap { + return Err("Cannot supply --slot-time flag whilst using bootstrap.".into()); + } + + let slot_time = slot_time + .parse::() + .map_err(|e| format!("Unable to parse slot-time: {:?}", e))?; + + builder.set_slot_time(slot_time); + } + + if let Some(path_string) = cli_args.value_of("client-config") { + let path = path_string + .parse::() + .map_err(|e| format!("Unable to parse client config path: {:?}", e))?; + builder.load_client_config(path)?; + } + + 
info!( + log, + "Creating new datadir"; + "path" => format!("{:?}", builder.client_config.data_dir) + ); + + // When using the testnet command we listen on all addresses. + builder.set_listen_addresses("0.0.0.0".into())?; + warn!(log, "All services listening on 0.0.0.0"); + + // Start matching on the second subcommand (e.g., `testnet bootstrap ...`). + match cli_args.subcommand() { + ("bootstrap", Some(cli_args)) => { + let server = cli_args + .value_of("server") + .ok_or_else(|| "No bootstrap server specified")?; + let port: Option = cli_args + .value_of("libp2p-port") + .and_then(|s| s.parse::().ok()); + + builder.import_bootstrap_libp2p_address(server, port)?; + builder.import_bootstrap_eth2_config(server)?; + + builder.set_beacon_chain_start_method(BeaconChainStartMethod::HttpBootstrap { + server: server.to_string(), + port, + }) + } + ("recent", Some(cli_args)) => { + let validator_count = cli_args + .value_of("validator_count") + .ok_or_else(|| "No validator_count specified")? + .parse::() + .map_err(|e| format!("Unable to parse validator_count: {:?}", e))?; + + let minutes = cli_args + .value_of("minutes") + .ok_or_else(|| "No recent genesis minutes supplied")? + .parse::() + .map_err(|e| format!("Unable to parse minutes: {:?}", e))?; + + builder.set_beacon_chain_start_method(BeaconChainStartMethod::RecentGenesis { + validator_count, + minutes, + }) + } + ("quick", Some(cli_args)) => { + let validator_count = cli_args + .value_of("validator_count") + .ok_or_else(|| "No validator_count specified")? + .parse::() + .map_err(|e| format!("Unable to parse validator_count: {:?}", e))?; + + let genesis_time = cli_args + .value_of("genesis_time") + .ok_or_else(|| "No genesis time supplied")? 
+ .parse::() + .map_err(|e| format!("Unable to parse genesis time: {:?}", e))?; + + builder.set_beacon_chain_start_method(BeaconChainStartMethod::Generated { + validator_count, + genesis_time, + }) + } + ("file", Some(cli_args)) => { + let file = cli_args + .value_of("file") + .ok_or_else(|| "No filename specified")? + .parse::() + .map_err(|e| format!("Unable to parse filename: {:?}", e))?; + + let format = cli_args + .value_of("format") + .ok_or_else(|| "No file format specified")?; + + let start_method = match format { + "yaml" => BeaconChainStartMethod::Yaml { file }, + "ssz" => BeaconChainStartMethod::Ssz { file }, + "json" => BeaconChainStartMethod::Json { file }, + other => return Err(format!("Unknown genesis file format: {}", other)), + }; + + builder.set_beacon_chain_start_method(start_method) + } + (cmd, Some(_)) => { + return Err(format!( + "Invalid valid method specified: {}. See 'testnet --help'.", + cmd + )) + } + _ => return Err("No testnet method specified. See 'testnet --help'.".into()), + }; + + builder.write_configs_to_new_datadir()?; + + Ok(()) +} + +/// Allows for building a set of configurations based upon `clap` arguments. +struct ConfigBuilder<'a> { + log: &'a Logger, + eth2_config: Eth2Config, + client_config: ClientConfig, +} + +impl<'a> ConfigBuilder<'a> { + /// Create a new builder with default settings. + pub fn new(cli_args: &'a ArgMatches, log: &'a Logger) -> Result { + // Read the `--datadir` flag. + // + // If it's not present, try and find the home directory (`~`) and push the default data + // directory onto it. 
+ let data_dir: PathBuf = cli_args + .value_of("datadir") + .map(|string| PathBuf::from(string)) + .or_else(|| { + dirs::home_dir().map(|mut home| { + home.push(DEFAULT_DATA_DIR); + home + }) + }) + .ok_or_else(|| "Unable to find a home directory for the datadir".to_string())?; + + let mut client_config = ClientConfig::default(); + client_config.data_dir = data_dir; + + Ok(Self { + log, + eth2_config: Eth2Config::minimal(), + client_config, + }) + } + + /// Clears any configuration files that would interfere with writing new configs. + /// + /// Moves the following files in `data_dir` into a backup directory: + /// + /// - Client config + /// - Eth2 config + /// - The entire database directory + pub fn clean_datadir(&mut self) -> Result<()> { + let backup_dir = { + let mut s = String::from("backup_"); + s.push_str(&random_string(6)); + self.client_config.data_dir.join(s) + }; + + fs::create_dir_all(&backup_dir) + .map_err(|e| format!("Unable to create config backup dir: {:?}", e))?; + + let move_to_backup_dir = |path: &Path| -> Result<()> { + let file_name = path + .file_name() + .ok_or_else(|| "Invalid path found during datadir clean (no filename).")?; + + let mut new = path.to_path_buf(); + new.pop(); + new.push(backup_dir.clone()); + new.push(file_name); + + let _ = fs::rename(path, new); + + Ok(()) + }; + + move_to_backup_dir(&self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME))?; + move_to_backup_dir(&self.client_config.data_dir.join(ETH2_CONFIG_FILENAME))?; + + if let Some(db_path) = self.client_config.db_path() { + move_to_backup_dir(&db_path)?; + } + + Ok(()) + } + + /// Sets the method for starting the beacon chain. + pub fn set_beacon_chain_start_method(&mut self, method: BeaconChainStartMethod) { + self.client_config.beacon_chain_start_method = method; + } + + /// Sets the method for starting the beacon chain. 
+ pub fn set_eth1_backend_method(&mut self, method: Eth1BackendMethod) {
+ self.client_config.eth1_backend_method = method;
+ }
+
+ /// Import the libp2p address for `server` into the list of bootnodes in `self`.
+ ///
+ /// If `port` is `Some`, it is used as the port for the `Multiaddr`. If `port` is `None`,
+ /// attempts to connect to the `server` via HTTP and retrieve its libp2p listen port.
+ pub fn import_bootstrap_libp2p_address(
+ &mut self,
+ server: &str,
+ port: Option,
+ ) -> Result<()> {
+ let bootstrapper = Bootstrapper::connect(server.to_string(), &self.log)?;
+
+ if let Some(server_multiaddr) = bootstrapper.best_effort_multiaddr(port) {
+ info!(
+ self.log,
+ "Estimated bootstrapper libp2p address";
+ "multiaddr" => format!("{:?}", server_multiaddr)
+ );
+
+ self.client_config
+ .network
+ .libp2p_nodes
+ .push(server_multiaddr);
+ } else {
+ warn!(
+ self.log,
+ "Unable to estimate a bootstrapper libp2p address, this node may not find any peers."
+ );
+ };
+
+ Ok(())
+ }
+
+ /// Set the config data_dir to be a random directory.
+ ///
+ /// Useful for easily spinning up ephemeral testnets.
+ pub fn set_random_datadir(&mut self) -> Result<()> {
+ self.client_config
+ .data_dir
+ .push(format!("random_{}", random_string(6)));
+ self.client_config.network.network_dir = self.client_config.data_dir.join("network");
+
+ Ok(())
+ }
+
+ /// Imports an `Eth2Config` from `server`, returning an error if this fails. 
+ pub fn import_bootstrap_eth2_config(&mut self, server: &str) -> Result<()> { + let bootstrapper = Bootstrapper::connect(server.to_string(), &self.log)?; + + self.update_eth2_config(bootstrapper.eth2_config()?); + + Ok(()) + } + + fn update_eth2_config(&mut self, eth2_config: Eth2Config) { + self.eth2_config = eth2_config; + } + + fn set_slot_time(&mut self, milliseconds_per_slot: u64) { + self.eth2_config.spec.milliseconds_per_slot = milliseconds_per_slot; + } + + /// Reads the subcommand and tries to update `self.eth2_config` based up on the `--spec` flag. + /// + /// Returns an error if the `--spec` flag is not present in the given `cli_args`. + pub fn update_spec_from_subcommand(&mut self, cli_args: &ArgMatches) -> Result<()> { + // Re-initialise the `Eth2Config`. + // + // If a CLI parameter is set, overwrite any config file present. + // If a parameter is not set, use either the config file present or default to minimal. + let eth2_config = match cli_args.value_of("spec") { + Some("mainnet") => Eth2Config::mainnet(), + Some("minimal") => Eth2Config::minimal(), + Some("interop") => Eth2Config::interop(), + _ => return Err("Unable to determine specification type.".into()), + }; + + self.client_config.spec_constants = cli_args + .value_of("spec") + .expect("Guarded by prior match statement") + .to_string(); + self.eth2_config = eth2_config; + + Ok(()) + } + + /// Writes the configs in `self` to `self.data_dir`. + /// + /// Returns an error if `self.data_dir` already exists. + pub fn write_configs_to_new_datadir(&mut self) -> Result<()> { + let db_exists = self + .client_config + .db_path() + .map(|d| d.exists()) + .unwrap_or_else(|| false); + + // Do not permit creating a new config when the datadir exists. + if db_exists { + return Err("Database already exists. See `-f` or `-r` in `testnet --help`".into()); + } + + // Create `datadir` and any non-existing parent directories. 
+ fs::create_dir_all(&self.client_config.data_dir).map_err(|e| {
+ crit!(self.log, "Failed to initialize data dir"; "error" => format!("{}", e));
+ format!("{}", e)
+ })?;
+
+ let client_config_file = self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME);
+ if client_config_file.exists() {
+ return Err(format!(
+ "Datadir is not clean, {} exists. See `-f` in `testnet --help`.",
+ CLIENT_CONFIG_FILENAME
+ ));
+ } else {
+ // Write the config to a TOML file in the datadir.
+ write_to_file(
+ self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME),
+ &self.client_config,
+ )
+ .map_err(|e| format!("Unable to write {} file: {:?}", CLIENT_CONFIG_FILENAME, e))?;
+ }
+
+ let eth2_config_file = self.client_config.data_dir.join(ETH2_CONFIG_FILENAME);
+ if eth2_config_file.exists() {
+ return Err(format!(
+ "Datadir is not clean, {} exists. See `-f` in `testnet --help`.",
+ ETH2_CONFIG_FILENAME
+ ));
+ } else {
+ // Write the config to a TOML file in the datadir.
+ write_to_file(
+ self.client_config.data_dir.join(ETH2_CONFIG_FILENAME),
+ &self.eth2_config,
+ )
+ .map_err(|e| format!("Unable to write {} file: {:?}", ETH2_CONFIG_FILENAME, e))?;
+ }
+
+ Ok(())
+ }
+
+ /// Attempts to load the client and eth2 configs from `self.data_dir`.
+ ///
+ /// Returns an error if any files are not found or are invalid.
+ pub fn load_from_datadir(&mut self) -> Result<()> {
+ // Check to ensure the datadir exists.
+ //
+ // For now we return an error. In the future we may decide to boot a default (e.g.,
+ // public testnet or mainnet).
+ if !self.client_config.data_dir.exists() {
+ return Err(
+ "No datadir found. Either create a new testnet or specify a different `--datadir`."
+ .into(),
+ );
+ }
+
+ // If there is a path to a database in the config, ensure it exists.
+ if !self
+ .client_config
+ .db_path()
+ .map(|path| path.exists())
+ .unwrap_or_else(|| true)
+ {
+ return Err(
+ "No database found in datadir. 
Use 'testnet -f' to overwrite the existing \ + datadir, or specify a different `--datadir`." + .into(), + ); + } + + self.load_eth2_config(self.client_config.data_dir.join(ETH2_CONFIG_FILENAME))?; + self.load_client_config(self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME))?; + + Ok(()) + } + + /// Attempts to load the client config from `path`. + /// + /// Returns an error if any files are not found or are invalid. + pub fn load_client_config(&mut self, path: PathBuf) -> Result<()> { + self.client_config = read_from_file::(path.clone()) + .map_err(|e| format!("Unable to parse {:?} file: {:?}", path, e))? + .ok_or_else(|| format!("{:?} file does not exist", path))?; + + Ok(()) + } + + /// Attempts to load the eth2 config from `path`. + /// + /// Returns an error if any files are not found or are invalid. + pub fn load_eth2_config(&mut self, path: PathBuf) -> Result<()> { + self.eth2_config = read_from_file::(path.clone()) + .map_err(|e| format!("Unable to parse {:?} file: {:?}", path, e))? + .ok_or_else(|| format!("{:?} file does not exist", path))?; + + Ok(()) + } + + /// Sets all listening addresses to the given `addr`. + pub fn set_listen_addresses(&mut self, addr: String) -> Result<()> { + let addr = addr + .parse::() + .map_err(|e| format!("Unable to parse default listen address: {:?}", e))?; + + self.client_config.network.listen_address = addr.clone().into(); + self.client_config.rpc.listen_address = addr.clone(); + self.client_config.rest_api.listen_address = addr.clone(); + + Ok(()) + } + + /// Consumes self, returning the configs. + /// + /// The supplied `cli_args` should be the base-level `clap` cli_args (i.e., not a subcommand + /// cli_args). 
+ pub fn build(mut self, cli_args: &ArgMatches) -> Result { + self.eth2_config.apply_cli_args(cli_args)?; + self.client_config + .apply_cli_args(cli_args, &mut self.log.clone())?; + + if let Some(bump) = cli_args.value_of("port-bump") { + let bump = bump + .parse::() + .map_err(|e| format!("Unable to parse port bump: {}", e))?; + + self.client_config.network.libp2p_port += bump; + self.client_config.network.discovery_port += bump; + self.client_config.rpc.port += bump; + self.client_config.rest_api.port += bump; + } + + if self.eth2_config.spec_constants != self.client_config.spec_constants { + crit!(self.log, "Specification constants do not match."; + "client_config" => format!("{}", self.client_config.spec_constants), + "eth2_config" => format!("{}", self.eth2_config.spec_constants) + ); + return Err("Specification constant mismatch".into()); + } + + Ok((self.client_config, self.eth2_config)) + } +} + +fn random_string(len: usize) -> String { + rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(len) + .collect::() +} diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 0eb5b83b43..fab75ea4ea 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -1,12 +1,10 @@ +mod config; mod run; -use clap::{App, Arg}; -use client::{ClientConfig, Eth2Config}; +use clap::{App, Arg, SubCommand}; +use config::get_configs; use env_logger::{Builder, Env}; -use eth2_config::{read_from_file, write_to_file}; use slog::{crit, o, warn, Drain, Level}; -use std::fs; -use std::path::PathBuf; pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; @@ -30,6 +28,7 @@ fn main() { .value_name("DIR") .help("Data directory for keys and databases.") .takes_value(true) + .global(true) ) .arg( Arg::with_name("logfile") @@ -44,22 +43,34 @@ fn main() { .value_name("NETWORK-DIR") .help("Data directory for network keys.") .takes_value(true) + .global(true) ) /* * Network parameters. 
*/ + .arg( + Arg::with_name("port-bump") + .long("port-bump") + .short("b") + .value_name("INCREMENT") + .help("Sets all listening TCP/UDP ports to default values, but with each port increased by \ + INCREMENT. Useful when starting multiple nodes on a single machine. Using increments \ + in multiples of 10 is recommended.") + .takes_value(true), + ) .arg( Arg::with_name("listen-address") .long("listen-address") .value_name("ADDRESS") .help("The address lighthouse will listen for UDP and TCP connections. (default 127.0.0.1).") - .takes_value(true), + .takes_value(true) ) .arg( Arg::with_name("port") .long("port") .value_name("PORT") .help("The TCP/UDP port to listen on. The UDP port can be modified by the --discovery-port flag.") + .conflicts_with("port-bump") .takes_value(true), ) .arg( @@ -81,6 +92,7 @@ fn main() { .long("disc-port") .value_name("PORT") .help("The discovery UDP port.") + .conflicts_with("port-bump") .takes_value(true), ) .arg( @@ -108,10 +120,9 @@ fn main() { * gRPC parameters. 
*/ .arg( - Arg::with_name("rpc") - .long("rpc") - .value_name("RPC") - .help("Enable the RPC server.") + Arg::with_name("no-grpc") + .long("no-grpc") + .help("Disable the gRPC server.") .takes_value(false), ) .arg( @@ -125,14 +136,14 @@ fn main() { Arg::with_name("rpc-port") .long("rpc-port") .help("Listen port for RPC endpoint.") + .conflicts_with("port-bump") .takes_value(true), ) /* Client related arguments */ .arg( - Arg::with_name("api") - .long("api") - .value_name("API") - .help("Enable the RESTful HTTP API server.") + Arg::with_name("no-api") + .long("no-api") + .help("Disable RESTful HTTP API server.") .takes_value(false), ) .arg( @@ -147,9 +158,20 @@ fn main() { .long("api-port") .value_name("APIPORT") .help("Set the listen TCP port for the RESTful HTTP API server.") + .conflicts_with("port-bump") .takes_value(true), ) + /* + * Eth1 Integration + */ + .arg( + Arg::with_name("eth1-server") + .long("eth1-server") + .value_name("SERVER") + .help("Specifies the server for a web3 connection to the Eth1 chain.") + .takes_value(true) + ) /* * Database parameters. */ @@ -160,25 +182,7 @@ fn main() { .help("Type of database to use.") .takes_value(true) .possible_values(&["disk", "memory"]) - .default_value("memory"), - ) - /* - * Specification/testnet params. - */ - .arg( - Arg::with_name("default-spec") - .long("default-spec") - .value_name("TITLE") - .short("default-spec") - .help("Specifies the default eth2 spec to be used. This will override any spec written to disk and will therefore be used by default in future instances.") - .takes_value(true) - .possible_values(&["mainnet", "minimal", "interop"]) - ) - .arg( - Arg::with_name("recent-genesis") - .long("recent-genesis") - .short("r") - .help("When present, genesis will be within 30 minutes prior. Only for testing"), + .default_value("disk"), ) /* * Logging. @@ -200,14 +204,137 @@ fn main() { .takes_value(true), ) /* - * Bootstrap. + * The "testnet" sub-command. 
+ * + * Allows for creating a new datadir with testnet-specific configs. */ - .arg( - Arg::with_name("bootstrap") - .long("bootstrap") - .value_name("HTTP_SERVER") - .help("Load the genesis state and libp2p address from the HTTP API of another Lighthouse node.") - .takes_value(true) + .subcommand(SubCommand::with_name("testnet") + .about("Create a new Lighthouse datadir using a testnet strategy.") + .arg( + Arg::with_name("spec") + .short("s") + .long("spec") + .value_name("TITLE") + .help("Specifies the default eth2 spec type. Only effective when creating a new datadir.") + .takes_value(true) + .required(true) + .possible_values(&["mainnet", "minimal", "interop"]) + .default_value("minimal") + ) + .arg( + Arg::with_name("eth2-config") + .long("eth2-config") + .value_name("TOML_FILE") + .help("A existing eth2_spec TOML file (e.g., eth2_spec.toml).") + .takes_value(true) + .conflicts_with("spec") + ) + .arg( + Arg::with_name("client-config") + .long("client-config") + .value_name("TOML_FILE") + .help("An existing beacon_node TOML file (e.g., beacon_node.toml).") + .takes_value(true) + ) + .arg( + Arg::with_name("random-datadir") + .long("random-datadir") + .short("r") + .help("If present, append a random string to the datadir path. Useful for fast development \ + iteration.") + ) + .arg( + Arg::with_name("force") + .long("force") + .short("f") + .help("If present, will create new config and database files and move the any existing to a \ + backup directory.") + .conflicts_with("random-datadir") + ) + .arg( + Arg::with_name("slot-time") + .long("slot-time") + .short("t") + .value_name("MILLISECONDS") + .help("Defines the slot time when creating a new testnet.") + ) + /* + * `boostrap` + * + * Start a new node by downloading genesis and network info from another node via the + * HTTP API. 
+ */ + .subcommand(SubCommand::with_name("bootstrap") + .about("Connects to the given HTTP server, downloads a genesis state and attempts to peer with it.") + .arg(Arg::with_name("server") + .value_name("HTTP_SERVER") + .required(true) + .default_value("http://localhost:5052") + .help("A HTTP server, with a http:// prefix")) + .arg(Arg::with_name("libp2p-port") + .short("p") + .long("port") + .value_name("TCP_PORT") + .help("A libp2p listen port used to peer with the bootstrap server. This flag is useful \ + when port-fowarding is used: you may connect using a different port than \ + the one the server is immediately listening on.")) + ) + /* + * `recent` + * + * Start a new node, with a specified number of validators with a genesis time in the last + * 30-minutes. + */ + .subcommand(SubCommand::with_name("recent") + .about("Creates a new genesis state where the genesis time was at the previous \ + MINUTES boundary (e.g., when MINUTES == 30; 12:00, 12:30, 13:00, etc.)") + .arg(Arg::with_name("validator_count") + .value_name("VALIDATOR_COUNT") + .required(true) + .help("The number of validators in the genesis state")) + .arg(Arg::with_name("minutes") + .long("minutes") + .short("m") + .value_name("MINUTES") + .required(true) + .default_value("15") + .help("The maximum number of minutes that will have elapsed before genesis")) + ) + /* + * `quick` + * + * Start a new node, specifying the number of validators and genesis time + */ + .subcommand(SubCommand::with_name("quick") + .about("Creates a new genesis state from the specified validator count and genesis time. 
\ + Compatible with the `quick-start genesis` defined in the eth2.0-pm repo.") + .arg(Arg::with_name("validator_count") + .value_name("VALIDATOR_COUNT") + .required(true) + .help("The number of validators in the genesis state")) + .arg(Arg::with_name("genesis_time") + .value_name("UNIX_EPOCH_SECONDS") + .required(true) + .help("The genesis time for the given state.")) + ) + /* + * `yaml` + * + * Start a new node, using a genesis state loaded from a YAML file + */ + .subcommand(SubCommand::with_name("file") + .about("Creates a new datadir where the genesis state is read from YAML. May fail to parse \ + a file that was generated to a different spec than that specified by --spec.") + .arg(Arg::with_name("format") + .value_name("FORMAT") + .required(true) + .possible_values(&["yaml", "ssz", "json"]) + .help("The encoding of the state in the file.")) + .arg(Arg::with_name("file") + .value_name("YAML_FILE") + .required(true) + .help("A YAML file from which to read the state")) + ) ) .get_matches(); @@ -227,143 +354,24 @@ fn main() { _ => unreachable!("guarded by clap"), }; - let mut log = slog::Logger::root(drain.fuse(), o!()); + let log = slog::Logger::root(drain.fuse(), o!()); warn!( log, "Ethereum 2.0 is pre-release. This software is experimental." ); - let data_dir = match matches - .value_of("datadir") - .and_then(|v| Some(PathBuf::from(v))) - { - Some(v) => v, - None => { - // use the default - let mut default_dir = match dirs::home_dir() { - Some(v) => v, - None => { - crit!(log, "Failed to find a home directory"); - return; - } - }; - default_dir.push(DEFAULT_DATA_DIR); - default_dir - } - }; - - // create the directory if needed - match fs::create_dir_all(&data_dir) { - Ok(_) => {} - Err(e) => { - crit!(log, "Failed to initialize data dir"; "error" => format!("{}", e)); - return; - } - } - - let client_config_path = data_dir.join(CLIENT_CONFIG_FILENAME); - - // Attempt to load the `ClientConfig` from disk. + // Load the process-wide configuration. 
// - // If file doesn't exist, create a new, default one. - let mut client_config = match read_from_file::(client_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { - let default = ClientConfig::default(); - if let Err(e) = write_to_file(client_config_path, &default) { - crit!(log, "Failed to write default ClientConfig to file"; "error" => format!("{:?}", e)); - return; - } - default - } + // May load this from disk or create a new configuration, depending on the CLI flags supplied. + let (client_config, eth2_config) = match get_configs(&matches, &log) { + Ok(configs) => configs, Err(e) => { - crit!(log, "Failed to load a ChainConfig file"; "error" => format!("{:?}", e)); + crit!(log, "Failed to load configuration"; "error" => e); return; } }; - // Ensure the `data_dir` in the config matches that supplied to the CLI. - client_config.data_dir = data_dir.clone(); - - // Update the client config with any CLI args. - match client_config.apply_cli_args(&matches, &mut log) { - Ok(()) => (), - Err(s) => { - crit!(log, "Failed to parse ClientConfig CLI arguments"; "error" => s); - return; - } - }; - - let eth2_config_path = data_dir.join(ETH2_CONFIG_FILENAME); - - // Initialise the `Eth2Config`. - // - // If a CLI parameter is set, overwrite any config file present. - // If a parameter is not set, use either the config file present or default to minimal. - let cli_config = match matches.value_of("default-spec") { - Some("mainnet") => Some(Eth2Config::mainnet()), - Some("minimal") => Some(Eth2Config::minimal()), - Some("interop") => Some(Eth2Config::interop()), - _ => None, - }; - // if a CLI flag is specified, write the new config if it doesn't exist, - // otherwise notify the user that the file will not be written. 
- let eth2_config_from_file = match read_from_file::(eth2_config_path.clone()) { - Ok(config) => config, - Err(e) => { - crit!(log, "Failed to read the Eth2Config from file"; "error" => format!("{:?}", e)); - return; - } - }; - - let mut eth2_config = { - if let Some(cli_config) = cli_config { - if eth2_config_from_file.is_none() { - // write to file if one doesn't exist - if let Err(e) = write_to_file(eth2_config_path, &cli_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; - } - } else { - warn!( - log, - "Eth2Config file exists. Configuration file is ignored, using default" - ); - } - cli_config - } else { - // CLI config not specified, read from disk - match eth2_config_from_file { - Some(config) => config, - None => { - // set default to minimal - let eth2_config = Eth2Config::minimal(); - if let Err(e) = write_to_file(eth2_config_path, ð2_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; - } - eth2_config - } - } - } - }; - - // Update the eth2 config with any CLI flags. - match eth2_config.apply_cli_args(&matches) { - Ok(()) => (), - Err(s) => { - crit!(log, "Failed to parse Eth2Config CLI arguments"; "error" => s); - return; - } - }; - - // check to ensure the spec constants between the client and eth2_config match - if eth2_config.spec_constants != client_config.spec_constants { - crit!(log, "Specification constants do not match."; "client_config" => format!("{}", client_config.spec_constants), "eth2_config" => format!("{}", eth2_config.spec_constants)); - return; - } - // Start the node using a `tokio` executor. 
match run::run_beacon_node(client_config, eth2_config, &log) { Ok(_) => {} diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index f88cb7460b..d036ef0c40 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -1,6 +1,6 @@ use client::{ - error, notifier, BeaconChainTypes, Client, ClientConfig, ClientType, Eth2Config, - InitialiseBeaconChain, + error, notifier, BeaconChainTypes, Client, ClientConfig, ClientType, Eth1BackendMethod, + Eth2Config, }; use futures::sync::oneshot; use futures::Future; @@ -44,63 +44,36 @@ pub fn run_beacon_node( info!( log, - "BeaconNode init"; - "p2p_listen_address" => format!("{:?}", &other_client_config.network.listen_address), - "data_dir" => format!("{:?}", other_client_config.data_dir()), - "network_dir" => format!("{:?}", other_client_config.network.network_dir), - "spec_constants" => &spec_constants, + "Starting beacon node"; + "p2p_listen_address" => format!("{}", &other_client_config.network.listen_address), "db_type" => &other_client_config.db_type, + "spec_constants" => &spec_constants, ); + macro_rules! run_client { + ($store: ty, $eth_spec: ty) => { + run::>( + &db_path, + client_config, + eth2_config, + executor, + runtime, + log, + ) + }; + } + + if let Eth1BackendMethod::Web3 { .. 
} = client_config.eth1_backend_method { + return Err("Starting from web3 backend is not supported for interop.".into()); + } + match (db_type.as_str(), spec_constants.as_str()) { - ("disk", "minimal") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("memory", "minimal") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("disk", "mainnet") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("memory", "mainnet") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("disk", "interop") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("memory", "interop") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), + ("disk", "minimal") => run_client!(DiskStore, MinimalEthSpec), + ("disk", "mainnet") => run_client!(DiskStore, MainnetEthSpec), + ("disk", "interop") => run_client!(DiskStore, InteropEthSpec), + ("memory", "minimal") => run_client!(MemoryStore, MinimalEthSpec), + ("memory", "mainnet") => run_client!(MemoryStore, MainnetEthSpec), + ("memory", "interop") => run_client!(MemoryStore, InteropEthSpec), (db_type, spec) => { error!(log, "Unknown runtime configuration"; "spec_constants" => spec, "db_type" => db_type); Err("Unknown specification and/or db_type.".into()) @@ -118,7 +91,7 @@ fn run( log: &slog::Logger, ) -> error::Result<()> where - T: BeaconChainTypes + InitialiseBeaconChain + Clone, + T: BeaconChainTypes + Clone, T::Store: OpenDatabase, { let store = T::Store::open_database(&db_path)?; diff --git a/book/.gitignore b/book/.gitignore new file mode 100644 index 0000000000..7585238efe --- /dev/null +++ b/book/.gitignore @@ -0,0 +1 @@ +book diff --git a/book/book.toml b/book/book.toml new file mode 100644 index 0000000000..829c7b99c2 --- /dev/null +++ b/book/book.toml @@ -0,0 +1,6 @@ +[book] +authors = ["Paul Hauner"] 
+language = "en" +multilingual = false +src = "src" +title = "Lighthouse" diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md new file mode 100644 index 0000000000..4ffa694cd4 --- /dev/null +++ b/book/src/SUMMARY.md @@ -0,0 +1,10 @@ +# Summary + +* [Introduction](./intro.md) +* [Development Environment](./setup.md) +* [Simple Local Testnet](./simple-testnet.md) +* [Interop](./interop.md) + * [Environment](./interop-environment.md) + * [CLI Overview](./interop-cli.md) + * [Scenarios](./interop-scenarios.md) + * [Cheat-sheet](./interop-cheat-sheet.md) diff --git a/book/src/interop-cheat-sheet.md b/book/src/interop-cheat-sheet.md new file mode 100644 index 0000000000..ea7794c338 --- /dev/null +++ b/book/src/interop-cheat-sheet.md @@ -0,0 +1,139 @@ +# Interop Cheat-sheet + +This document contains a list of tips and tricks that may be useful during +interop testing. + +- When starting a beacon node: + - [Specify a boot node by multiaddr](#boot-node-multiaddr) + - [Specify a boot node by ENR](#boot-node-enr) + - [Avoid port clashes when starting multiple nodes](#port-bump) + - [Specify a custom slot time](#slot-time) +- Using the beacon node HTTP API: + - [Curl a node's ENR](#http-enr) + - [Curl a node's connected peers](#http-peer-ids) + - [Curl a node's local peer id](#http-peer-id) + - [Curl a node's listening multiaddrs](#http-listen-addresses) + - [Curl a node's beacon chain head](#http-head) + - [Curl a node's finalized checkpoint](#http-finalized) + +## Category: CLI + +The `--help` command provides detail on the CLI interface. Here are some +interop-specific CLI commands. + + +### Specify a boot node by multiaddr + +You can specify a static list of multiaddrs when booting Lighthouse using +the `--libp2p-addresses` command. + +#### Example: + +``` +$ ./beacon_node --libp2p-addresses /ip4/192.168.0.1/tcp/9000 +``` + + +### Specify a boot node by ENR + +You can specify a static list of Discv5 addresses when booting Lighthouse using +the `--boot-nodes` command. 
+ +#### Example: + +``` +$ ./beacon_node --boot-nodes -IW4QB2Hi8TPuEzQ41Cdf1r2AUU1FFVFDBJdJyOkWk2qXpZfFZQy2YnJIyoT_5fnbtrXUouoskmydZl4pIg90clIkYUDgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5 +``` + + +### Avoid port clashes when starting nodes + +Starting a second Lighthouse node on the same machine will fail due to TCP/UDP +port collisions. Use the `-b` (`--port-bump`) flag to increase all listening +ports by some `n`. + +#### Example: + +Increase all ports by `10` (using multiples of `10` is recommended). + +``` +$ ./beacon_node -b 10 +``` + + +### Start a testnet with a custom slot time + +Lighthouse can run at quite low slot times when there are few validators (e.g., +`500 ms` slot times should be fine for 8 validators). + +#### Example + +The `-t` (`--slot-time`) flag specifies the milliseconds per slot. + +``` +$ ./beacon_node testnet -t 500 recent 8 +``` + +> Note: `bootstrap` loads the slot time via HTTP and therefore conflicts with +> this flag. + +## Category: HTTP API + +Examples assume there is a Lighthouse node exposing a HTTP API on +`localhost:5052`. Responses are JSON. + + +### Get the node's ENR + +``` +$ curl localhost:5052/network/enr + +"-IW4QFyf1VlY5pZs0xZuvKMRZ9_cdl9WMCDAAJXZiZiuGcfRYoU40VPrYDLQj5prneJIz3zcbTjHp9BbThc-yiymJO8HgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5"% +``` + + +### Get a list of connected peer ids + +``` +$ curl localhost:5052/network/peers + +["QmeMFRTWfo3KbVG7dEBXGhyRMa29yfmnJBXW84rKuGEhuL"]% +``` + + +### Get the node's peer id + +``` +curl localhost:5052/network/peer_id + +"QmRD1qs2AqNNRdBcGHUGpUGkpih5cmdL32mhh22Sy79xsJ"% +``` + + +### Get the list of listening libp2p addresses + +Lists all the libp2p multiaddrs that the node is listening on. 
+
+```
+curl localhost:5052/network/listen_addresses
+
+["/ip4/127.0.0.1/tcp/9000","/ip4/192.168.1.121/tcp/9000","/ip4/172.17.0.1/tcp/9000","/ip4/172.42.0.1/tcp/9000","/ip6/::1/tcp/9000","/ip6/fdd3:c293:1bc::203/tcp/9000","/ip6/fdd3:c293:1bc:0:9aa9:b2ea:c610:44db/tcp/9000"]%
+```
+
+
+### Get the node's beacon chain head
+
+```
+curl localhost:5052/beacon/head
+
+{"slot":0,"block_root":"0x827bf71805540aa13f6d8c7d18b41b287b2094a4d7a28cbb8deb061dbf5df4f5","state_root":"0x90a78d73294bc9c7519a64e1912161be0e823eb472012ff54204e15a4d717fa5"}%
+```
+
+
+### Get the node's finalized checkpoint
+
+```
+curl localhost:5052/beacon/latest_finalized_checkpoint
+
+{"epoch":0,"root":"0x0000000000000000000000000000000000000000000000000000000000000000"}%
+```
diff --git a/book/src/interop-cli.md b/book/src/interop-cli.md
new file mode 100644
index 0000000000..3dad845f34
--- /dev/null
+++ b/book/src/interop-cli.md
@@ -0,0 +1,29 @@
+# Interop CLI Overview
+
+The Lighthouse CLI has two primary tasks:
+
+- **Resuming** an existing database with `$ ./beacon_node`.
+- **Creating** a new testnet database using `$ ./beacon_node testnet`.
+
+_See [Scenarios](./interop-scenarios.md) for methods we've anticipated will be
+used during interop._
+
+## Creating a new database
+
+There are several methods for creating a new beacon node database:
+
+- `quick`: using the `(validator_count, genesis_time)` tuple.
+- `recent`: as above but `genesis_time` is set to the start of some recent time
+ window.
+- `file`: loads the genesis file from disk in one of multiple formats.
+- `bootstrap`: a Lighthouse-specific method where we connect to a running node
+ and download its specification and genesis state via the HTTP API.
+
+See `$ ./beacon_node testnet --help` for more detail.
+
+## Resuming from an existing database
+
+Once a database has been created, it can be resumed by running `$ ./beacon_node`.
+
+Presently, this command will fail if no existing database is found. 
You must
+use the `$ ./beacon_node testnet` command to create a new database.
diff --git a/book/src/interop-environment.md b/book/src/interop-environment.md
new file mode 100644
index 0000000000..6d3568e29e
--- /dev/null
+++ b/book/src/interop-environment.md
@@ -0,0 +1,30 @@
+# Interop Environment
+
+All that is required for inter-op is a built and tested [development
+environment](./setup.md).
+
+## Repositories
+
+You will only require the [sigp/lighthouse](http://github.com/sigp/lighthouse)
+library.
+
+To allow for faster build/test iterations we will use the
+[`interop`](https://github.com/sigp/lighthouse/tree/interop) branch of
+[sigp/lighthouse](https://github.com/sigp/lighthouse/tree/interop) for
+September 2019 interop. **Please ensure you `git checkout interop` after
+cloning the repo.**
+
+## File System
+
+When lighthouse boots, it will create the following
+directories:
+
+- `~/.lighthouse`: database and configuration for the beacon node.
+- `~/.lighthouse-validator`: database and configuration for the validator
+ client.
+
+After building the binaries with `cargo build --release --all`, there will be a
+`target/release` directory in the root of the Lighthouse repository. This is
+where the `beacon_node` and `validator_client` binaries are located.
+
+You do not need to create any of these directories manually.
diff --git a/book/src/interop-scenarios.md b/book/src/interop-scenarios.md
new file mode 100644
index 0000000000..dc87893622
--- /dev/null
+++ b/book/src/interop-scenarios.md
@@ -0,0 +1,98 @@
+# Interop Scenarios
+
+Here we demonstrate some expected interop scenarios.
+
+All scenarios assume a working [development environment](./setup.md) and
+commands are based in the `target/release` directory (this is the build dir for
+`cargo`).
+
+Additional functions can be found in the [interop
+cheat-sheet](./interop-cheat-sheet.md). 
+ +### Table of contents + +- [Starting from a`validator_count, genesis_time` tuple](#quick-start) +- [Starting a node from a genesis state file](#state-file) +- [Starting a validator client](#val-client) +- [Exporting a genesis state file](#export) from a running Lighthouse + node + + + +### Start beacon node given a validator count and genesis_time + + +To start a brand-new beacon node (with no history) use: + +``` +$ ./beacon_node testnet -f quick 8 1567222226 +``` +> Notes: +> +> - This method conforms the ["Quick-start +genesis"](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#quick-start-genesis) +method in the `ethereum/eth2.0-pm` repository. +> - The `-f` flag ignores any existing database or configuration, backing them +> up before re-initializing. +> - `8` is the validator count and `1567222226` is the genesis time. +> - See `$ ./beacon_node testnet quick --help` for more configuration options. + + +### Start Beacon Node given a genesis state file + +A genesis state can be read from file using the `testnet file` subcommand. +There are three supported formats: + +- `ssz` (default) +- `json` +- `yaml` + +Start a new node using `/tmp/genesis.ssz` as the genesis state: + +``` +$ ./beacon_node testnet --spec minimal -f file ssz /tmp/genesis.ssz +``` + +> Notes: +> +> - The `-f` flag ignores any existing database or configuration, backing them +> up before re-initializing. +> - See `$ ./beacon_node testnet file --help` for more configuration options. +> - The `--spec` flag is required to allow SSZ parsing of fixed-length lists. + + +### Start an auto-configured validator client + +To start a brand-new validator client (with no history) use: + +``` +$ ./validator_client testnet -b insecure 0 8 +``` + +> Notes: +> +> - The `-b` flag means the validator client will "bootstrap" specs and config +> from the beacon node. 
+> - The `insecure` command dictates that the [interop keypairs](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#pubkeyprivkey-generation) +> will be used. +> - The `0 8` indicates that this validator client should manage 8 validators, +> starting at validator 0 (the first deposited validator). +> - The validator client will try to connect to the beacon node at `localhost`. +> See `--help` to configure that address and other features. +> - The validator client will operate very unsafely in `testnet` mode, happily +> swapping between chains and creating double-votes. + + +### Exporting a genesis file + +Genesis states can be downloaded from a running Lighthouse node via the HTTP API. Three content-types are supported: + +- `application/json` +- `application/yaml` +- `application/ssz` + +Using `curl`, a genesis state can be downloaded to `/tmp/genesis.ssz`: + +``` +$ curl --header "Content-Type: application/ssz" "localhost:5052/beacon/state/genesis" -o /tmp/genesis.ssz +``` diff --git a/book/src/interop-tips.md b/book/src/interop-tips.md new file mode 100644 index 0000000000..0d52e896ac --- /dev/null +++ b/book/src/interop-tips.md @@ -0,0 +1 @@ +# Interop Tips & Tricks diff --git a/book/src/interop.md b/book/src/interop.md new file mode 100644 index 0000000000..cb119d59da --- /dev/null +++ b/book/src/interop.md @@ -0,0 +1,11 @@ +# Lighthouse Interop Guide + +This guide is intended for other Ethereum 2.0 client developers performing +inter-operability testing with Lighthouse. + +## Chapters + +- Read about the required [development environment](./interop-environment.md). +- Get an [overview](./interop-cli.md) of the Lighthouse CLI. +- See how we expect to handle some [interop scenarios](./interop-scenarios.md). +- See the [interop cheat-sheet](./interop-cheat-sheet.md) for useful CLI tips. 
diff --git a/book/src/intro.md b/book/src/intro.md new file mode 100644 index 0000000000..ccf867a54e --- /dev/null +++ b/book/src/intro.md @@ -0,0 +1,27 @@ +# Lighthouse Documentation + +[![Build Status]][Build Link] [![Doc Status]][Doc Link] [![Chat Badge]][Chat Link] + +[Build Status]: https://gitlab.sigmaprime.io/sigp/lighthouse/badges/master/build.svg +[Build Link]: https://gitlab.sigmaprime.io/sigp/lighthouse/pipelines +[Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da +[Chat Link]: https://discord.gg/cyAszAh +[Doc Status]:https://img.shields.io/badge/rust--docs-master-orange +[Doc Link]: http://lighthouse-docs.sigmaprime.io/ + +Lighthouse is an **Ethereum 2.0 client** that connects to other Ethereum 2.0 +clients to form a resilient and decentralized proof-of-stake blockchain. + +It is written in Rust, maintained by Sigma Prime and funded by the Ethereum +Foundation, Consensys and other individuals and organisations. + +## Developer Resources + +Documentation is presently targeted at **researchers and developers**. It +assumes significant prior knowledge of Ethereum 2.0. + +Topics: + +- Get started with [development environment setup](./setup.md). +- See the [interop docs](./interop.md). +- [Run a simple testnet](./simple-testnet.md) in Only Three CLI Commandsâ„¢. diff --git a/book/src/setup.md b/book/src/setup.md new file mode 100644 index 0000000000..e53ca93d83 --- /dev/null +++ b/book/src/setup.md @@ -0,0 +1,65 @@ +# Development Environment Setup + +Follow this guide to get a Lighthouse development environment up-and-running. + +See the [Quick instructions](#quick-instructions) for a summary or the +[Detailed instructions](#detailed-instructions) for clarification. + +## Quick instructions + +1. Install Rust + Cargo with [rustup](https://rustup.rs/). +1. Install build dependencies using your package manager. 
+ - `$ clang protobuf libssl-dev cmake git-lfs` + - Ensure [git-lfs](https://git-lfs.github.com/) is installed with `git lfs + install`. +1. Clone the [sigp/lighthouse](https://github.com/sigp/lighthouse), ensuring to + **initialize submodules**. +1. In the root of the repo, run the tests with `cargo test --all --release`. +1. Then, build the binaries with `cargo build --all --release`. +1. Lighthouse is now fully built and tested. + +_Note: first-time compilation may take several minutes._ + +## Detailed instructions + +A fully-featured development environment can be achieved with the following +steps: + + 1. Install [rustup](https://rustup.rs/). + 1. Use the command `rustup show` to get information about the Rust + installation. You should see that the active tool-chain is the stable + version. + - Updates can be performed using `rustup update`, Lighthouse generally + requires a recent version of Rust. + 1. Install build dependencies (Arch packages are listed here, your + distribution will likely be similar): + - `clang`: required by RocksDB. + - `protobuf`: required for protobuf serialization (gRPC) + - `libssl-dev`: also gRPC + - `cmake`: required for building protobuf + - `git-lfs`: The Git extension for [Large File + Support](https://git-lfs.github.com/) (required for Ethereum Foundation + test vectors). + 1. Clone the repository with submodules: `git clone --recursive + https://github.com/sigp/lighthouse`. If you've already cloned the repo, + ensure testing submodules are present: `$ git submodule init; git + submodule update` + 1. Change directory to the root of the repository. + 1. Run the test suite with `cargo test --all --release`. The build and test + process can take several minutes. If you experience any failures on + `master`, please raise an + [issue](https://github.com/sigp/lighthouse/issues). + +### Notes: + +Lighthouse targets Rust `stable` but generally runs on `nightly` too. 
+ +#### Note for Windows users: + +Perl may also be required to build lighthouse. You can install [Strawberry +Perl](http://strawberryperl.com/), or alternatively use a choco install command +`choco install strawberryperl`. + +Additionally, the dependency `protoc-grpcio v0.3.1` is reported to have issues +compiling in Windows. You can specify a known working version by editing +version in `protos/Cargo.toml` section to `protoc-grpcio = "<=0.3.0"`. diff --git a/book/src/simple-testnet.md b/book/src/simple-testnet.md new file mode 100644 index 0000000000..b6fa19d6fe --- /dev/null +++ b/book/src/simple-testnet.md @@ -0,0 +1,81 @@ +# Simple Local Testnet + +You can setup a local, two-node testnet in **Only Three CLI Commandsâ„¢**. + +Follow the [Quick instructions](#tldr) version if you're confident, or see +[Detailed instructions](#detail) for more. + + +## Quick instructions + +Setup a development environment, build the project and navigate to the +`target/release` directory. + +1. Start the first node: `$ ./beacon_node testnet -f recent 8` +1. Start a validator client: `$ ./validator_client testnet -b insecure 0 8` +1. Start another node `$ ./beacon_node -b 10 testnet -f bootstrap http://localhost:5052` + +_Repeat #3 to add more nodes._ + +## Detailed instructions + +First, setup a Lighthouse development environment and navigate to the +`target/release` directory (this is where the binaries are located). + +## Starting the Beacon Node + +Start a new node (creating a fresh database and configuration in `~/.lighthouse`), using: + +``` +$ ./beacon_node testnet -f recent 8 +``` + +> Notes: +> +> - The `-f` flag ignores any existing database or configuration, backing them +> up before re-initializing. +> - `8` is number of validators with deposits in the genesis state. +> - See `$ ./beacon_node testnet recent --help` for more configuration options, +> including `minimal`/`mainnet` specification. 
+ +## Starting the Validator Client + +In a new terminal window, start the validator client with: + +``` +$ ./validator_client testnet -b insecure 0 8 +``` + +> Notes: +> +> - The `-b` flag means the validator client will "bootstrap" specs and config +> from the beacon node. +> - The `insecure` command uses predictable, well-known private keys. Since +> this is just a local testnet, these are fine. +> - The `0 8` indicates that this validator client should manage 8 validators, +> starting at validator 0 (the first deposited validator). +> - The validator client will try to connect to the beacon node at `localhost`. +> See `--help` to configure that address and other features. + +## Adding another Beacon Node + +You may connect another (non-validating) node to your local network using the +lighthouse `bootstrap` command. + +In a new terminal, run: + + +``` +$ ./beacon_node -b 10 testnet -r bootstrap +``` + +> Notes: +> +> - The `-b` (or `--port-bump`) increases all the listening TCP/UDP ports of +> the new node to `10` higher. Your first node's HTTP server was at TCP +> `5052` but this one will be at `5062`. +> - The `-r` flag creates a new data directory with a random string appended +> (avoids data directory collisions between nodes). +> - The default bootstrap HTTP address is `http://localhost:5052`. The new node +> will download configuration via HTTP before starting sync via libp2p. +> - See `$ ./beacon_node testnet bootstrap --help` for more configuration. diff --git a/book/src/testnets.md b/book/src/testnets.md new file mode 100644 index 0000000000..180673fb36 --- /dev/null +++ b/book/src/testnets.md @@ -0,0 +1,10 @@ +# Testnets + +Lighthouse does not offer a public testnet _yet_. In the meantime, it's easy to +start a local testnet: + +- [Run a simple testnet](simple-testnet.html) in Only Three CLI Commands™. +- Developers of other Eth2 clients should see the [interop guide](interop.html). 
+- The [sigp/lighthouse-docker](https://github.com/sigp/lighthouse-docker) repo + contains a `docker-compose` setup that runs a multi-node network with + built-in metrics and monitoring dashboards, all from your local machine. diff --git a/eth2/lmd_ghost/src/lib.rs b/eth2/lmd_ghost/src/lib.rs index 95cd0679c1..167cd36eaf 100644 --- a/eth2/lmd_ghost/src/lib.rs +++ b/eth2/lmd_ghost/src/lib.rs @@ -46,4 +46,10 @@ pub trait LmdGhost: Send + Sync { /// Returns the latest message for a given validator index. fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)>; + + /// Runs an integrity verification function on the fork choice algorithm. + /// + /// Returns `Ok(())` if the underlying fork choice has maintained its integrity, + /// `Err(description)` otherwise. + fn verify_integrity(&self) -> Result<()>; } diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index deda02e1fd..73fab13bfd 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -43,16 +43,6 @@ impl fmt::Debug for ThreadSafeReducedTree { } } -impl ThreadSafeReducedTree -where - T: Store, - E: EthSpec, -{ - pub fn verify_integrity(&self) -> std::result::Result<(), String> { - self.core.read().verify_integrity() - } -} - impl LmdGhost for ThreadSafeReducedTree where T: Store, @@ -80,7 +70,7 @@ where fn process_block(&self, block: &BeaconBlock, block_hash: Hash256) -> SuperResult<()> { self.core .write() - .add_weightless_node(block.slot, block_hash) + .maybe_add_weightless_node(block.slot, block_hash) .map_err(|e| format!("process_block failed: {:?}", e)) } @@ -113,6 +103,10 @@ where fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { self.core.read().latest_message(validator_index) } + + fn verify_integrity(&self) -> std::result::Result<(), String> { + self.core.read().verify_integrity() + } } struct ReducedTree { @@ -163,15 +157,7 @@ where /// The given `new_root` must be in the block tree (but not 
necessarily in the reduced tree). /// Any nodes which are not a descendant of `new_root` will be removed from the store. pub fn update_root(&mut self, new_slot: Slot, new_root: Hash256) -> Result<()> { - if !self.nodes.contains_key(&new_root) { - let node = Node { - block_hash: new_root, - voters: vec![], - ..Node::default() - }; - - self.add_node(node)?; - } + self.maybe_add_weightless_node(new_slot, new_root)?; self.retain_subtree(self.root.0, new_root)?; @@ -247,7 +233,7 @@ where // // In this case, we add a weightless node at `start_block_root`. if !self.nodes.contains_key(&start_block_root) { - self.add_weightless_node(start_block_slot, start_block_root)?; + self.maybe_add_weightless_node(start_block_slot, start_block_root)?; }; let _root_weight = self.update_weight(start_block_root, weight_fn)?; @@ -325,51 +311,53 @@ where /// become redundant and removed from the reduced tree. fn remove_latest_message(&mut self, validator_index: usize) -> Result<()> { if let Some(vote) = *self.latest_votes.get(validator_index) { - self.get_mut_node(vote.hash)?.remove_voter(validator_index); - let node = self.get_node(vote.hash)?.clone(); + if self.nodes.contains_key(&vote.hash) { + self.get_mut_node(vote.hash)?.remove_voter(validator_index); + let node = self.get_node(vote.hash)?.clone(); - if let Some(parent_hash) = node.parent_hash { - if node.has_votes() || node.children.len() > 1 { - // A node with votes or more than one child is never removed. - } else if node.children.len() == 1 { - // A node which has only one child may be removed. - // - // Load the child of the node and set it's parent to be the parent of this - // node (viz., graft the node's child to the node's parent) - let child = self.get_mut_node(node.children[0])?; - child.parent_hash = node.parent_hash; + if let Some(parent_hash) = node.parent_hash { + if node.has_votes() || node.children.len() > 1 { + // A node with votes or more than one child is never removed. 
+ } else if node.children.len() == 1 { + // A node which has only one child may be removed. + // + // Load the child of the node and set it's parent to be the parent of this + // node (viz., graft the node's child to the node's parent) + let child = self.get_mut_node(node.children[0])?; + child.parent_hash = node.parent_hash; - // Graft the parent of this node to it's child. - if let Some(parent_hash) = node.parent_hash { - let parent = self.get_mut_node(parent_hash)?; - parent.replace_child(node.block_hash, node.children[0])?; + // Graft the parent of this node to it's child. + if let Some(parent_hash) = node.parent_hash { + let parent = self.get_mut_node(parent_hash)?; + parent.replace_child(node.block_hash, node.children[0])?; + } + + self.nodes.remove(&vote.hash); + } else if node.children.is_empty() { + // Remove the to-be-deleted node from it's parent. + if let Some(parent_hash) = node.parent_hash { + self.get_mut_node(parent_hash)? + .remove_child(node.block_hash)?; + } + + self.nodes.remove(&vote.hash); + + // A node which has no children may be deleted and potentially it's parent + // too. + self.maybe_delete_node(parent_hash)?; + } else { + // It is impossible for a node to have a number of children that is not 0, 1 or + // greater than one. + // + // This code is strictly unnecessary, however we keep it for readability. + unreachable!(); } - - self.nodes.remove(&vote.hash); - } else if node.children.is_empty() { - // Remove the to-be-deleted node from it's parent. - if let Some(parent_hash) = node.parent_hash { - self.get_mut_node(parent_hash)? - .remove_child(node.block_hash)?; - } - - self.nodes.remove(&vote.hash); - - // A node which has no children may be deleted and potentially it's parent - // too. - self.maybe_delete_node(parent_hash)?; } else { - // It is impossible for a node to have a number of children that is not 0, 1 or - // greater than one. - // - // This code is strictly unnecessary, however we keep it for readability. 
- unreachable!(); + // A node without a parent is the genesis/finalized node and should never be removed. } - } else { - // A node without a parent is the genesis/finalized node and should never be removed. - } - self.latest_votes.insert(validator_index, Some(vote)); + self.latest_votes.insert(validator_index, Some(vote)); + } } Ok(()) @@ -384,25 +372,30 @@ where /// - it does not have any votes. fn maybe_delete_node(&mut self, hash: Hash256) -> Result<()> { let should_delete = { - let node = self.get_node(hash)?.clone(); + if let Ok(node) = self.get_node(hash) { + let node = node.clone(); - if let Some(parent_hash) = node.parent_hash { - if (node.children.len() == 1) && !node.has_votes() { - let child_hash = node.children[0]; + if let Some(parent_hash) = node.parent_hash { + if (node.children.len() == 1) && !node.has_votes() { + let child_hash = node.children[0]; - // Graft the single descendant `node` to the `parent` of node. - self.get_mut_node(child_hash)?.parent_hash = Some(parent_hash); + // Graft the single descendant `node` to the `parent` of node. + self.get_mut_node(child_hash)?.parent_hash = Some(parent_hash); - // Detach `node` from `parent`, replacing it with `child`. - self.get_mut_node(parent_hash)? - .replace_child(hash, child_hash)?; + // Detach `node` from `parent`, replacing it with `child`. + self.get_mut_node(parent_hash)? + .replace_child(hash, child_hash)?; - true + true + } else { + false + } } else { + // A node without a parent is the genesis node and should not be deleted. false } } else { - // A node without a parent is the genesis node and should not be deleted. + // No need to delete a node that does not exist. 
false } }; @@ -430,7 +423,7 @@ where Ok(()) } - fn add_weightless_node(&mut self, slot: Slot, hash: Hash256) -> Result<()> { + fn maybe_add_weightless_node(&mut self, slot: Slot, hash: Hash256) -> Result<()> { if slot > self.root_slot() && !self.nodes.contains_key(&hash) { let node = Node { block_hash: hash, @@ -477,6 +470,7 @@ where // descendant of both `node` and `prev_in_tree`. if self .iter_ancestors(child_hash)? + .take_while(|(_, slot)| *slot >= self.root_slot()) .any(|(ancestor, _slot)| ancestor == node.block_hash) { let child = self.get_mut_node(child_hash)?; @@ -562,6 +556,7 @@ where fn find_prev_in_tree(&mut self, hash: Hash256) -> Option { self.iter_ancestors(hash) .ok()? + .take_while(|(_, slot)| *slot >= self.root_slot()) .find(|(root, _slot)| self.nodes.contains_key(root)) .and_then(|(root, _slot)| Some(root)) } @@ -569,8 +564,12 @@ where /// For the two given block roots (`a_root` and `b_root`), find the first block they share in /// the tree. Viz, find the block that these two distinct blocks forked from. fn find_highest_common_ancestor(&self, a_root: Hash256, b_root: Hash256) -> Result { - let mut a_iter = self.iter_ancestors(a_root)?; - let mut b_iter = self.iter_ancestors(b_root)?; + let mut a_iter = self + .iter_ancestors(a_root)? + .take_while(|(_, slot)| *slot >= self.root_slot()); + let mut b_iter = self + .iter_ancestors(b_root)? + .take_while(|(_, slot)| *slot >= self.root_slot()); // Combines the `next()` fns on the `a_iter` and `b_iter` and returns the roots of two // blocks at the same slot, or `None` if we have gone past genesis or the root of this tree. 
diff --git a/eth2/lmd_ghost/tests/test.rs b/eth2/lmd_ghost/tests/test.rs index 4c79a704eb..49e9ff738a 100644 --- a/eth2/lmd_ghost/tests/test.rs +++ b/eth2/lmd_ghost/tests/test.rs @@ -4,7 +4,8 @@ extern crate lazy_static; use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness as BaseBeaconChainHarness, BlockStrategy, + generate_deterministic_keypairs, AttestationStrategy, + BeaconChainHarness as BaseBeaconChainHarness, BlockStrategy, }; use lmd_ghost::{LmdGhost, ThreadSafeReducedTree as BaseThreadSafeReducedTree}; use rand::{prelude::*, rngs::StdRng}; @@ -51,7 +52,7 @@ struct ForkedHarness { impl ForkedHarness { /// A new standard instance of with constant parameters. pub fn new() -> Self { - let harness = BeaconChainHarness::new(VALIDATOR_COUNT); + let harness = BeaconChainHarness::new(generate_deterministic_keypairs(VALIDATOR_COUNT)); // Move past the zero slot. harness.advance_slot(); diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 0badf38072..bb64c3ca26 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -16,9 +16,9 @@ use state_processing::per_block_processing::errors::{ }; use state_processing::per_block_processing::{ get_slashable_indices_modular, verify_attestation_for_block_inclusion, - verify_attestation_for_state, verify_attester_slashing, verify_exit, - verify_exit_time_independent_only, verify_proposer_slashing, verify_transfer, - verify_transfer_time_independent_only, VerifySignatures, + verify_attester_slashing, verify_exit, verify_exit_time_independent_only, + verify_proposer_slashing, verify_transfer, verify_transfer_time_independent_only, + VerifySignatures, }; use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet}; use std::marker::PhantomData; diff --git a/eth2/state_processing/src/per_block_processing/tests.rs b/eth2/state_processing/src/per_block_processing/tests.rs index cf64dc85e1..f419d5fae7 100644 --- 
a/eth2/state_processing/src/per_block_processing/tests.rs +++ b/eth2/state_processing/src/per_block_processing/tests.rs @@ -1,4 +1,5 @@ #![cfg(all(test, not(feature = "fake_crypto")))] + use super::block_processing_builder::BlockProcessingBuilder; use super::errors::*; use crate::{per_block_processing, BlockSignatureStrategy}; diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs index 43b66f3edf..a7390c8505 100644 --- a/eth2/state_processing/tests/tests.rs +++ b/eth2/state_processing/tests/tests.rs @@ -1,3 +1,5 @@ +#![cfg(not(feature = "fake_crypto"))] + use state_processing::{ per_block_processing, test_utils::BlockBuilder, BlockProcessingError, BlockSignatureStrategy, }; diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index 36cfc39eca..95d7a03174 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -31,3 +31,4 @@ tree_hash_derive = "0.2" [dev-dependencies] env_logger = "0.6.0" +serde_json = "^1.0" diff --git a/eth2/types/src/beacon_state/beacon_state_types.rs b/eth2/types/src/beacon_state/beacon_state_types.rs index 0e76942dde..f589b3d3ef 100644 --- a/eth2/types/src/beacon_state/beacon_state_types.rs +++ b/eth2/types/src/beacon_state/beacon_state_types.rs @@ -120,6 +120,13 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { fn epochs_per_historical_vector() -> usize { Self::EpochsPerHistoricalVector::to_usize() } + + /// Returns the `SLOTS_PER_ETH1_VOTING_PERIOD` constant for this specification. + /// + /// Spec v0.8.1 + fn slots_per_eth1_voting_period() -> usize { + Self::EpochsPerHistoricalVector::to_usize() + } } /// Macro to inherit some type values from another EthSpec. 
diff --git a/eth2/types/src/beacon_state/committee_cache/tests.rs b/eth2/types/src/beacon_state/committee_cache/tests.rs index 28e9d92f85..4c17d3f96c 100644 --- a/eth2/types/src/beacon_state/committee_cache/tests.rs +++ b/eth2/types/src/beacon_state/committee_cache/tests.rs @@ -9,7 +9,7 @@ fn default_values() { let cache = CommitteeCache::default(); assert_eq!(cache.is_initialized_at(Epoch::new(0)), false); - assert_eq!(cache.active_validator_indices(), &[]); + assert!(&cache.active_validator_indices().is_empty()); assert_eq!(cache.get_crosslink_committee_for_shard(0), None); assert_eq!(cache.get_attestation_duties(0), None); assert_eq!(cache.active_validator_count(), 0); diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index 9dec626d44..d59e0db0ac 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -58,7 +58,7 @@ pub struct ChainSpec { /* * Time parameters */ - pub seconds_per_slot: u64, + pub milliseconds_per_slot: u64, pub min_attestation_inclusion_delay: u64, pub min_seed_lookahead: Epoch, pub activation_exit_delay: u64, @@ -158,7 +158,7 @@ impl ChainSpec { /* * Time parameters */ - seconds_per_slot: 6, + milliseconds_per_slot: 6_000, min_attestation_inclusion_delay: 1, min_seed_lookahead: Epoch::new(1), activation_exit_delay: 4, @@ -221,7 +221,7 @@ impl ChainSpec { let boot_nodes = vec![]; Self { - seconds_per_slot: 12, + milliseconds_per_slot: 12_000, target_committee_size: 4, shuffle_round_count: 10, network_id: 13, diff --git a/eth2/types/src/slot_epoch_macros.rs b/eth2/types/src/slot_epoch_macros.rs index 084ff98e7c..62ca6b3af3 100644 --- a/eth2/types/src/slot_epoch_macros.rs +++ b/eth2/types/src/slot_epoch_macros.rs @@ -182,7 +182,7 @@ macro_rules! 
impl_display { &self, record: &slog::Record, key: slog::Key, - serializer: &mut slog::Serializer, + serializer: &mut dyn slog::Serializer, ) -> slog::Result { slog::Value::serialize(&self.0, record, key, serializer) } diff --git a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs index 98f8409538..cf8c9ec8ea 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs @@ -94,7 +94,7 @@ impl TestingBeaconStateBuilder { /// Creates the builder from an existing set of keypairs. pub fn from_keypairs(keypairs: Vec, spec: &ChainSpec) -> Self { let validator_count = keypairs.len(); - let starting_balance = 32_000_000_000; + let starting_balance = spec.max_effective_balance; debug!( "Building {} Validator objects from keypairs...", @@ -123,8 +123,10 @@ impl TestingBeaconStateBuilder { .collect::>() .into(); + let genesis_time = 1567052589; // 29 August, 2019; + let mut state = BeaconState::new( - spec.min_genesis_time, + genesis_time, Eth1Data { deposit_root: Hash256::zero(), deposit_count: 0, diff --git a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs index 6c72b520fa..b972934276 100644 --- a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs @@ -39,15 +39,15 @@ impl TestingProposerSlashingBuilder { ..header_1.clone() }; + let epoch = slot.epoch(T::slots_per_epoch()); + header_1.signature = { let message = header_1.signed_root(); - let epoch = slot.epoch(T::slots_per_epoch()); signer(proposer_index, &message[..], epoch, Domain::BeaconProposer) }; header_2.signature = { let message = header_2.signed_root(); - let epoch = slot.epoch(T::slots_per_epoch()); signer(proposer_index, &message[..], epoch, 
Domain::BeaconProposer) }; diff --git a/eth2/types/src/test_utils/generate_deterministic_keypairs.rs b/eth2/types/src/test_utils/generate_deterministic_keypairs.rs index 172b142ef9..a687eb978f 100644 --- a/eth2/types/src/test_utils/generate_deterministic_keypairs.rs +++ b/eth2/types/src/test_utils/generate_deterministic_keypairs.rs @@ -1,5 +1,5 @@ use crate::*; -use eth2_interop_keypairs::be_private_key; +use eth2_interop_keypairs::keypair; use log::debug; use rayon::prelude::*; @@ -15,8 +15,8 @@ pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec { let keypairs: Vec = (0..validator_count) .collect::>() - .par_iter() - .map(|&i| generate_deterministic_keypair(i)) + .into_par_iter() + .map(generate_deterministic_keypair) .collect(); keypairs @@ -26,8 +26,9 @@ pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec { /// /// This is used for testing only, and not to be used in production! pub fn generate_deterministic_keypair(validator_index: usize) -> Keypair { - let sk = SecretKey::from_bytes(&be_private_key(validator_index)) - .expect("be_private_key always returns valid keys"); - let pk = PublicKey::from_secret_key(&sk); - Keypair { sk, pk } + let raw = keypair(validator_index); + Keypair { + pk: PublicKey::from_raw(raw.pk), + sk: SecretKey::from_raw(raw.sk), + } } diff --git a/eth2/utils/bls/src/fake_public_key.rs b/eth2/utils/bls/src/fake_public_key.rs index e8dafaca6c..82b1c707f2 100644 --- a/eth2/utils/bls/src/fake_public_key.rs +++ b/eth2/utils/bls/src/fake_public_key.rs @@ -1,5 +1,6 @@ use super::{SecretKey, BLS_PUBLIC_KEY_BYTE_SIZE}; use milagro_bls::G1Point; +use milagro_bls::PublicKey as RawPublicKey; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::{encode as hex_encode, HexVisitor}; @@ -24,6 +25,13 @@ impl FakePublicKey { Self::zero() } + pub fn from_raw(raw: RawPublicKey) -> Self { + Self { + bytes: raw.clone().as_bytes(), + point: G1Point::new(), + } + } + /// Creates 
a new all-zero's public key pub fn zero() -> Self { Self { diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index e03b17686a..4b5abb58e3 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -20,6 +20,10 @@ impl PublicKey { PublicKey(RawPublicKey::from_secret_key(secret_key.as_raw())) } + pub fn from_raw(raw: RawPublicKey) -> Self { + Self(raw) + } + /// Returns the underlying signature. pub fn as_raw(&self) -> &RawPublicKey { &self.0 diff --git a/eth2/utils/bls/src/public_key_bytes.rs b/eth2/utils/bls/src/public_key_bytes.rs index f757351403..afdbcb2701 100644 --- a/eth2/utils/bls/src/public_key_bytes.rs +++ b/eth2/utils/bls/src/public_key_bytes.rs @@ -31,6 +31,7 @@ mod tests { } #[test] + #[cfg(not(feature = "fake_crypto"))] pub fn test_invalid_public_key() { let mut public_key_bytes = [0; BLS_PUBLIC_KEY_BYTE_SIZE]; public_key_bytes[0] = 255; //a_flag1 == b_flag1 == c_flag1 == 1 and x1 = 0 shouldn't be allowed diff --git a/eth2/utils/bls/src/secret_key.rs b/eth2/utils/bls/src/secret_key.rs index 12f9a713b3..54da0fa0f3 100644 --- a/eth2/utils/bls/src/secret_key.rs +++ b/eth2/utils/bls/src/secret_key.rs @@ -20,6 +20,10 @@ impl SecretKey { SecretKey(RawSecretKey::random(&mut rand::thread_rng())) } + pub fn from_raw(raw: RawSecretKey) -> Self { + Self(raw) + } + /// Returns the underlying point as compressed bytes. 
fn as_bytes(&self) -> Vec { self.as_raw().as_bytes() diff --git a/eth2/utils/bls/src/signature_bytes.rs b/eth2/utils/bls/src/signature_bytes.rs index a30cecb4d5..b89c0f0d11 100644 --- a/eth2/utils/bls/src/signature_bytes.rs +++ b/eth2/utils/bls/src/signature_bytes.rs @@ -32,6 +32,7 @@ mod tests { } #[test] + #[cfg(not(feature = "fake_crypto"))] pub fn test_invalid_signature() { let mut signature_bytes = [0; BLS_SIG_BYTE_SIZE]; signature_bytes[0] = 255; //a_flag1 == b_flag1 == c_flag1 == 1 and x1 = 0 shouldn't be allowed diff --git a/eth2/utils/eth2_interop_keypairs/Cargo.toml b/eth2/utils/eth2_interop_keypairs/Cargo.toml index e1c4dab040..31f9718cd4 100644 --- a/eth2/utils/eth2_interop_keypairs/Cargo.toml +++ b/eth2/utils/eth2_interop_keypairs/Cargo.toml @@ -7,5 +7,13 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +lazy_static = "1.4" num-bigint = "0.2" eth2_hashing = "0.1" +milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v0.10.0" } + +[dev-dependencies] +base64 = "0.10" +serde = "1.0" +serde_derive = "1.0" +serde_yaml = "0.8" diff --git a/eth2/utils/eth2_interop_keypairs/src/lib.rs b/eth2/utils/eth2_interop_keypairs/src/lib.rs index 8ba2b9eba3..ac610ee776 100644 --- a/eth2/utils/eth2_interop_keypairs/src/lib.rs +++ b/eth2/utils/eth2_interop_keypairs/src/lib.rs @@ -1,130 +1,65 @@ //! Produces the "deterministic" validator private keys used for inter-operability testing for //! Ethereum 2.0 clients. //! -//! Each private key is the first hash in the sha2 hash-chain that is less than 2^255. As such, -//! keys generated here are **not secret** and are **not for production use**. +//! Each private key is the sha2 hash of the validator index (little-endian, padded to 32 bytes), +//! modulo the BLS-381 curve order. //! -//! Note: these keys have not been tested against a reference implementation, yet. +//! 
Keys generated here are **not secret** and are **not for production use**. It is trivial to +//! know the secret key for any validator. +//! +//!## Reference +//! +//! Reference implementation: +//! +//! https://github.com/ethereum/eth2.0-pm/blob/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start/keygen.py +//! +//! +//! This implementation passes the [reference implementation +//! tests](https://github.com/ethereum/eth2.0-pm/blob/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start/keygen_test_vector.yaml). +#[macro_use] +extern crate lazy_static; use eth2_hashing::hash; +use milagro_bls::{Keypair, PublicKey, SecretKey}; use num_bigint::BigUint; -pub const CURVE_ORDER_BITS: usize = 255; pub const PRIVATE_KEY_BYTES: usize = 48; pub const HASH_BYTES: usize = 32; -fn hash_big_int_le(uint: BigUint) -> BigUint { - let mut preimage = uint.to_bytes_le(); - preimage.resize(32, 0_u8); - BigUint::from_bytes_le(&hash(&preimage)) +lazy_static! { + static ref CURVE_ORDER: BigUint = + "52435875175126190479447740508185965837690552500527637822603658699938581184513" + .parse::() + .expect("Curve order should be valid"); } -fn private_key(validator_index: usize) -> BigUint { - let mut key = BigUint::from(validator_index); - loop { - key = hash_big_int_le(key); - if key.bits() <= CURVE_ORDER_BITS { - break key; - } - } -} - -/// Generates an **unsafe** BLS12-381 private key for the given validator index, where that private -/// key is represented in big-endian bytes. +/// Return a G1 point for the given `validator_index`, encoded as a compressed point in +/// big-endian byte-ordering. 
pub fn be_private_key(validator_index: usize) -> [u8; PRIVATE_KEY_BYTES] { - let vec = private_key(validator_index).to_bytes_be(); + let preimage = { + let mut bytes = [0; HASH_BYTES]; + let index = validator_index.to_le_bytes(); + bytes[0..index.len()].copy_from_slice(&index); + bytes + }; - let mut out = [0; PRIVATE_KEY_BYTES]; - out[PRIVATE_KEY_BYTES - vec.len()..PRIVATE_KEY_BYTES].copy_from_slice(&vec); - out + let privkey = BigUint::from_bytes_le(&hash(&preimage)) % &*CURVE_ORDER; + + let mut bytes = [0; PRIVATE_KEY_BYTES]; + let privkey_bytes = privkey.to_bytes_be(); + bytes[PRIVATE_KEY_BYTES - privkey_bytes.len()..].copy_from_slice(&privkey_bytes); + bytes } -/// Generates an **unsafe** BLS12-381 private key for the given validator index, where that private -/// key is represented in little-endian bytes. -pub fn le_private_key(validator_index: usize) -> [u8; PRIVATE_KEY_BYTES] { - let vec = private_key(validator_index).to_bytes_le(); +/// Return a public and private keypair for a given `validator_index`. 
+pub fn keypair(validator_index: usize) -> Keypair { + let sk = SecretKey::from_bytes(&be_private_key(validator_index)).expect(&format!( + "Should build valid private key for validator index {}", + validator_index + )); - let mut out = [0; PRIVATE_KEY_BYTES]; - out[0..vec.len()].copy_from_slice(&vec); - out -} - -#[cfg(test)] -mod tests { - use super::*; - - fn flip(vec: &[u8]) -> Vec { - let len = vec.len(); - let mut out = vec![0; len]; - for i in 0..len { - out[len - 1 - i] = vec[i]; - } - out - } - - fn pad_le_bls(mut vec: Vec) -> Vec { - vec.resize(PRIVATE_KEY_BYTES, 0_u8); - vec - } - - fn pad_be_bls(mut vec: Vec) -> Vec { - let mut out = vec![0; PRIVATE_KEY_BYTES - vec.len()]; - out.append(&mut vec); - out - } - - fn pad_le_hash(index: usize) -> Vec { - let mut vec = index.to_le_bytes().to_vec(); - vec.resize(HASH_BYTES, 0_u8); - vec - } - - fn multihash(index: usize, rounds: usize) -> Vec { - let mut vec = pad_le_hash(index); - for _ in 0..rounds { - vec = hash(&vec); - } - vec - } - - fn compare(validator_index: usize, preimage: &[u8]) { - assert_eq!( - &le_private_key(validator_index)[..], - &pad_le_bls(hash(preimage))[..] - ); - assert_eq!( - &be_private_key(validator_index)[..], - &pad_be_bls(flip(&hash(preimage)))[..] - ); - } - - #[test] - fn consistency() { - for i in 0..256 { - let le = BigUint::from_bytes_le(&le_private_key(i)); - let be = BigUint::from_bytes_be(&be_private_key(i)); - assert_eq!(le, be); - } - } - - #[test] - fn non_repeats() { - // These indices only need one hash to be in the curve order. - compare(0, &pad_le_hash(0)); - compare(3, &pad_le_hash(3)); - } - - #[test] - fn repeats() { - // Index 5 needs 5x hashes to get into the curve order. 
- compare(5, &multihash(5, 5)); - } - - #[test] - fn doesnt_panic() { - for i in 0..256 { - be_private_key(i); - le_private_key(i); - } + Keypair { + pk: PublicKey::from_secret_key(&sk), + sk, } } diff --git a/eth2/utils/eth2_interop_keypairs/tests/test.rs b/eth2/utils/eth2_interop_keypairs/tests/test.rs new file mode 100644 index 0000000000..0d89eaa4dc --- /dev/null +++ b/eth2/utils/eth2_interop_keypairs/tests/test.rs @@ -0,0 +1,64 @@ +#![cfg(test)] +use eth2_interop_keypairs::{be_private_key, keypair}; +use num_bigint::BigUint; + +#[test] +fn reference_private_keys() { + // Sourced from: + // + // https://github.com/ethereum/eth2.0-pm/blob/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start/keygen_test_vector.yaml + let reference = [ + "16808672146709759238327133555736750089977066230599028589193936481731504400486", + "37006103240406073079686739739280712467525465637222501547219594975923976982528", + "22330876536127119444572216874798222843352868708084730796787004036811744442455", + "17048462031355941381150076874414096388968985457797372268770826099852902060945", + "28647806952216650698330424381872693846361470773871570637461872359310549743691", + "2416304019107052589452838695606585506736351107897780798170812672519914514344", + "7300215445567548136411883691093515822872548648751398235557229381530420545683", + "26495790445032093722332687600112008700915252495659977774957922313678954054133", + "2908643403277969554503670470854573663206729491025062456164283925661321952518", + "19554639423851580804889717218680781396599791537051606512605582393920758869044", + ]; + reference + .into_iter() + .enumerate() + .for_each(|(i, reference)| { + let bytes = be_private_key(i); + let num = BigUint::from_bytes_be(&bytes); + assert_eq!(&num.to_str_radix(10), reference) + }); +} + +#[test] +fn reference_public_keys() { + // Sourced from: + // + // https://github.com/ethereum/eth2.0-pm/blob/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start/keygen_test_vector.yaml + let 
reference = [ + "qZp27XeW974i1bfoXe63xWd+iOUR4LM3YY+MTrYTSbS/LRU/ZJ97UzWf6LlKOORM", + "uJvrxpl2lyajGMjplxvTFxKXxhrqSmV4p6T5S1R9y6W6wWqJEItrah/jaV0ah0oL", + "o6MrD4tN24PxoKhT2B3XJd/ld9T0w9uOzlLOKwJuyoSBXBp+jpKk3j11VzO/fkqb", + "iMFB33fNnY16cadcgmxBqcnwPG7hsYDz54UvaigAmd7TUbWNZuZTr45CgWpNj1Mu", + "gSg7eiDhykYOvZu9dwBdVXNwyrsfmkT1MMTExmIw9nX434tMKBiFGqfXeoDKWkpe", + "qwvdoPhfhC9DG+rM8SUL8f17pRtBAP1kNktkAf2oW7AGmz5xW1iBloTn/AsQpyo0", + "mXfxyLcxqNVVgUa/uGyuomQ088WHi1ib8oCkLJFZ5wDp3w5AhilsILAR0ueMJ9Nz", + "qNTHwneVpyWWExfvWVOnAy7W2Dc524sOinI1PRuLRDlCf376LInKoDzJ8o+Muris", + "ptMQ27+rmiJFD1mZP4ekzl22Ij87Xx8w0sTscYki1ADgs8d0HejlmWD3JBGg7hCn", + "mJNBPAAoOj+e2f2YRd2hzqOCKNIlZ/lUHczDV+VKLWpuIEEDySVky8BfSQWsfEk6", + ]; + reference + .into_iter() + .enumerate() + .for_each(|(i, reference)| { + let pair = keypair(i); + let reference = base64::decode(reference).expect("Reference should be valid base64"); + + assert_eq!( + reference.len(), + 48, + "Reference should be 48 bytes (public key size)" + ); + + assert_eq!(pair.pk.as_bytes(), reference); + }); +} diff --git a/eth2/utils/lighthouse_bootstrap/Cargo.toml b/eth2/utils/lighthouse_bootstrap/Cargo.toml new file mode 100644 index 0000000000..cfc4c6bafd --- /dev/null +++ b/eth2/utils/lighthouse_bootstrap/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "lighthouse_bootstrap" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +eth2_config = { path = "../eth2_config" } +eth2-libp2p = { path = "../../../beacon_node/eth2-libp2p" } +reqwest = "0.9" +url = "1.2" +types = { path = "../../types" } +serde = "1.0" +slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_trace"] } diff --git a/beacon_node/client/src/bootstrapper.rs b/eth2/utils/lighthouse_bootstrap/src/lib.rs similarity index 77% rename from beacon_node/client/src/bootstrapper.rs rename to 
eth2/utils/lighthouse_bootstrap/src/lib.rs index c94d9a51d8..92a587ff2e 100644 --- a/beacon_node/client/src/bootstrapper.rs +++ b/eth2/utils/lighthouse_bootstrap/src/lib.rs @@ -1,14 +1,20 @@ +use eth2_config::Eth2Config; use eth2_libp2p::{ multiaddr::{Multiaddr, Protocol}, Enr, }; use reqwest::{Error as HttpError, Url}; use serde::Deserialize; +use slog::{error, Logger}; use std::borrow::Cow; use std::net::Ipv4Addr; +use std::time::Duration; use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Hash256, Slot}; use url::Host; +pub const RETRY_SLEEP_MILLIS: u64 = 100; +pub const RETRY_WARN_INTERVAL: u64 = 30; + #[derive(Debug)] enum Error { InvalidUrl, @@ -30,11 +36,35 @@ pub struct Bootstrapper { } impl Bootstrapper { - /// Parses the given `server` as a URL, instantiating `Self`. - pub fn from_server_string(server: String) -> Result { - Ok(Self { + /// Parses the given `server` as a URL, instantiating `Self` and blocking until a connection + /// can be made with the server. + /// + /// Never times out. + pub fn connect(server: String, log: &Logger) -> Result { + let bootstrapper = Self { url: Url::parse(&server).map_err(|e| format!("Invalid bootstrap server url: {}", e))?, - }) + }; + + let mut retry_count = 0; + loop { + match bootstrapper.enr() { + Ok(_) => break, + Err(_) => { + if retry_count % RETRY_WARN_INTERVAL == 0 { + error!( + log, + "Failed to contact bootstrap server"; + "retry_count" => retry_count, + "retry_delay_millis" => RETRY_SLEEP_MILLIS, + ); + } + retry_count += 1; + std::thread::sleep(Duration::from_millis(RETRY_SLEEP_MILLIS)); + } + } + } + + Ok(bootstrapper) } /// Build a multiaddr using the HTTP server URL that is not guaranteed to be correct. @@ -46,8 +76,12 @@ impl Bootstrapper { /// For example, the server `http://192.168.0.1` might end up with a `best_effort_multiaddr` of /// `/ipv4/192.168.0.1/tcp/9000` if the server advertises a listening address of /// `/ipv4/172.0.0.1/tcp/9000`. 
- pub fn best_effort_multiaddr(&self) -> Option { - let tcp_port = self.listen_port().ok()?; + pub fn best_effort_multiaddr(&self, port: Option) -> Option { + let tcp_port = if let Some(port) = port { + port + } else { + self.listen_port().ok()? + }; let mut multiaddr = Multiaddr::with_capacity(2); @@ -70,6 +104,11 @@ impl Bootstrapper { } } + /// Returns the servers Eth2Config. + pub fn eth2_config(&self) -> Result { + get_eth2_config(self.url.clone()).map_err(|e| format!("Unable to get Eth2Config: {:?}", e)) + } + /// Returns the servers ENR address. pub fn enr(&self) -> Result { get_enr(self.url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e)) @@ -125,6 +164,19 @@ fn get_slots_per_epoch(mut url: Url) -> Result { .map_err(Into::into) } +fn get_eth2_config(mut url: Url) -> Result { + url.path_segments_mut() + .map(|mut url| { + url.push("spec").push("eth2_config"); + }) + .map_err(|_| Error::InvalidUrl)?; + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} + fn get_finalized_slot(mut url: Url, slots_per_epoch: u64) -> Result { url.path_segments_mut() .map(|mut url| { diff --git a/eth2/utils/merkle_proof/Cargo.toml b/eth2/utils/merkle_proof/Cargo.toml index 6ef6cc0aac..5ffb6af532 100644 --- a/eth2/utils/merkle_proof/Cargo.toml +++ b/eth2/utils/merkle_proof/Cargo.toml @@ -7,3 +7,8 @@ edition = "2018" [dependencies] ethereum-types = "0.6" eth2_hashing = { path = "../eth2_hashing" } +lazy_static = "1.3.0" + +[dev-dependencies] +quickcheck = "0.8" +quickcheck_macros = "0.8" diff --git a/eth2/utils/merkle_proof/src/lib.rs b/eth2/utils/merkle_proof/src/lib.rs index bc8bcea127..73a972c759 100644 --- a/eth2/utils/merkle_proof/src/lib.rs +++ b/eth2/utils/merkle_proof/src/lib.rs @@ -1,6 +1,138 @@ +#[macro_use] +extern crate lazy_static; + use eth2_hashing::hash; use ethereum_types::H256; +const MAX_TREE_DEPTH: usize = 32; +const EMPTY_SLICE: &[H256] = &[]; + +lazy_static! 
{ + /// Cached zero hashes where `ZERO_HASHES[i]` is the hash of a Merkle tree with 2^i zero leaves. + static ref ZERO_HASHES: Vec = { + let mut hashes = vec![H256::from([0; 32]); MAX_TREE_DEPTH + 1]; + + for i in 0..MAX_TREE_DEPTH { + hashes[i + 1] = hash_concat(hashes[i], hashes[i]); + } + + hashes + }; + + /// Zero nodes to act as "synthetic" left and right subtrees of other zero nodes. + static ref ZERO_NODES: Vec = { + (0..MAX_TREE_DEPTH + 1).map(MerkleTree::Zero).collect() + }; +} + +/// Right-sparse Merkle tree. +/// +/// Efficiently represents a Merkle tree of fixed depth where only the first N +/// indices are populated by non-zero leaves (perfect for the deposit contract tree). +#[derive(Debug)] +pub enum MerkleTree { + /// Leaf node with the hash of its content. + Leaf(H256), + /// Internal node with hash, left subtree and right subtree. + Node(H256, Box, Box), + /// Zero subtree of a given depth. + /// + /// It represents a Merkle tree of 2^depth zero leaves. + Zero(usize), +} + +impl MerkleTree { + /// Create a new Merkle tree from a list of leaves and a fixed depth. + pub fn create(leaves: &[H256], depth: usize) -> Self { + use MerkleTree::*; + + if leaves.is_empty() { + return Zero(depth); + } + + match depth { + 0 => { + debug_assert_eq!(leaves.len(), 1); + Leaf(leaves[0]) + } + _ => { + // Split leaves into left and right subtrees + let subtree_capacity = 2usize.pow(depth as u32 - 1); + let (left_leaves, right_leaves) = if leaves.len() <= subtree_capacity { + (leaves, EMPTY_SLICE) + } else { + leaves.split_at(subtree_capacity) + }; + + let left_subtree = MerkleTree::create(left_leaves, depth - 1); + let right_subtree = MerkleTree::create(right_leaves, depth - 1); + let hash = hash_concat(left_subtree.hash(), right_subtree.hash()); + + Node(hash, Box::new(left_subtree), Box::new(right_subtree)) + } + } + } + + /// Retrieve the root hash of this Merkle tree. 
+ pub fn hash(&self) -> H256 { + match *self { + MerkleTree::Leaf(h) => h, + MerkleTree::Node(h, _, _) => h, + MerkleTree::Zero(depth) => ZERO_HASHES[depth], + } + } + + /// Get a reference to the left and right subtrees if they exist. + pub fn left_and_right_branches(&self) -> Option<(&Self, &Self)> { + match *self { + MerkleTree::Leaf(_) | MerkleTree::Zero(0) => None, + MerkleTree::Node(_, ref l, ref r) => Some((l, r)), + MerkleTree::Zero(depth) => Some((&ZERO_NODES[depth - 1], &ZERO_NODES[depth - 1])), + } + } + + /// Is this Merkle tree a leaf? + pub fn is_leaf(&self) -> bool { + match self { + MerkleTree::Leaf(_) => true, + _ => false, + } + } + + /// Return the leaf at `index` and a Merkle proof of its inclusion. + /// + /// The Merkle proof is in "bottom-up" order, starting with a leaf node + /// and moving up the tree. Its length will be exactly equal to `depth`. + pub fn generate_proof(&self, index: usize, depth: usize) -> (H256, Vec) { + let mut proof = vec![]; + let mut current_node = self; + let mut current_depth = depth; + while current_depth > 0 { + let ith_bit = (index >> (current_depth - 1)) & 0x01; + // Note: unwrap is safe because leaves are only ever constructed at depth == 0. + let (left, right) = current_node.left_and_right_branches().unwrap(); + + // Go right, include the left branch in the proof. + if ith_bit == 1 { + proof.push(left.hash()); + current_node = right; + } else { + proof.push(right.hash()); + current_node = left; + } + current_depth -= 1; + } + + debug_assert_eq!(proof.len(), depth); + debug_assert!(current_node.is_leaf()); + + // Put proof in bottom-up order. + proof.reverse(); + + (current_node.hash(), proof) + } +} + /// Verify a proof that `leaf` exists at `index` in a Merkle tree rooted at `root`. /// /// The `branch` argument is the main component of the proof: it should be a list of internal @@ -46,15 +178,66 @@ fn concat(mut vec1: Vec, mut vec2: Vec) -> Vec { vec1 } +/// Compute the hash of two other hashes concatenated. 
+fn hash_concat(h1: H256, h2: H256) -> H256 { + H256::from_slice(&hash(&concat( + h1.as_bytes().to_vec(), + h2.as_bytes().to_vec(), + ))) +} + #[cfg(test)] mod tests { use super::*; + use quickcheck::TestResult; + use quickcheck_macros::quickcheck; - fn hash_concat(h1: H256, h2: H256) -> H256 { - H256::from_slice(&hash(&concat( - h1.as_bytes().to_vec(), - h2.as_bytes().to_vec(), - ))) + /// Check that we can: + /// 1. Build a MerkleTree from arbitrary leaves and an arbitrary depth. + /// 2. Generate valid proofs for all of the leaves of this MerkleTree. + #[quickcheck] + fn quickcheck_create_and_verify(int_leaves: Vec, depth: usize) -> TestResult { + if depth > MAX_TREE_DEPTH || int_leaves.len() > 2usize.pow(depth as u32) { + return TestResult::discard(); + } + + let leaves: Vec<_> = int_leaves.into_iter().map(H256::from_low_u64_be).collect(); + let merkle_tree = MerkleTree::create(&leaves, depth); + let merkle_root = merkle_tree.hash(); + + let proofs_ok = (0..leaves.len()).into_iter().all(|i| { + let (leaf, branch) = merkle_tree.generate_proof(i, depth); + leaf == leaves[i] && verify_merkle_proof(leaf, &branch, depth, i, merkle_root) + }); + + TestResult::from_bool(proofs_ok) + } + + #[test] + fn sparse_zero_correct() { + let depth = 2; + let zero = H256::from([0x00; 32]); + let dense_tree = MerkleTree::create(&[zero, zero, zero, zero], depth); + let sparse_tree = MerkleTree::create(&[], depth); + assert_eq!(dense_tree.hash(), sparse_tree.hash()); + } + + #[test] + fn create_small_example() { + // Construct a small merkle tree manually and check that it's consistent with + // the MerkleTree type. 
+ let leaf_b00 = H256::from([0xAA; 32]); + let leaf_b01 = H256::from([0xBB; 32]); + let leaf_b10 = H256::from([0xCC; 32]); + let leaf_b11 = H256::from([0xDD; 32]); + + let node_b0x = hash_concat(leaf_b00, leaf_b01); + let node_b1x = hash_concat(leaf_b10, leaf_b11); + + let root = hash_concat(node_b0x, node_b1x); + + let tree = MerkleTree::create(&[leaf_b00, leaf_b01, leaf_b10, leaf_b11], 2); + assert_eq!(tree.hash(), root); } #[test] diff --git a/eth2/utils/slot_clock/src/lib.rs b/eth2/utils/slot_clock/src/lib.rs index 871743c9e6..fd3bf029be 100644 --- a/eth2/utils/slot_clock/src/lib.rs +++ b/eth2/utils/slot_clock/src/lib.rs @@ -5,24 +5,37 @@ mod metrics; mod system_time_slot_clock; mod testing_slot_clock; -use std::time::Duration; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; -pub use crate::system_time_slot_clock::{Error as SystemTimeSlotClockError, SystemTimeSlotClock}; -pub use crate::testing_slot_clock::{Error as TestingSlotClockError, TestingSlotClock}; +pub use crate::system_time_slot_clock::SystemTimeSlotClock; +pub use crate::testing_slot_clock::TestingSlotClock; pub use metrics::scrape_for_metrics; pub use types::Slot; pub trait SlotClock: Send + Sync + Sized { - type Error; + fn from_eth2_genesis( + genesis_slot: Slot, + genesis_seconds: u64, + slot_duration: Duration, + ) -> Option { + let duration_between_now_and_unix_epoch = + SystemTime::now().duration_since(UNIX_EPOCH).ok()?; + let duration_between_unix_epoch_and_genesis = Duration::from_secs(genesis_seconds); - /// Create a new `SlotClock`. - /// - /// Returns an Error if `slot_duration_seconds == 0`. 
- fn new(genesis_slot: Slot, genesis_seconds: u64, slot_duration_seconds: u64) -> Self; + if duration_between_now_and_unix_epoch < duration_between_unix_epoch_and_genesis { + None + } else { + let genesis_instant = Instant::now() + - (duration_between_now_and_unix_epoch - duration_between_unix_epoch_and_genesis); + Some(Self::new(genesis_slot, genesis_instant, slot_duration)) + } + } - fn present_slot(&self) -> Result, Self::Error>; + fn new(genesis_slot: Slot, genesis: Instant, slot_duration: Duration) -> Self; - fn duration_to_next_slot(&self) -> Result, Self::Error>; + fn now(&self) -> Option; - fn slot_duration_millis(&self) -> u64; + fn duration_to_next_slot(&self) -> Option; + + fn slot_duration(&self) -> Duration; } diff --git a/eth2/utils/slot_clock/src/metrics.rs b/eth2/utils/slot_clock/src/metrics.rs index e0d3923e00..d1de491d00 100644 --- a/eth2/utils/slot_clock/src/metrics.rs +++ b/eth2/utils/slot_clock/src/metrics.rs @@ -17,8 +17,8 @@ lazy_static! { /// Update the global metrics `DEFAULT_REGISTRY` with info from the slot clock. 
pub fn scrape_for_metrics(clock: &U) { - let present_slot = match clock.present_slot() { - Ok(Some(slot)) => slot, + let present_slot = match clock.now() { + Some(slot) => slot, _ => Slot::new(0), }; @@ -28,5 +28,8 @@ pub fn scrape_for_metrics(clock: &U) { present_slot.epoch(T::slots_per_epoch()).as_u64() as i64, ); set_gauge(&SLOTS_PER_EPOCH, T::slots_per_epoch() as i64); - set_gauge(&MILLISECONDS_PER_SLOT, clock.slot_duration_millis() as i64); + set_gauge( + &MILLISECONDS_PER_SLOT, + clock.slot_duration().as_millis() as i64, + ); } diff --git a/eth2/utils/slot_clock/src/system_time_slot_clock.rs b/eth2/utils/slot_clock/src/system_time_slot_clock.rs index c493a8be83..0d4a52ef64 100644 --- a/eth2/utils/slot_clock/src/system_time_slot_clock.rs +++ b/eth2/utils/slot_clock/src/system_time_slot_clock.rs @@ -1,99 +1,68 @@ use super::SlotClock; -use std::time::{Duration, SystemTime}; +use std::time::{Duration, Instant}; use types::Slot; pub use std::time::SystemTimeError; -#[derive(Debug, PartialEq)] -pub enum Error { - SlotDurationIsZero, - SystemTimeError(String), -} - /// Determines the present slot based upon the present system time. #[derive(Clone)] pub struct SystemTimeSlotClock { genesis_slot: Slot, - genesis_seconds: u64, - slot_duration_seconds: u64, + genesis: Instant, + slot_duration: Duration, } impl SlotClock for SystemTimeSlotClock { - type Error = Error; + fn new(genesis_slot: Slot, genesis: Instant, slot_duration: Duration) -> Self { + if slot_duration.as_millis() == 0 { + panic!("SystemTimeSlotClock cannot have a < 1ms slot duration."); + } - /// Create a new `SystemTimeSlotClock`. - /// - /// Returns an Error if `slot_duration_seconds == 0`. 
- fn new(genesis_slot: Slot, genesis_seconds: u64, slot_duration_seconds: u64) -> Self { Self { genesis_slot, - genesis_seconds, - slot_duration_seconds, + genesis, + slot_duration, } } - fn present_slot(&self) -> Result, Error> { - if self.slot_duration_seconds == 0 { - return Err(Error::SlotDurationIsZero); - } + fn now(&self) -> Option { + let now = Instant::now(); - let syslot_time = SystemTime::now(); - let duration_since_epoch = syslot_time.duration_since(SystemTime::UNIX_EPOCH)?; - let duration_since_genesis = - duration_since_epoch.checked_sub(Duration::from_secs(self.genesis_seconds)); - - match duration_since_genesis { - None => Ok(None), - Some(d) => Ok(slot_from_duration(self.slot_duration_seconds, d) - .and_then(|s| Some(s + self.genesis_slot))), + if now < self.genesis { + None + } else { + let slot = Slot::from( + (now.duration_since(self.genesis).as_millis() / self.slot_duration.as_millis()) + as u64, + ); + Some(slot + self.genesis_slot) } } - fn duration_to_next_slot(&self) -> Result, Error> { - duration_to_next_slot(self.genesis_seconds, self.slot_duration_seconds) + fn duration_to_next_slot(&self) -> Option { + let now = Instant::now(); + if now < self.genesis { + None + } else { + let duration_since_genesis = now - self.genesis; + let millis_since_genesis = duration_since_genesis.as_millis(); + let millis_per_slot = self.slot_duration.as_millis(); + + let current_slot = millis_since_genesis / millis_per_slot; + let next_slot = current_slot + 1; + + let next_slot = + self.genesis + Duration::from_millis((next_slot * millis_per_slot) as u64); + + Some(next_slot.duration_since(now)) + } } - fn slot_duration_millis(&self) -> u64 { - self.slot_duration_seconds * 1000 + fn slot_duration(&self) -> Duration { + self.slot_duration } } -impl From for Error { - fn from(e: SystemTimeError) -> Error { - Error::SystemTimeError(format!("{:?}", e)) - } -} - -fn slot_from_duration(slot_duration_seconds: u64, duration: Duration) -> Option { - Some(Slot::new( - 
duration.as_secs().checked_div(slot_duration_seconds)?, - )) -} -// calculate the duration to the next slot -fn duration_to_next_slot( - genesis_time: u64, - seconds_per_slot: u64, -) -> Result, Error> { - let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?; - let genesis_time = Duration::from_secs(genesis_time); - - if now < genesis_time { - return Ok(None); - } - - let since_genesis = now - genesis_time; - - let elapsed_slots = since_genesis.as_secs() / seconds_per_slot; - - let next_slot_start_seconds = (elapsed_slots + 1) - .checked_mul(seconds_per_slot) - .expect("Next slot time should not overflow u64"); - - let time_to_next_slot = Duration::from_secs(next_slot_start_seconds) - since_genesis; - - Ok(Some(time_to_next_slot)) -} - #[cfg(test)] mod tests { use super::*; @@ -104,71 +73,51 @@ mod tests { */ #[test] fn test_slot_now() { - let slot_time = 100; let genesis_slot = Slot::new(0); - let now = SystemTime::now(); - let since_epoch = now.duration_since(SystemTime::UNIX_EPOCH).unwrap(); + let prior_genesis = + |seconds_prior: u64| Instant::now() - Duration::from_secs(seconds_prior); - let genesis = since_epoch.as_secs() - slot_time * 89; + let clock = + SystemTimeSlotClock::new(genesis_slot, prior_genesis(0), Duration::from_secs(1)); + assert_eq!(clock.now(), Some(Slot::new(0))); - let clock = SystemTimeSlotClock { + let clock = + SystemTimeSlotClock::new(genesis_slot, prior_genesis(5), Duration::from_secs(1)); + assert_eq!(clock.now(), Some(Slot::new(5))); + + let clock = SystemTimeSlotClock::new( genesis_slot, - genesis_seconds: genesis, - slot_duration_seconds: slot_time, - }; - assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(89))); + Instant::now() - Duration::from_millis(500), + Duration::from_secs(1), + ); + assert_eq!(clock.now(), Some(Slot::new(0))); + assert!(clock.duration_to_next_slot().unwrap() < Duration::from_millis(500)); - let clock = SystemTimeSlotClock { + let clock = SystemTimeSlotClock::new( genesis_slot, - 
genesis_seconds: since_epoch.as_secs(), - slot_duration_seconds: slot_time, - }; - assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(0))); - - let clock = SystemTimeSlotClock { - genesis_slot, - genesis_seconds: since_epoch.as_secs() - slot_time * 42 - 5, - slot_duration_seconds: slot_time, - }; - assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(42))); + Instant::now() - Duration::from_millis(1_500), + Duration::from_secs(1), + ); + assert_eq!(clock.now(), Some(Slot::new(1))); + assert!(clock.duration_to_next_slot().unwrap() < Duration::from_millis(500)); } #[test] - fn test_slot_from_duration() { - let slot_time = 100; - - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(0)), - Some(Slot::new(0)) - ); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(10)), - Some(Slot::new(0)) - ); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(100)), - Some(Slot::new(1)) - ); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(101)), - Some(Slot::new(1)) - ); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(1000)), - Some(Slot::new(10)) - ); + #[should_panic] + fn zero_seconds() { + SystemTimeSlotClock::new(Slot::new(0), Instant::now(), Duration::from_secs(0)); } #[test] - fn test_slot_from_duration_slot_time_zero() { - let slot_time = 0; + #[should_panic] + fn zero_millis() { + SystemTimeSlotClock::new(Slot::new(0), Instant::now(), Duration::from_millis(0)); + } - assert_eq!(slot_from_duration(slot_time, Duration::from_secs(0)), None); - assert_eq!(slot_from_duration(slot_time, Duration::from_secs(10)), None); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(1000)), - None - ); + #[test] + #[should_panic] + fn less_than_one_millis() { + SystemTimeSlotClock::new(Slot::new(0), Instant::now(), Duration::from_nanos(999)); } } diff --git a/eth2/utils/slot_clock/src/testing_slot_clock.rs b/eth2/utils/slot_clock/src/testing_slot_clock.rs index f741d3b87a..d90cb157aa 100644 
--- a/eth2/utils/slot_clock/src/testing_slot_clock.rs +++ b/eth2/utils/slot_clock/src/testing_slot_clock.rs @@ -1,12 +1,11 @@ use super::SlotClock; use std::sync::RwLock; -use std::time::Duration; +use std::time::{Duration, Instant}; use types::Slot; -#[derive(Debug, PartialEq)] -pub enum Error {} - -/// Determines the present slot based upon the present system time. +/// A slot clock where the slot is manually set instead of being determined by the system time. +/// +/// Useful for testing scenarios. pub struct TestingSlotClock { slot: RwLock, } @@ -17,32 +16,30 @@ impl TestingSlotClock { } pub fn advance_slot(&self) { - self.set_slot(self.present_slot().unwrap().unwrap().as_u64() + 1) + self.set_slot(self.now().unwrap().as_u64() + 1) } } impl SlotClock for TestingSlotClock { - type Error = Error; - - /// Create a new `TestingSlotClock` at `genesis_slot`. - fn new(genesis_slot: Slot, _genesis_seconds: u64, _slot_duration_seconds: u64) -> Self { + fn new(genesis_slot: Slot, _genesis: Instant, _slot_duration: Duration) -> Self { TestingSlotClock { slot: RwLock::new(genesis_slot), } } - fn present_slot(&self) -> Result, Error> { + fn now(&self) -> Option { let slot = *self.slot.read().expect("TestingSlotClock poisoned."); - Ok(Some(slot)) + Some(slot) } /// Always returns a duration of 1 second. - fn duration_to_next_slot(&self) -> Result, Error> { - Ok(Some(Duration::from_secs(1))) + fn duration_to_next_slot(&self) -> Option { + Some(Duration::from_secs(1)) } - fn slot_duration_millis(&self) -> u64 { - 0 + /// Always returns a slot duration of 0 seconds. 
+ fn slot_duration(&self) -> Duration { + Duration::from_secs(0) } } @@ -52,11 +49,9 @@ mod tests { #[test] fn test_slot_now() { - let null = 0; - - let clock = TestingSlotClock::new(Slot::new(10), null, null); - assert_eq!(clock.present_slot(), Ok(Some(Slot::new(10)))); + let clock = TestingSlotClock::new(Slot::new(10), Instant::now(), Duration::from_secs(0)); + assert_eq!(clock.now(), Some(Slot::new(10))); clock.set_slot(123); - assert_eq!(clock.present_slot(), Ok(Some(Slot::new(123)))); + assert_eq!(clock.now(), Some(Slot::new(123))); } } diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 927731f63f..2000f5409b 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -18,6 +18,7 @@ eth2_ssz = "0.1" eth2_config = { path = "../eth2/utils/eth2_config" } tree_hash = "0.1" clap = "2.32.0" +lighthouse_bootstrap = { path = "../eth2/utils/lighthouse_bootstrap" } grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } protos = { path = "../protos" } slot_clock = { path = "../eth2/utils/slot_clock" } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 7bc504b233..3e13de7229 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -2,22 +2,48 @@ use bincode; use bls::Keypair; use clap::ArgMatches; use serde_derive::{Deserialize, Serialize}; -use slog::{debug, error, info, o, Drain}; +use slog::{error, info, o, warn, Drain}; use std::fs::{self, File, OpenOptions}; use std::io::{Error, ErrorKind}; +use std::ops::Range; use std::path::PathBuf; use std::sync::Mutex; -use types::{EthSpec, MainnetEthSpec}; +use types::{test_utils::generate_deterministic_keypair, EthSpec, MainnetEthSpec}; + +pub const DEFAULT_SERVER: &str = "localhost"; +pub const DEFAULT_SERVER_GRPC_PORT: &str = "5051"; +pub const DEFAULT_SERVER_HTTP_PORT: &str = "5052"; + +#[derive(Clone)] +pub enum KeySource { + /// Load the keypairs from disk. 
+ Disk, + /// Generate the keypairs (insecure, generates predictable keys). + TestingKeypairRange(Range), +} + +impl Default for KeySource { + fn default() -> Self { + KeySource::Disk + } +} /// Stores the core configuration for this validator instance. #[derive(Clone, Serialize, Deserialize)] pub struct Config { /// The data directory, which stores all validator databases pub data_dir: PathBuf, + /// The source for loading keypairs + #[serde(skip)] + pub key_source: KeySource, /// The path where the logs will be outputted pub log_file: PathBuf, /// The server at which the Beacon Node can be contacted pub server: String, + /// The gRPC port on the server + pub server_grpc_port: u16, + /// The HTTP port on the server, for the REST API. + pub server_http_port: u16, /// The number of slots per epoch. pub slots_per_epoch: u64, } @@ -29,14 +55,33 @@ impl Default for Config { fn default() -> Self { Self { data_dir: PathBuf::from(".lighthouse-validator"), + key_source: <_>::default(), log_file: PathBuf::from(""), - server: "localhost:5051".to_string(), + server: DEFAULT_SERVER.into(), + server_grpc_port: DEFAULT_SERVER_GRPC_PORT + .parse::() + .expect("gRPC port constant should be valid"), + server_http_port: DEFAULT_SERVER_GRPC_PORT + .parse::() + .expect("HTTP port constant should be valid"), slots_per_epoch: MainnetEthSpec::slots_per_epoch(), } } } impl Config { + /// Returns the full path for the client data directory (not just the name of the directory). + pub fn full_data_dir(&self) -> Option { + dirs::home_dir().map(|path| path.join(&self.data_dir)) + } + + /// Creates the data directory (and any non-existing parent directories). + pub fn create_data_dir(&self) -> Option { + let path = dirs::home_dir()?.join(&self.data_dir); + fs::create_dir_all(&path).ok()?; + Some(path) + } + /// Apply the following arguments to `self`, replacing values if they are specified in `args`. /// /// Returns an error if arguments are obviously invalid. 
May succeed even if some values are @@ -94,67 +139,108 @@ impl Config { Ok(()) } - /// Try to load keys from validator_dir, returning None if none are found or an error. + /// Reads a single keypair from the given `path`. + /// + /// `path` should be the path to a directory containing a private key. The file name of `path` + /// must align with the public key loaded from it, otherwise an error is returned. + /// + /// An error will be returned if `path` is a file (not a directory). + fn read_keypair_file(&self, path: PathBuf) -> Result { + if !path.is_dir() { + return Err("Is not a directory".into()); + } + + let key_filename: PathBuf = path.join(DEFAULT_PRIVATE_KEY_FILENAME); + + if !key_filename.is_file() { + return Err(format!( + "Private key is not a file: {:?}", + key_filename.to_str() + )); + } + + let mut key_file = File::open(key_filename.clone()) + .map_err(|e| format!("Unable to open private key file: {}", e))?; + + let key: Keypair = bincode::deserialize_from(&mut key_file) + .map_err(|e| format!("Unable to deserialize private key: {:?}", e))?; + + let ki = key.identifier(); + if &ki + != &path + .file_name() + .ok_or_else(|| "Invalid path".to_string())? + .to_string_lossy() + { + return Err(format!( + "The validator key ({:?}) did not match the directory filename {:?}.", + ki, + path.to_str() + )); + } else { + Ok(key) + } + } + + pub fn fetch_keys_from_disk(&self, log: &slog::Logger) -> Result, String> { + Ok( + fs::read_dir(&self.full_data_dir().expect("Data dir must exist")) + .map_err(|e| format!("Failed to read datadir: {:?}", e))? 
+ .filter_map(|validator_dir| { + let path = validator_dir.ok()?.path(); + + if path.is_dir() { + match self.read_keypair_file(path.clone()) { + Ok(keypair) => Some(keypair), + Err(e) => { + error!( + log, + "Failed to parse a validator keypair"; + "error" => e, + "path" => path.to_str(), + ); + None + } + } + } else { + None + } + }) + .collect(), + ) + } + + pub fn fetch_testing_keypairs( + &self, + range: std::ops::Range, + ) -> Result, String> { + Ok(range + .into_iter() + .map(generate_deterministic_keypair) + .collect()) + } + + /// Loads the keypairs according to `self.key_source`. Will return one or more keypairs, or an + /// error. #[allow(dead_code)] - pub fn fetch_keys(&self, log: &slog::Logger) -> Option> { - let key_pairs: Vec = fs::read_dir(&self.data_dir) - .ok()? - .filter_map(|validator_dir| { - let validator_dir = validator_dir.ok()?; - - if !(validator_dir.file_type().ok()?.is_dir()) { - // Skip non-directories (i.e. no files/symlinks) - return None; - } - - let key_filename = validator_dir.path().join(DEFAULT_PRIVATE_KEY_FILENAME); - - if !(key_filename.is_file()) { - info!( - log, - "Private key is not a file: {:?}", - key_filename.to_str() - ); - return None; - } - - debug!( - log, - "Deserializing private key from file: {:?}", - key_filename.to_str() - ); - - let mut key_file = File::open(key_filename.clone()).ok()?; - - let key: Keypair = if let Ok(key_ok) = bincode::deserialize_from(&mut key_file) { - key_ok - } else { - error!( - log, - "Unable to deserialize the private key file: {:?}", key_filename - ); - return None; - }; - - let ki = key.identifier(); - if ki != validator_dir.file_name().into_string().ok()? 
{ - error!( - log, - "The validator key ({:?}) did not match the directory filename {:?}.", - ki, - &validator_dir.path().to_string_lossy() - ); - return None; - } - Some(key) - }) - .collect(); + pub fn fetch_keys(&self, log: &slog::Logger) -> Result, String> { + let keypairs = match &self.key_source { + KeySource::Disk => self.fetch_keys_from_disk(log)?, + KeySource::TestingKeypairRange(range) => { + warn!(log, "Using insecure private keys"); + self.fetch_testing_keypairs(range.clone())? + } + }; // Check if it's an empty vector, and return none. - if key_pairs.is_empty() { - None + if keypairs.is_empty() { + Err( + "No validator keypairs were found, unable to proceed. To generate \ + testing keypairs, see 'testnet range --help'." + .into(), + ) } else { - Some(key_pairs) + Ok(keypairs) } } diff --git a/validator_client/src/error.rs b/validator_client/src/error.rs index 97500f900b..e13f7ded51 100644 --- a/validator_client/src/error.rs +++ b/validator_client/src/error.rs @@ -1,16 +1,9 @@ -use slot_clock; - use error_chain::error_chain; error_chain! 
{ links { } errors { - SlotClockError(e: slot_clock::SystemTimeSlotClockError) { - description("Error reading system time"), - display("SlotClockError: '{:?}'", e) - } - SystemTimeError(t: String ) { description("Error reading system time"), display("SystemTimeError: '{}'", t) diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 83a874df7d..39b2e3eaee 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -6,14 +6,16 @@ pub mod error; mod service; mod signer; -use crate::config::Config as ValidatorClientConfig; +use crate::config::{ + Config as ClientConfig, KeySource, DEFAULT_SERVER, DEFAULT_SERVER_GRPC_PORT, + DEFAULT_SERVER_HTTP_PORT, +}; use crate::service::Service as ValidatorService; -use clap::{App, Arg}; -use eth2_config::{read_from_file, write_to_file, Eth2Config}; +use clap::{App, Arg, ArgMatches, SubCommand}; +use eth2_config::Eth2Config; +use lighthouse_bootstrap::Bootstrapper; use protos::services_grpc::ValidatorServiceClient; -use slog::{crit, error, info, o, warn, Drain, Level}; -use std::fs; -use std::path::PathBuf; +use slog::{crit, error, info, o, Drain, Level, Logger}; use types::{InteropEthSpec, Keypair, MainnetEthSpec, MinimalEthSpec}; pub const DEFAULT_SPEC: &str = "minimal"; @@ -21,6 +23,8 @@ pub const DEFAULT_DATA_DIR: &str = ".lighthouse-validator"; pub const CLIENT_CONFIG_FILENAME: &str = "validator-client.toml"; pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; +type Result = core::result::Result; + fn main() { // Logging let decorator = slog_term::TermDecorator::new().build(); @@ -49,28 +53,47 @@ fn main() { .takes_value(true), ) .arg( - Arg::with_name("eth2-spec") - .long("eth2-spec") + Arg::with_name("spec") + .short("s") + .long("spec") + .value_name("TITLE") + .help("Specifies the default eth2 spec type.") + .takes_value(true) + .possible_values(&["mainnet", "minimal", "interop"]) + .conflicts_with("eth2-config") + .global(true) + ) + .arg( + Arg::with_name("eth2-config") + 
.long("eth2-config") .short("e") .value_name("TOML_FILE") - .help("Path to Ethereum 2.0 specifications file.") + .help("Path to Ethereum 2.0 config and specification file (e.g., eth2_spec.toml).") .takes_value(true), ) .arg( Arg::with_name("server") .long("server") - .value_name("server") + .value_name("NETWORK_ADDRESS") .help("Address to connect to BeaconNode.") + .default_value(DEFAULT_SERVER) .takes_value(true), ) .arg( - Arg::with_name("default-spec") - .long("default-spec") - .value_name("TITLE") - .short("default-spec") - .help("Specifies the default eth2 spec to be used. This will override any spec written to disk and will therefore be used by default in future instances.") - .takes_value(true) - .possible_values(&["mainnet", "minimal", "interop"]) + Arg::with_name("server-grpc-port") + .long("g") + .value_name("PORT") + .help("Port to use for gRPC API connection to the server.") + .default_value(DEFAULT_SERVER_GRPC_PORT) + .takes_value(true), + ) + .arg( + Arg::with_name("server-http-port") + .long("h") + .value_name("PORT") + .help("Port to use for HTTP API connection to the server.") + .default_value(DEFAULT_SERVER_HTTP_PORT) + .takes_value(true), ) .arg( Arg::with_name("debug-level") @@ -82,6 +105,33 @@ fn main() { .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) .default_value("info"), ) + /* + * The "testnet" sub-command. + * + * Used for starting testnet validator clients. + */ + .subcommand(SubCommand::with_name("testnet") + .about("Starts a testnet validator using INSECURE, predicatable private keys, based off the canonical \ + validator index. 
ONLY USE FOR TESTING PURPOSES!") + .arg( + Arg::with_name("bootstrap") + .short("b") + .long("bootstrap") + .help("Connect to the RPC server to download the eth2_config via the HTTP API.") + ) + .subcommand(SubCommand::with_name("insecure") + .about("Uses the standard, predicatable `interop` keygen method to produce a range \ + of predicatable private keys and starts performing their validator duties.") + .arg(Arg::with_name("first_validator") + .value_name("VALIDATOR_INDEX") + .required(true) + .help("The first validator public key to be generated for this client.")) + .arg(Arg::with_name("validator_count") + .value_name("COUNT") + .required(true) + .help("The number of validators.")) + ) + ) .get_matches(); let drain = match matches.value_of("debug-level") { @@ -93,133 +143,15 @@ fn main() { Some("crit") => drain.filter_level(Level::Critical), _ => unreachable!("guarded by clap"), }; - let mut log = slog::Logger::root(drain.fuse(), o!()); - - let data_dir = match matches - .value_of("datadir") - .and_then(|v| Some(PathBuf::from(v))) - { - Some(v) => v, - None => { - // use the default - let mut default_dir = match dirs::home_dir() { - Some(v) => v, - None => { - crit!(log, "Failed to find a home directory"); - return; - } - }; - default_dir.push(DEFAULT_DATA_DIR); - default_dir - } - }; - - // create the directory if needed - match fs::create_dir_all(&data_dir) { - Ok(_) => {} + let log = slog::Logger::root(drain.fuse(), o!()); + let (client_config, eth2_config) = match get_configs(&matches, &log) { + Ok(tuple) => tuple, Err(e) => { - crit!(log, "Failed to initialize data dir"; "error" => format!("{}", e)); - return; - } - } - - let client_config_path = data_dir.join(CLIENT_CONFIG_FILENAME); - - // Attempt to load the `ClientConfig` from disk. - // - // If file doesn't exist, create a new, default one. 
- let mut client_config = match read_from_file::( - client_config_path.clone(), - ) { - Ok(Some(c)) => c, - Ok(None) => { - let default = ValidatorClientConfig::default(); - if let Err(e) = write_to_file(client_config_path.clone(), &default) { - crit!(log, "Failed to write default ClientConfig to file"; "error" => format!("{:?}", e)); - return; - } - default - } - Err(e) => { - crit!(log, "Failed to load a ChainConfig file"; "error" => format!("{:?}", e)); - return; - } - }; - - // Ensure the `data_dir` in the config matches that supplied to the CLI. - client_config.data_dir = data_dir.clone(); - - // Update the client config with any CLI args. - match client_config.apply_cli_args(&matches, &mut log) { - Ok(()) => (), - Err(s) => { - crit!(log, "Failed to parse ClientConfig CLI arguments"; "error" => s); - return; - } - }; - - let eth2_config_path: PathBuf = matches - .value_of("eth2-spec") - .and_then(|s| Some(PathBuf::from(s))) - .unwrap_or_else(|| data_dir.join(ETH2_CONFIG_FILENAME)); - - // Initialise the `Eth2Config`. - // - // If a CLI parameter is set, overwrite any config file present. - // If a parameter is not set, use either the config file present or default to minimal. - let cli_config = match matches.value_of("default-spec") { - Some("mainnet") => Some(Eth2Config::mainnet()), - Some("minimal") => Some(Eth2Config::minimal()), - Some("interop") => Some(Eth2Config::interop()), - _ => None, - }; - // if a CLI flag is specified, write the new config if it doesn't exist, - // otherwise notify the user that the file will not be written. 
- let eth2_config_from_file = match read_from_file::(eth2_config_path.clone()) { - Ok(config) => config, - Err(e) => { - crit!(log, "Failed to read the Eth2Config from file"; "error" => format!("{:?}", e)); - return; - } - }; - - let mut eth2_config = { - if let Some(cli_config) = cli_config { - if eth2_config_from_file.is_none() { - // write to file if one doesn't exist - if let Err(e) = write_to_file(eth2_config_path, &cli_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; - } - } else { - warn!( - log, - "Eth2Config file exists. Configuration file is ignored, using default" - ); - } - cli_config - } else { - // CLI config not specified, read from disk - match eth2_config_from_file { - Some(config) => config, - None => { - // set default to minimal - let eth2_config = Eth2Config::minimal(); - if let Err(e) = write_to_file(eth2_config_path, ð2_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; - } - eth2_config - } - } - } - }; - - // Update the eth2 config with any CLI flags. - match eth2_config.apply_cli_args(&matches) { - Ok(()) => (), - Err(s) => { - crit!(log, "Failed to parse Eth2Config CLI arguments"; "error" => s); + crit!( + log, + "Unable to initialize configuration"; + "error" => e + ); return; } }; @@ -227,8 +159,7 @@ fn main() { info!( log, "Starting validator client"; - "datadir" => client_config.data_dir.to_str(), - "spec_constants" => ð2_config.spec_constants, + "datadir" => client_config.full_data_dir().expect("Unable to find datadir").to_str(), ); let result = match eth2_config.spec_constants.as_str() { @@ -260,3 +191,113 @@ fn main() { Err(e) => crit!(log, "Validator client exited with error"; "error" => e.to_string()), } } + +/// Parses the CLI arguments and attempts to load the client and eth2 configuration. +/// +/// This is not a pure function, it reads from disk and may contact network servers. 
+pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result<(ClientConfig, Eth2Config)> { + let mut client_config = ClientConfig::default(); + + if let Some(server) = cli_args.value_of("server") { + client_config.server = server.to_string(); + } + + if let Some(port) = cli_args.value_of("server-http-port") { + client_config.server_http_port = port + .parse::() + .map_err(|e| format!("Unable to parse HTTP port: {:?}", e))?; + } + + if let Some(port) = cli_args.value_of("server-grpc-port") { + client_config.server_grpc_port = port + .parse::() + .map_err(|e| format!("Unable to parse gRPC port: {:?}", e))?; + } + + info!( + log, + "Beacon node connection info"; + "grpc_port" => client_config.server_grpc_port, + "http_port" => client_config.server_http_port, + "server" => &client_config.server, + ); + + match cli_args.subcommand() { + ("testnet", Some(sub_cli_args)) => { + if cli_args.is_present("eth2-config") && sub_cli_args.is_present("bootstrap") { + return Err( + "Cannot specify --eth2-config and --bootstrap as it may result \ + in ambiguity." + .into(), + ); + } + process_testnet_subcommand(sub_cli_args, client_config, log) + } + _ => return Err("You must use the testnet command. See '--help'.".into()), + } +} + +/// Parses the `testnet` CLI subcommand. +/// +/// This is not a pure function, it reads from disk and may contact network servers. 
+fn process_testnet_subcommand( + cli_args: &ArgMatches, + mut client_config: ClientConfig, + log: &Logger, +) -> Result<(ClientConfig, Eth2Config)> { + let eth2_config = if cli_args.is_present("bootstrap") { + info!(log, "Connecting to bootstrap server"); + let bootstrapper = Bootstrapper::connect( + format!( + "http://{}:{}", + client_config.server, client_config.server_http_port + ), + &log, + )?; + + let eth2_config = bootstrapper.eth2_config()?; + + info!( + log, + "Bootstrapped eth2 config via HTTP"; + "slot_time_millis" => eth2_config.spec.milliseconds_per_slot, + "spec" => ð2_config.spec_constants, + ); + + eth2_config + } else { + match cli_args.value_of("spec") { + Some("mainnet") => Eth2Config::mainnet(), + Some("minimal") => Eth2Config::minimal(), + Some("interop") => Eth2Config::interop(), + _ => return Err("No --spec flag provided. See '--help'.".into()), + } + }; + + client_config.key_source = match cli_args.subcommand() { + ("insecure", Some(sub_cli_args)) => { + let first = sub_cli_args + .value_of("first_validator") + .ok_or_else(|| "No first validator supplied")? + .parse::() + .map_err(|e| format!("Unable to parse first validator: {:?}", e))?; + let count = sub_cli_args + .value_of("validator_count") + .ok_or_else(|| "No validator count supplied")? 
+ .parse::() + .map_err(|e| format!("Unable to parse validator count: {:?}", e))?; + + info!( + log, + "Generating unsafe testing keys"; + "first_validator" => first, + "count" => count + ); + + KeySource::TestingKeypairRange(first..first + count) + } + _ => KeySource::Disk, + }; + + Ok((client_config, eth2_config)) +} diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index 3ddb96e4c2..8adc79b91e 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -13,7 +13,6 @@ use crate::block_producer::{BeaconBlockGrpcClient, BlockProducer}; use crate::config::Config as ValidatorConfig; use crate::duties::{BeaconNodeDuties, DutiesManager, EpochDutiesMap}; use crate::error as error_chain; -use crate::error::ErrorKind; use crate::signer::Signer; use bls::Keypair; use eth2_config::Eth2Config; @@ -74,12 +73,15 @@ impl Service error_chain::Result> { - // initialise the beacon node client to check for a connection + let server_url = format!( + "{}:{}", + client_config.server, client_config.server_grpc_port + ); let env = Arc::new(EnvBuilder::new().build()); // Beacon node gRPC beacon node endpoints. let beacon_node_client = { - let ch = ChannelBuilder::new(env.clone()).connect(&client_config.server); + let ch = ChannelBuilder::new(env.clone()).connect(&server_url); BeaconNodeServiceClient::new(ch) }; @@ -87,9 +89,14 @@ impl Service { - warn!(log, "Could not connect to node. 
Error: {}", e); - info!(log, "Retrying in 5 seconds..."); - std::thread::sleep(Duration::from_secs(5)); + let retry_seconds = 5; + warn!( + log, + "Could not connect to beacon node"; + "error" => format!("{:?}", e), + "retry_in" => format!("{} seconds", retry_seconds), + ); + std::thread::sleep(Duration::from_secs(retry_seconds)); continue; } Ok(info) => { @@ -123,7 +130,13 @@ impl Service node_info.version.clone(), "Chain ID" => node_info.network_id, "Genesis time" => genesis_time); + info!( + log, + "Beacon node connected"; + "version" => node_info.version.clone(), + "network_id" => node_info.network_id, + "genesis_time" => genesis_time + ); let proto_fork = node_info.get_fork(); let mut previous_version: [u8; 4] = [0; 4]; @@ -140,7 +153,7 @@ impl Service Service(|| { + "Unable to start slot clock. Genesis may not have occurred yet. Exiting.".into() + })?; - let current_slot = slot_clock - .present_slot() - .map_err(ErrorKind::SlotClockError)? - .ok_or_else::(|| { - "Genesis is not in the past. Exiting.".into() - })?; + let current_slot = slot_clock.now().ok_or_else::(|| { + "Genesis has not yet occurred. Exiting.".into() + })?; /* Generate the duties manager */ // Load generated keypairs - let keypairs = match client_config.fetch_keys(&log) { - Some(kps) => Arc::new(kps), - None => { - return Err("Unable to locate validator key pairs, nothing to do.".into()); - } - }; + let keypairs = Arc::new(client_config.fetch_keys(&log)?); let slots_per_epoch = E::slots_per_epoch(); @@ -244,7 +252,6 @@ impl Service(|| { "Genesis is not in the past. Exiting.".into() })?; @@ -252,7 +259,7 @@ impl Service Service error_chain::Result<()> { - let current_slot = match self.slot_clock.present_slot() { - Err(e) => { - error!(self.log, "SystemTimeError {:?}", e); - return Err("Could not read system time".into()); - } - Ok(slot) => slot.ok_or_else::(|| { + let current_slot = self + .slot_clock + .now() + .ok_or_else::(|| { "Genesis is not in the past. 
Exiting.".into() - })?, - }; + })?; let current_epoch = current_slot.epoch(self.slots_per_epoch);