diff --git a/Cargo.toml b/Cargo.toml index 3b89c51243..397de70fee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ members = [ "eth2/utils/cached_tree_hash", "eth2/utils/compare_fields", "eth2/utils/compare_fields_derive", + "eth2/utils/eth2_config", "eth2/utils/fixed_len_vec", "eth2/utils/hashing", "eth2/utils/honey-badger-split", diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 7b561869a7..48504d89ad 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -12,3 +12,4 @@ slog-term = "^2.4.0" slog-async = "^2.3.0" validator_client = { path = "../validator_client" } types = { path = "../eth2/types" } +eth2_config = { path = "../eth2/utils/eth2_config" } diff --git a/account_manager/src/main.rs b/account_manager/src/main.rs index c30b5b103d..d820321013 100644 --- a/account_manager/src/main.rs +++ b/account_manager/src/main.rs @@ -1,9 +1,13 @@ use bls::Keypair; use clap::{App, Arg, SubCommand}; -use slog::{debug, info, o, Drain}; +use slog::{crit, debug, info, o, Drain}; use std::path::PathBuf; use types::test_utils::generate_deterministic_keypair; use validator_client::Config as ValidatorClientConfig; +use eth2_config::{get_data_dir}; + +pub const DEFAULT_DATA_DIR: &str = ".lighthouse-account-manager"; +pub const CLIENT_CONFIG_FILENAME: &str = "account-manager-config.toml"; fn main() { // Logging @@ -20,6 +24,7 @@ fn main() { .arg( Arg::with_name("datadir") .long("datadir") + .short("d") .value_name("DIR") .help("Data directory for keys and databases.") .takes_value(true), @@ -43,49 +48,105 @@ fn main() { .help("The index of the validator, for which the test key is generated") .takes_value(true) .required(true), - ), + ) + .arg( + Arg::with_name("validator count") + .long("validator_count") + .short("n") + .value_name("validator_count") + .help("If supplied along with `index`, generates keys `i..i + n`.") + .takes_value(true) + .default_value("1"), + ) ) .get_matches(); - let config = 
ValidatorClientConfig::parse_args(&matches, &log) - .expect("Unable to build a configuration for the account manager."); + let data_dir = match get_data_dir(&matches, PathBuf::from(DEFAULT_DATA_DIR)) { + Ok(dir) => dir, + Err(e) => { + crit!(log, "Failed to initialize data dir"; "error" => format!("{:?}", e)); + return + } + }; + + let mut client_config = ValidatorClientConfig::default(); + + if let Err(e) = client_config.apply_cli_args(&matches) { + crit!(log, "Failed to apply CLI args"; "error" => format!("{:?}", e)); + return + }; + + // Ensure the `data_dir` in the config matches that supplied to the CLI. + client_config.data_dir = data_dir.clone(); + + // Update the client config with any CLI args. + match client_config.apply_cli_args(&matches) { + Ok(()) => (), + Err(s) => { + crit!(log, "Failed to parse ClientConfig CLI arguments"; "error" => s); + return; + } + }; // Log configuration info!(log, ""; - "data_dir" => &config.data_dir.to_str()); + "data_dir" => &client_config.data_dir.to_str()); match matches.subcommand() { - ("generate", Some(_gen_m)) => { - let keypair = Keypair::random(); - let key_path: PathBuf = config - .save_key(&keypair) - .expect("Unable to save newly generated private key."); - debug!( - log, - "Keypair generated {:?}, saved to: {:?}", - keypair.identifier(), - key_path.to_string_lossy() - ); - } - ("generate_deterministic", Some(gen_d_matches)) => { - let validator_index = gen_d_matches - .value_of("validator index") - .expect("Validator index required.") - .parse::() - .expect("Invalid validator index.") as usize; - let keypair = generate_deterministic_keypair(validator_index); - let key_path: PathBuf = config - .save_key(&keypair) - .expect("Unable to save newly generated deterministic private key."); - debug!( - log, - "Deterministic Keypair generated {:?}, saved to: {:?}", - keypair.identifier(), - key_path.to_string_lossy() - ); + ("generate", Some(_)) => generate_random(&client_config, &log), + ("generate_deterministic", 
Some(m)) => { + if let Some(string) = m.value_of("validator index") { + let i: usize = string.parse().expect("Invalid validator index"); + if let Some(string) = m.value_of("validator count") { + let n: usize = string.parse().expect("Invalid end validator count"); + + let indices: Vec = (i..i + n).collect(); + generate_deterministic_multiple(&indices, &client_config, &log) + } else { + generate_deterministic(i, &client_config, &log) + } + } } _ => panic!( "The account manager must be run with a subcommand. See help for more information." ), } } + +fn generate_random(config: &ValidatorClientConfig, log: &slog::Logger) { + save_key(&Keypair::random(), config, log) +} + +fn generate_deterministic_multiple( + validator_indices: &[usize], + config: &ValidatorClientConfig, + log: &slog::Logger, +) { + for validator_index in validator_indices { + generate_deterministic(*validator_index, config, log) + } +} + +fn generate_deterministic( + validator_index: usize, + config: &ValidatorClientConfig, + log: &slog::Logger, +) { + save_key( + &generate_deterministic_keypair(validator_index), + config, + log, + ) +} + +fn save_key(keypair: &Keypair, config: &ValidatorClientConfig, log: &slog::Logger) { + let key_path: PathBuf = config + .save_key(&keypair) + .expect("Unable to save newly generated private key."); + debug!( + log, + "Keypair generated {:?}, saved to: {:?}", + keypair.identifier(), + key_path.to_string_lossy() + ); +} diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index d78a5b5960..309f162e55 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -5,11 +5,14 @@ authors = ["Paul Hauner ", "Age Manning bool { - match self { - BlockProcessingOutcome::ValidBlock(_) => false, - BlockProcessingOutcome::InvalidBlock(r) => match r { - InvalidBlock::FutureSlot { .. 
} => true, - InvalidBlock::StateRootMismatch => true, - InvalidBlock::ParentUnknown => false, - InvalidBlock::SlotProcessingError(_) => false, - InvalidBlock::PerBlockProcessingError(e) => match e { - BlockProcessingError::Invalid(_) => true, - BlockProcessingError::BeaconStateError(_) => false, - }, - }, - } - } - - /// Returns `true` if the block was successfully processed and can be removed from any import - /// queues or temporary storage. - pub fn sucessfully_processed(&self) -> bool { - match self { - BlockProcessingOutcome::ValidBlock(_) => true, - _ => false, - } - } -} - pub trait BeaconChainTypes { type Store: store::Store; type SlotClock: slot_clock::SlotClock; - type ForkChoice: fork_choice::ForkChoice; + type ForkChoice: fork_choice::ForkChoice; type EthSpec: types::EthSpec; } +/// Represents the "Beacon Chain" component of Ethereum 2.0. Allows import of blocks and block +/// operations and chooses a canonical head. pub struct BeaconChain { - pub store: Arc, - pub slot_clock: T::SlotClock, - pub op_pool: OperationPool, - canonical_head: RwLock>, - finalized_head: RwLock>, - pub state: RwLock>, pub spec: ChainSpec, + /// Persistent storage for blocks, states, etc. Typically an on-disk store, such as LevelDB. + pub store: Arc, + /// Reports the current slot, typically based upon the system clock. + pub slot_clock: T::SlotClock, + /// Stores all operations (e.g., `Attestation`, `Deposit`, etc) that are candidates for + /// inclusion in a block. + pub op_pool: OperationPool, + /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was recieved. + canonical_head: RwLock>, + /// The same state from `self.canonical_head`, but updated at the start of each slot with a + /// skip slot if no block is recieved. This is effectively a cache that avoids repeating calls + /// to `per_slot_processing`. + state: RwLock>, + /// The root of the genesis block. 
+ genesis_block_root: Hash256, + /// A state-machine that is updated with information from the network and chooses a canonical + /// head block. pub fork_choice: RwLock, + /// Stores metrics about this `BeaconChain`. + pub metrics: Metrics, } impl BeaconChain { @@ -111,18 +91,16 @@ impl BeaconChain { let state_root = genesis_state.canonical_root(); store.put(&state_root, &genesis_state)?; - let block_root = genesis_block.block_header().canonical_root(); - store.put(&block_root, &genesis_block)?; + let genesis_block_root = genesis_block.block_header().canonical_root(); + store.put(&genesis_block_root, &genesis_block)?; + + // Also store the genesis block under the `ZERO_HASH` key. + let genesis_block_root = genesis_block.block_header().canonical_root(); + store.put(&spec.zero_hash, &genesis_block)?; - let finalized_head = RwLock::new(CheckPoint::new( - genesis_block.clone(), - block_root, - genesis_state.clone(), - state_root, - )); let canonical_head = RwLock::new(CheckPoint::new( genesis_block.clone(), - block_root, + genesis_block_root, genesis_state.clone(), state_root, )); @@ -130,17 +108,65 @@ impl BeaconChain { genesis_state.build_all_caches(&spec)?; Ok(Self { + spec, store, slot_clock, op_pool: OperationPool::new(), state: RwLock::new(genesis_state), - finalized_head, canonical_head, - spec, + genesis_block_root, fork_choice: RwLock::new(fork_choice), + metrics: Metrics::new()?, }) } + /// Attempt to load an existing instance from the given `store`. 
+ pub fn from_store( + store: Arc, + spec: ChainSpec, + ) -> Result>, Error> { + let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes()); + let p: PersistedBeaconChain = match store.get(&key) { + Err(e) => return Err(e.into()), + Ok(None) => return Ok(None), + Ok(Some(p)) => p, + }; + + let slot_clock = T::SlotClock::new( + spec.genesis_slot, + p.state.genesis_time, + spec.seconds_per_slot, + ); + + let fork_choice = T::ForkChoice::new(store.clone()); + + Ok(Some(BeaconChain { + spec, + store, + slot_clock, + op_pool: OperationPool::default(), + canonical_head: RwLock::new(p.canonical_head), + state: RwLock::new(p.state), + fork_choice: RwLock::new(fork_choice), + genesis_block_root: p.genesis_block_root, + metrics: Metrics::new()?, + })) + } + + /// Attempt to save this instance to `self.store`. + pub fn persist(&self) -> Result<(), Error> { + let p: PersistedBeaconChain = PersistedBeaconChain { + canonical_head: self.canonical_head.read().clone(), + genesis_block_root: self.genesis_block_root, + state: self.state.read().clone(), + }; + + let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes()); + self.store.put(&key, &p)?; + + Ok(()) + } + /// Returns the beacon block body for each beacon block root in `roots`. /// /// Fails if any root in `roots` does not have a corresponding block. @@ -149,7 +175,7 @@ impl BeaconChain { .iter() .map(|root| match self.get_block(root)? { Some(block) => Ok(block.body), - None => Err(Error::DBInconsistent("Missing block".into())), + None => Err(Error::DBInconsistent(format!("Missing block: {}", root))), }) .collect(); @@ -170,85 +196,24 @@ impl BeaconChain { Ok(headers?) } - - /// Returns `count `beacon block roots, starting from `start_slot` with an - /// interval of `skip` slots between each root. + /// Iterate in reverse (highest to lowest slot) through all blocks from the block at `slot` + /// through to the genesis block. 
/// - /// ## Errors: + /// Returns `None` for headers prior to genesis or when there is an error reading from `Store`. /// - /// - `SlotOutOfBounds`: Unable to return the full specified range. - /// - `SlotOutOfBounds`: Unable to load a state from the DB. - /// - `SlotOutOfBounds`: Start slot is higher than the first slot. - /// - Other: BeaconState` is inconsistent. - pub fn get_block_roots( - &self, - earliest_slot: Slot, - count: usize, - skip: usize, - ) -> Result, Error> { - let step_by = Slot::from(skip + 1); + /// Contains duplicate headers when skip slots are encountered. + pub fn rev_iter_blocks(&self, slot: Slot) -> BlockIterator { + BlockIterator::new(self.store.clone(), self.state.read().clone(), slot) + } - let mut roots: Vec = vec![]; - - // The state for reading block roots. Will be updated with an older state if slots go too - // far back in history. - let mut state = self.state.read().clone(); - - // The final slot in this series, will be reduced by `skip` each loop iteration. - let mut slot = earliest_slot + Slot::from(count * (skip + 1)) - 1; - - // If the highest slot requested is that of the current state insert the root of the - // head block, unless the head block's slot is not matching. - if slot == state.slot && self.head().beacon_block.slot == slot { - roots.push(self.head().beacon_block_root); - - slot -= step_by; - } else if slot >= state.slot { - return Err(BeaconStateError::SlotOutOfBounds.into()); - } - - loop { - // If the slot is within the range of the current state's block roots, append the root - // to the output vec. - // - // If we get `SlotOutOfBounds` error, load the oldest available historic - // state from the DB. - match state.get_block_root(slot) { - Ok(root) => { - if slot < earliest_slot { - break; - } else { - roots.push(*root); - slot -= step_by; - } - } - Err(BeaconStateError::SlotOutOfBounds) => { - // Read the earliest historic state in the current slot. 
- let earliest_historic_slot = - state.slot - Slot::from(T::EthSpec::slots_per_historical_root()); - // Load the earlier state from disk. - let new_state_root = state.get_state_root(earliest_historic_slot)?; - - // Break if the DB is unable to load the state. - state = match self.store.get(&new_state_root) { - Ok(Some(state)) => state, - _ => break, - } - } - Err(e) => return Err(e.into()), - }; - } - - // Return the results if they pass a sanity check. - if (slot <= earliest_slot) && (roots.len() == count) { - // Reverse the ordering of the roots. We extracted them in reverse order to make it - // simpler to lookup historic states. - // - // This is a potential optimisation target. - Ok(roots.iter().rev().cloned().collect()) - } else { - Err(BeaconStateError::SlotOutOfBounds.into()) - } + /// Iterates in reverse (highest to lowest slot) through all block roots from `slot` through to + /// genesis. + /// + /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. + /// + /// Contains duplicate roots when skip slots are encountered. + pub fn rev_iter_block_roots(&self, slot: Slot) -> BlockRootsIterator { + BlockRootsIterator::new(self.store.clone(), self.state.read().clone(), slot) } /// Returns the block at the given root, if any. @@ -260,25 +225,41 @@ impl BeaconChain { Ok(self.store.get(block_root)?) } - /// Update the canonical head to some new values. - pub fn update_canonical_head( - &self, - new_beacon_block: BeaconBlock, - new_beacon_block_root: Hash256, - new_beacon_state: BeaconState, - new_beacon_state_root: Hash256, - ) { - debug!( - "Updating canonical head with block at slot: {}", - new_beacon_block.slot - ); - let mut head = self.canonical_head.write(); - head.update( - new_beacon_block, - new_beacon_block_root, - new_beacon_state, - new_beacon_state_root, - ); + /// Update the canonical head to `new_head`. 
+ fn update_canonical_head(&self, new_head: CheckPoint) -> Result<(), Error> { + // Update the checkpoint that stores the head of the chain at the time it received the + // block. + *self.canonical_head.write() = new_head; + + // Update the always-at-the-present-slot state we keep around for performance gains. + *self.state.write() = { + let mut state = self.canonical_head.read().beacon_state.clone(); + + let present_slot = match self.slot_clock.present_slot() { + Ok(Some(slot)) => slot, + _ => return Err(Error::UnableToReadSlot), + }; + + // If required, transition the new state to the present slot. + for _ in state.slot.as_u64()..present_slot.as_u64() { + per_slot_processing(&mut state, &self.spec)?; + } + + state.build_all_caches(&self.spec)?; + + state + }; + + // Save `self` to `self.store`. + self.persist()?; + + Ok(()) + } + + /// Returns a read-lock guarded `BeaconState` which is the `canonical_head` that has been + /// updated to match the current slot clock. + pub fn current_state(&self) -> RwLockReadGuard> { + self.state.read() } /// Returns a read-lock guarded `CheckPoint` struct for reading the head (as chosen by the @@ -291,32 +272,15 @@ impl BeaconChain { self.canonical_head.read() } - /// Updates the canonical `BeaconState` with the supplied state. - /// - /// Advances the chain forward to the present slot. This method is better than just setting - /// state and calling `catchup_state` as it will not result in an old state being installed and - /// then having it iteratively updated -- in such a case it's possible for another thread to - /// find the state at an old slot. - pub fn update_state(&self, mut state: BeaconState) -> Result<(), Error> { - let present_slot = match self.slot_clock.present_slot() { - Ok(Some(slot)) => slot, - _ => return Err(Error::UnableToReadSlot), - }; - - // If required, transition the new state to the present slot. 
- for _ in state.slot.as_u64()..present_slot.as_u64() { - per_slot_processing(&mut state, &self.spec)?; - } - - state.build_all_caches(&self.spec)?; - - *self.state.write() = state; - - Ok(()) + /// Returns the slot of the highest block in the canonical chain. + pub fn best_slot(&self) -> Slot { + self.canonical_head.read().beacon_block.slot } /// Ensures the current canonical `BeaconState` has been transitioned to match the `slot_clock`. pub fn catchup_state(&self) -> Result<(), Error> { + let spec = &self.spec; + let present_slot = match self.slot_clock.present_slot() { Ok(Some(slot)) => slot, _ => return Err(Error::UnableToReadSlot), @@ -327,12 +291,12 @@ impl BeaconChain { // If required, transition the new state to the present slot. for _ in state.slot.as_u64()..present_slot.as_u64() { // Ensure the next epoch state caches are built in case of an epoch transition. - state.build_committee_cache(RelativeEpoch::Next, &self.spec)?; + state.build_committee_cache(RelativeEpoch::Next, spec)?; - per_slot_processing(&mut *state, &self.spec)?; + per_slot_processing(&mut *state, spec)?; } - state.build_all_caches(&self.spec)?; + state.build_all_caches(spec)?; Ok(()) } @@ -346,29 +310,6 @@ impl BeaconChain { Ok(()) } - /// Update the justified head to some new values. - pub fn update_finalized_head( - &self, - new_beacon_block: BeaconBlock, - new_beacon_block_root: Hash256, - new_beacon_state: BeaconState, - new_beacon_state_root: Hash256, - ) { - let mut finalized_head = self.finalized_head.write(); - finalized_head.update( - new_beacon_block, - new_beacon_block_root, - new_beacon_state, - new_beacon_state_root, - ); - } - - /// Returns a read-lock guarded `CheckPoint` struct for reading the justified head (as chosen, - /// indirectly, by the fork-choice rule). - pub fn finalized_head(&self) -> RwLockReadGuard> { - self.finalized_head.read() - } - /// Returns the validator index (if any) for the given public key. 
/// /// Information is retrieved from the present `beacon_state.validator_registry`. @@ -407,13 +348,12 @@ impl BeaconChain { /// genesis. pub fn slots_since_genesis(&self) -> Option { let now = self.read_slot_clock()?; + let genesis_slot = self.spec.genesis_slot; - if now < self.spec.genesis_slot { + if now < genesis_slot { None } else { - Some(SlotHeight::from( - now.as_u64() - self.spec.genesis_slot.as_u64(), - )) + Some(SlotHeight::from(now.as_u64() - genesis_slot.as_u64())) } } @@ -469,15 +409,19 @@ impl BeaconChain { /// Produce an `AttestationData` that is valid for the present `slot` and given `shard`. pub fn produce_attestation_data(&self, shard: u64) -> Result { - trace!("BeaconChain::produce_attestation: shard: {}", shard); + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + + self.metrics.attestation_production_requests.inc(); + let timer = self.metrics.attestation_production_times.start_timer(); + let state = self.state.read(); let current_epoch_start_slot = self .state .read() .slot - .epoch(self.spec.slots_per_epoch) - .start_slot(self.spec.slots_per_epoch); + .epoch(slots_per_epoch) + .start_slot(slots_per_epoch); let target_root = if state.slot == current_epoch_start_slot { // If we're on the first slot of the state's epoch. @@ -490,7 +434,7 @@ impl BeaconChain { *self .state .read() - .get_block_root(current_epoch_start_slot - self.spec.slots_per_epoch)? + .get_block_root(current_epoch_start_slot - slots_per_epoch)? } } else { // If we're not on the first slot of the epoch. 
@@ -500,6 +444,9 @@ impl BeaconChain { let previous_crosslink_root = Hash256::from_slice(&state.get_current_crosslink(shard)?.tree_hash_root()); + self.metrics.attestation_production_successes.inc(); + timer.observe_duration(); + Ok(AttestationData { beacon_block_root: self.head().beacon_block_root, source_epoch: state.current_justified_epoch, @@ -520,8 +467,20 @@ impl BeaconChain { &self, attestation: Attestation, ) -> Result<(), AttestationValidationError> { - self.op_pool - .insert_attestation(attestation, &*self.state.read(), &self.spec) + self.metrics.attestation_processing_requests.inc(); + let timer = self.metrics.attestation_processing_times.start_timer(); + + let result = self + .op_pool + .insert_attestation(attestation, &*self.state.read(), &self.spec); + + if result.is_ok() { + self.metrics.attestation_processing_successes.inc(); + } + + timer.observe_duration(); + + result } /// Accept some deposit and queue it for inclusion in an appropriate block. @@ -567,19 +526,39 @@ impl BeaconChain { /// /// Will accept blocks from prior slots, however it will reject any block from a future slot. 
pub fn process_block(&self, block: BeaconBlock) -> Result { - debug!("Processing block with slot {}...", block.slot); + self.metrics.block_processing_requests.inc(); + let timer = self.metrics.block_processing_times.start_timer(); + + let finalized_slot = self + .state + .read() + .finalized_epoch + .start_slot(T::EthSpec::slots_per_epoch()); + if block.slot <= finalized_slot { + return Ok(BlockProcessingOutcome::FinalizedSlot); + } + + if block.slot == 0 { + return Ok(BlockProcessingOutcome::GenesisBlock); + } let block_root = block.block_header().canonical_root(); + if block_root == self.genesis_block_root { + return Ok(BlockProcessingOutcome::GenesisBlock); + } + let present_slot = self.present_slot(); if block.slot > present_slot { - return Ok(BlockProcessingOutcome::InvalidBlock( - InvalidBlock::FutureSlot { - present_slot, - block_slot: block.slot, - }, - )); + return Ok(BlockProcessingOutcome::FutureSlot { + present_slot, + block_slot: block.slot, + }); + } + + if self.store.exists::(&block_root)? { + return Ok(BlockProcessingOutcome::BlockIsAlreadyKnown); } // Load the blocks parent block from the database, returning invalid if that block is not @@ -588,9 +567,9 @@ impl BeaconChain { let parent_block: BeaconBlock = match self.store.get(&parent_block_root)? { Some(previous_block_root) => previous_block_root, None => { - return Ok(BlockProcessingOutcome::InvalidBlock( - InvalidBlock::ParentUnknown, - )); + return Ok(BlockProcessingOutcome::ParentUnknown { + parent: parent_block_root, + }); } }; @@ -608,50 +587,49 @@ impl BeaconChain { // Transition the parent state to the block slot. 
let mut state: BeaconState = parent_state; for _ in state.slot.as_u64()..block.slot.as_u64() { - if let Err(e) = per_slot_processing(&mut state, &self.spec) { - return Ok(BlockProcessingOutcome::InvalidBlock( - InvalidBlock::SlotProcessingError(e), - )); - } + per_slot_processing(&mut state, &self.spec)?; } + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + // Apply the received block to its parent state (which has been transitioned into this // slot). - if let Err(e) = per_block_processing(&mut state, &block, &self.spec) { - return Ok(BlockProcessingOutcome::InvalidBlock( - InvalidBlock::PerBlockProcessingError(e), - )); + match per_block_processing(&mut state, &block, &self.spec) { + Err(BlockProcessingError::BeaconStateError(e)) => { + return Err(Error::BeaconStateError(e)) + } + Err(e) => return Ok(BlockProcessingOutcome::PerBlockProcessingError(e)), + _ => {} } let state_root = state.canonical_root(); if block.state_root != state_root { - return Ok(BlockProcessingOutcome::InvalidBlock( - InvalidBlock::StateRootMismatch, - )); + return Ok(BlockProcessingOutcome::StateRootMismatch); } // Store the block and state. self.store.put(&block_root, &block)?; self.store.put(&state_root, &state)?; - // run the fork_choice add_block logic + // Register the new block with the fork choice service. self.fork_choice .write() .add_block(&block, &block_root, &self.spec)?; - // If the parent block was the parent_block, automatically update the canonical head. + // Execute the fork choice algorithm, enthroning a new head if discovered. // - // TODO: this is a first-in-best-dressed scenario that is not ideal; fork_choice should be - // run instead. - if self.head().beacon_block_root == parent_block_root { - self.update_canonical_head(block.clone(), block_root, state.clone(), state_root); + // Note: in the future we may choose to run fork-choice less often, potentially based upon + // some heuristic around number of attestations seen for the block. 
+ self.fork_choice()?; - // Update the canonical `BeaconState`. - self.update_state(state)?; - } + self.metrics.block_processing_successes.inc(); + self.metrics + .operations_per_block_attestation + .observe(block.body.attestations.len() as f64); + timer.observe_duration(); - Ok(BlockProcessingOutcome::ValidBlock(ValidBlock::Processed)) + Ok(BlockProcessingOutcome::Processed) } /// Produce a new block at the present slot. @@ -663,6 +641,8 @@ impl BeaconChain { randao_reveal: Signature, ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { debug!("Producing block at slot {}...", self.state.read().slot); + self.metrics.block_production_requests.inc(); + let timer = self.metrics.block_production_times.start_timer(); let mut state = self.state.read().clone(); @@ -670,9 +650,13 @@ impl BeaconChain { trace!("Finding attestations for new block..."); - let previous_block_root = *state - .get_block_root(state.slot - 1) - .map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)?; + let previous_block_root = if state.slot > 0 { + *state + .get_block_root(state.slot - 1) + .map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)? + } else { + state.latest_block_header.canonical_root() + }; let (proposer_slashings, attester_slashings) = self.op_pool.get_slashings(&*self.state.read(), &self.spec); @@ -716,35 +700,63 @@ impl BeaconChain { block.state_root = state_root; + self.metrics.block_production_successes.inc(); + timer.observe_duration(); + Ok((block, state)) } - // TODO: Left this as is, modify later + /// Execute the fork choice algorithm and enthrone the result as the canonical head. pub fn fork_choice(&self) -> Result<(), Error> { - let present_head = self.finalized_head().beacon_block_root; + self.metrics.fork_choice_requests.inc(); - let new_head = self + // Start fork choice metrics timer. 
+ let timer = self.metrics.fork_choice_times.start_timer(); + + let justified_root = { + let root = self.head().beacon_state.current_justified_root; + if root == self.spec.zero_hash { + self.genesis_block_root + } else { + root + } + }; + + // Determine the root of the block that is the head of the chain. + let beacon_block_root = self .fork_choice .write() - .find_head(&present_head, &self.spec)?; + .find_head(&justified_root, &self.spec)?; - if new_head != present_head { - let block: BeaconBlock = self + // End fork choice metrics timer. + timer.observe_duration(); + + // If a new head was chosen. + if beacon_block_root != self.head().beacon_block_root { + self.metrics.fork_choice_changed_head.inc(); + + let beacon_block: BeaconBlock = self .store - .get(&new_head)? - .ok_or_else(|| Error::MissingBeaconBlock(new_head))?; - let block_root = block.canonical_root(); + .get(&beacon_block_root)? + .ok_or_else(|| Error::MissingBeaconBlock(beacon_block_root))?; - let state: BeaconState = self + let beacon_state_root = beacon_block.state_root; + let beacon_state: BeaconState = self .store - .get(&block.state_root)? - .ok_or_else(|| Error::MissingBeaconState(block.state_root))?; - let state_root = state.canonical_root(); + .get(&beacon_state_root)? + .ok_or_else(|| Error::MissingBeaconState(beacon_state_root))?; - self.update_canonical_head(block, block_root, state.clone(), state_root); + // If we switched to a new chain (instead of building atop the present chain). + if self.head().beacon_block_root != beacon_block.previous_block_root { + self.metrics.fork_choice_reorg_count.inc(); + }; - // Update the canonical `BeaconState`. 
- self.update_state(state)?; + self.update_canonical_head(CheckPoint { + beacon_block, + beacon_block_root, + beacon_state, + beacon_state_root, + })?; } Ok(()) diff --git a/beacon_node/beacon_chain/src/checkpoint.rs b/beacon_node/beacon_chain/src/checkpoint.rs index c069ac1042..c25e75a85d 100644 --- a/beacon_node/beacon_chain/src/checkpoint.rs +++ b/beacon_node/beacon_chain/src/checkpoint.rs @@ -1,9 +1,10 @@ use serde_derive::Serialize; +use ssz_derive::{Decode, Encode}; use types::{BeaconBlock, BeaconState, EthSpec, Hash256}; /// Represents some block and it's associated state. Generally, this will be used for tracking the /// head, justified head and finalized head. -#[derive(Clone, Serialize, PartialEq, Debug)] +#[derive(Clone, Serialize, PartialEq, Debug, Encode, Decode)] pub struct CheckPoint { pub beacon_block: BeaconBlock, pub beacon_block_root: Hash256, diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 73884916aa..75f2fd84dd 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -1,3 +1,4 @@ +use crate::metrics::Error as MetricsError; use fork_choice::ForkChoiceError; use state_processing::BlockProcessingError; use state_processing::SlotProcessingError; @@ -25,10 +26,17 @@ pub enum BeaconChainError { MissingBeaconBlock(Hash256), MissingBeaconState(Hash256), SlotProcessingError(SlotProcessingError), + MetricsError(String), } easy_from_to!(SlotProcessingError, BeaconChainError); +impl From for BeaconChainError { + fn from(e: MetricsError) -> BeaconChainError { + BeaconChainError::MetricsError(format!("{:?}", e)) + } +} + #[derive(Debug, PartialEq)] pub enum BlockProductionError { UnableToGetBlockRootFromState, diff --git a/beacon_node/beacon_chain/src/iter.rs b/beacon_node/beacon_chain/src/iter.rs new file mode 100644 index 0000000000..1b5e382b02 --- /dev/null +++ b/beacon_node/beacon_chain/src/iter.rs @@ -0,0 +1,133 @@ +use std::sync::Arc; +use store::Store; +use 
types::{BeaconBlock, BeaconState, BeaconStateError, EthSpec, Hash256, Slot}; + +/// Extends `BlockRootsIterator`, returning `BeaconBlock` instances, instead of their roots. +pub struct BlockIterator { + roots: BlockRootsIterator, +} + +impl BlockIterator { + /// Create a new iterator over all blocks in the given `beacon_state` and prior states. + pub fn new(store: Arc, beacon_state: BeaconState, start_slot: Slot) -> Self { + Self { + roots: BlockRootsIterator::new(store, beacon_state, start_slot), + } + } +} + +impl Iterator for BlockIterator { + type Item = BeaconBlock; + + fn next(&mut self) -> Option { + let root = self.roots.next()?; + self.roots.store.get(&root).ok()? + } +} + +/// Iterates backwards through block roots. +/// +/// Uses the `latest_block_roots` field of `BeaconState` to as the source of block roots and will +/// perform a lookup on the `Store` for a prior `BeaconState` if `latest_block_roots` has been +/// exhausted. +/// +/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. +pub struct BlockRootsIterator { + store: Arc, + beacon_state: BeaconState, + slot: Slot, +} + +impl BlockRootsIterator { + /// Create a new iterator over all block roots in the given `beacon_state` and prior states. + pub fn new(store: Arc, beacon_state: BeaconState, start_slot: Slot) -> Self { + Self { + slot: start_slot, + beacon_state, + store, + } + } +} + +impl Iterator for BlockRootsIterator { + type Item = Hash256; + + fn next(&mut self) -> Option { + if (self.slot == 0) || (self.slot > self.beacon_state.slot) { + return None; + } + + self.slot -= 1; + + match self.beacon_state.get_block_root(self.slot) { + Ok(root) => Some(*root), + Err(BeaconStateError::SlotOutOfBounds) => { + // Read a `BeaconState` from the store that has access to prior historical root. + self.beacon_state = { + // Load the earlier state from disk. Skip forward one slot, because a state + // doesn't return it's own state root. 
+ let new_state_root = self.beacon_state.get_state_root(self.slot + 1).ok()?; + + self.store.get(&new_state_root).ok()? + }?; + + self.beacon_state.get_block_root(self.slot).ok().cloned() + } + _ => None, + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use store::MemoryStore; + use types::{test_utils::TestingBeaconStateBuilder, Keypair, MainnetEthSpec}; + + fn get_state() -> BeaconState { + let builder = TestingBeaconStateBuilder::from_single_keypair( + 0, + &Keypair::random(), + &T::default_spec(), + ); + let (state, _keypairs) = builder.build(); + state + } + + #[test] + fn root_iter() { + let store = Arc::new(MemoryStore::open()); + let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); + + let mut state_a: BeaconState = get_state(); + let mut state_b: BeaconState = get_state(); + + state_a.slot = Slot::from(slots_per_historical_root); + state_b.slot = Slot::from(slots_per_historical_root * 2); + + let mut hashes = (0..).into_iter().map(|i| Hash256::from(i)); + + for root in &mut state_a.latest_block_roots[..] { + *root = hashes.next().unwrap() + } + for root in &mut state_b.latest_block_roots[..] 
{ + *root = hashes.next().unwrap() + } + + let state_a_root = hashes.next().unwrap(); + state_b.latest_state_roots[0] = state_a_root; + store.put(&state_a_root, &state_a).unwrap(); + + let iter = BlockRootsIterator::new(store.clone(), state_b.clone(), state_b.slot - 1); + let mut collected: Vec = iter.collect(); + collected.reverse(); + + let expected_len = 2 * MainnetEthSpec::slots_per_historical_root() - 1; + + assert_eq!(collected.len(), expected_len); + + for i in 0..expected_len { + assert_eq!(collected[i], Hash256::from(i as u64)); + } + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 9f3058d0bc..21edb78598 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -1,10 +1,11 @@ mod beacon_chain; mod checkpoint; mod errors; +pub mod iter; +mod metrics; +mod persisted_beacon_chain; -pub use self::beacon_chain::{ - BeaconChain, BeaconChainTypes, BlockProcessingOutcome, InvalidBlock, ValidBlock, -}; +pub use self::beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use fork_choice; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs new file mode 100644 index 0000000000..fa1718ebfb --- /dev/null +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -0,0 +1,143 @@ +pub use prometheus::Error; +use prometheus::{Histogram, HistogramOpts, IntCounter, Opts, Registry}; + +pub struct Metrics { + pub block_processing_requests: IntCounter, + pub block_processing_successes: IntCounter, + pub block_processing_times: Histogram, + pub block_production_requests: IntCounter, + pub block_production_successes: IntCounter, + pub block_production_times: Histogram, + pub attestation_production_requests: IntCounter, + pub attestation_production_successes: IntCounter, + pub attestation_production_times: Histogram, + pub 
attestation_processing_requests: IntCounter, + pub attestation_processing_successes: IntCounter, + pub attestation_processing_times: Histogram, + pub fork_choice_requests: IntCounter, + pub fork_choice_changed_head: IntCounter, + pub fork_choice_reorg_count: IntCounter, + pub fork_choice_times: Histogram, + pub operations_per_block_attestation: Histogram, +} + +impl Metrics { + pub fn new() -> Result { + Ok(Self { + block_processing_requests: { + let opts = Opts::new("block_processing_requests", "total_blocks_processed"); + IntCounter::with_opts(opts)? + }, + block_processing_successes: { + let opts = Opts::new("block_processing_successes", "total_valid_blocks_processed"); + IntCounter::with_opts(opts)? + }, + block_processing_times: { + let opts = HistogramOpts::new("block_processing_times", "block_processing_time"); + Histogram::with_opts(opts)? + }, + block_production_requests: { + let opts = Opts::new("block_production_requests", "attempts_to_produce_new_block"); + IntCounter::with_opts(opts)? + }, + block_production_successes: { + let opts = Opts::new("block_production_successes", "blocks_successfully_produced"); + IntCounter::with_opts(opts)? + }, + block_production_times: { + let opts = HistogramOpts::new("block_production_times", "block_production_time"); + Histogram::with_opts(opts)? + }, + attestation_production_requests: { + let opts = Opts::new( + "attestation_production_requests", + "total_attestation_production_requests", + ); + IntCounter::with_opts(opts)? + }, + attestation_production_successes: { + let opts = Opts::new( + "attestation_production_successes", + "total_attestation_production_successes", + ); + IntCounter::with_opts(opts)? + }, + attestation_production_times: { + let opts = HistogramOpts::new( + "attestation_production_times", + "attestation_production_time", + ); + Histogram::with_opts(opts)? 
+ }, + attestation_processing_requests: { + let opts = Opts::new( + "attestation_processing_requests", + "total_attestation_processing_requests", + ); + IntCounter::with_opts(opts)? + }, + attestation_processing_successes: { + let opts = Opts::new( + "attestation_processing_successes", + "total_attestation_processing_successes", + ); + IntCounter::with_opts(opts)? + }, + attestation_processing_times: { + let opts = HistogramOpts::new( + "attestation_processing_times", + "attestation_processing_time", + ); + Histogram::with_opts(opts)? + }, + fork_choice_requests: { + let opts = Opts::new("fork_choice_requests", "total_times_fork_choice_called"); + IntCounter::with_opts(opts)? + }, + fork_choice_changed_head: { + let opts = Opts::new( + "fork_choice_changed_head", + "total_times_fork_choice_chose_a_new_head", + ); + IntCounter::with_opts(opts)? + }, + fork_choice_reorg_count: { + let opts = Opts::new("fork_choice_reorg_count", "number_of_reorgs"); + IntCounter::with_opts(opts)? + }, + fork_choice_times: { + let opts = HistogramOpts::new("fork_choice_time", "total_time_to_run_fork_choice"); + Histogram::with_opts(opts)? + }, + operations_per_block_attestation: { + let opts = HistogramOpts::new( + "operations_per_block_attestation", + "count_of_attestations_per_block", + ); + Histogram::with_opts(opts)? 
+ }, + }) + } + + pub fn register(&self, registry: &Registry) -> Result<(), Error> { + registry.register(Box::new(self.block_processing_requests.clone()))?; + registry.register(Box::new(self.block_processing_successes.clone()))?; + registry.register(Box::new(self.block_processing_times.clone()))?; + registry.register(Box::new(self.block_production_requests.clone()))?; + registry.register(Box::new(self.block_production_successes.clone()))?; + registry.register(Box::new(self.block_production_times.clone()))?; + registry.register(Box::new(self.attestation_production_requests.clone()))?; + registry.register(Box::new(self.attestation_production_successes.clone()))?; + registry.register(Box::new(self.attestation_production_times.clone()))?; + registry.register(Box::new(self.attestation_processing_requests.clone()))?; + registry.register(Box::new(self.attestation_processing_successes.clone()))?; + registry.register(Box::new(self.attestation_processing_times.clone()))?; + registry.register(Box::new(self.fork_choice_requests.clone()))?; + registry.register(Box::new(self.fork_choice_changed_head.clone()))?; + registry.register(Box::new(self.fork_choice_reorg_count.clone()))?; + registry.register(Box::new(self.fork_choice_times.clone()))?; + registry.register(Box::new(self.operations_per_block_attestation.clone()))?; + + Ok(()) + } +} diff --git a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs new file mode 100644 index 0000000000..f5bdfdee15 --- /dev/null +++ b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs @@ -0,0 +1,30 @@ +use crate::{BeaconChainTypes, CheckPoint}; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use store::{DBColumn, Error as StoreError, StoreItem}; +use types::{BeaconState, Hash256}; + +/// 32-byte key for accessing the `PersistedBeaconChain`. 
+pub const BEACON_CHAIN_DB_KEY: &str = "PERSISTEDBEACONCHAINPERSISTEDBEA"; + +#[derive(Encode, Decode)] +pub struct PersistedBeaconChain { + pub canonical_head: CheckPoint, + // TODO: operations pool. + pub genesis_block_root: Hash256, + pub state: BeaconState, +} + +impl StoreItem for PersistedBeaconChain { + fn db_column() -> DBColumn { + DBColumn::BeaconChain + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &mut [u8]) -> Result { + Self::from_ssz_bytes(bytes).map_err(Into::into) + } +} diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 387bf16757..2b6f44e949 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -11,9 +11,13 @@ store = { path = "../store" } http_server = { path = "../http_server" } rpc = { path = "../rpc" } fork_choice = { path = "../../eth2/fork_choice" } +prometheus = "^0.6" types = { path = "../../eth2/types" } tree_hash = { path = "../../eth2/utils/tree_hash" } +eth2_config = { path = "../../eth2/utils/eth2_config" } slot_clock = { path = "../../eth2/utils/slot_clock" } +serde = "1.0" +serde_derive = "1.0" error-chain = "0.12.0" slog = "^2.2.3" ssz = { path = "../../eth2/utils/ssz" } diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs index b8236c679f..c55c04b443 100644 --- a/beacon_node/client/src/beacon_chain_types.rs +++ b/beacon_node/client/src/beacon_chain_types.rs @@ -1,109 +1,92 @@ -use crate::ClientConfig; use beacon_chain::{ - fork_choice::BitwiseLMDGhost, - slot_clock::SystemTimeSlotClock, - store::{DiskStore, MemoryStore, Store}, - BeaconChain, BeaconChainTypes, + fork_choice::OptimizedLMDGhost, slot_clock::SystemTimeSlotClock, store::Store, BeaconChain, + BeaconChainTypes, }; +use fork_choice::ForkChoice; +use slog::{info, Logger}; +use slot_clock::SlotClock; +use std::marker::PhantomData; use std::sync::Arc; use tree_hash::TreeHash; -use types::{ - 
test_utils::TestingBeaconStateBuilder, BeaconBlock, EthSpec, FewValidatorsEthSpec, Hash256, -}; +use types::{test_utils::TestingBeaconStateBuilder, BeaconBlock, ChainSpec, EthSpec, Hash256}; + +/// The number initial validators when starting the `Minimal`. +const TESTNET_VALIDATOR_COUNT: usize = 16; /// Provides a new, initialized `BeaconChain` pub trait InitialiseBeaconChain { - fn initialise_beacon_chain(config: &ClientConfig) -> BeaconChain; -} - -/// A testnet-suitable BeaconChainType, using `MemoryStore`. -#[derive(Clone)] -pub struct TestnetMemoryBeaconChainTypes; - -impl BeaconChainTypes for TestnetMemoryBeaconChainTypes { - type Store = MemoryStore; - type SlotClock = SystemTimeSlotClock; - type ForkChoice = BitwiseLMDGhost; - type EthSpec = FewValidatorsEthSpec; -} - -impl InitialiseBeaconChain for TestnetMemoryBeaconChainTypes -where - T: BeaconChainTypes< - Store = MemoryStore, - SlotClock = SystemTimeSlotClock, - ForkChoice = BitwiseLMDGhost, - >, -{ - fn initialise_beacon_chain(_config: &ClientConfig) -> BeaconChain { - initialize_chain(MemoryStore::open()) + fn initialise_beacon_chain( + store: Arc, + spec: ChainSpec, + log: Logger, + ) -> BeaconChain { + maybe_load_from_store_for_testnet::<_, T::Store, T::EthSpec>(store, spec, log) } } -/// A testnet-suitable BeaconChainType, using `DiskStore`. 
#[derive(Clone)] -pub struct TestnetDiskBeaconChainTypes; - -impl BeaconChainTypes for TestnetDiskBeaconChainTypes { - type Store = DiskStore; - type SlotClock = SystemTimeSlotClock; - type ForkChoice = BitwiseLMDGhost; - type EthSpec = FewValidatorsEthSpec; +pub struct ClientType { + _phantom_t: PhantomData, + _phantom_u: PhantomData, } -impl InitialiseBeaconChain for TestnetDiskBeaconChainTypes -where - T: BeaconChainTypes< - Store = DiskStore, - SlotClock = SystemTimeSlotClock, - ForkChoice = BitwiseLMDGhost, - >, -{ - fn initialise_beacon_chain(config: &ClientConfig) -> BeaconChain { - let store = DiskStore::open(&config.db_name).expect("Unable to open DB."); +impl BeaconChainTypes for ClientType { + type Store = S; + type SlotClock = SystemTimeSlotClock; + type ForkChoice = OptimizedLMDGhost; + type EthSpec = E; +} +impl InitialiseBeaconChain for ClientType {} - initialize_chain(store) +/// Loads a `BeaconChain` from `store`, if it exists. Otherwise, create a new chain from genesis. 
+fn maybe_load_from_store_for_testnet( + store: Arc, + spec: ChainSpec, + log: Logger, +) -> BeaconChain +where + T: BeaconChainTypes, + T::ForkChoice: ForkChoice, +{ + if let Ok(Some(beacon_chain)) = BeaconChain::from_store(store.clone(), spec.clone()) { + info!( + log, + "Loaded BeaconChain from store"; + "slot" => beacon_chain.head().beacon_state.slot, + "best_slot" => beacon_chain.best_slot(), + ); + + beacon_chain + } else { + info!(log, "Initializing new BeaconChain from genesis"); + let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists( + TESTNET_VALIDATOR_COUNT, + &spec, + ); + let (genesis_state, _keypairs) = state_builder.build(); + + let mut genesis_block = BeaconBlock::empty(&spec); + genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); + + // Slot clock + let slot_clock = T::SlotClock::new( + spec.genesis_slot, + genesis_state.genesis_time, + spec.seconds_per_slot, + ); + // Choose the fork choice + let fork_choice = T::ForkChoice::new(store.clone()); + + // Genesis chain + //TODO: Handle error correctly + BeaconChain::from_genesis( + store, + slot_clock, + genesis_state, + genesis_block, + spec, + fork_choice, + ) + .expect("Terminate if beacon chain generation fails") } } - -/// Produces a `BeaconChain` given some pre-initialized `Store`. 
-fn initialize_chain(store: U) -> BeaconChain -where - T: BeaconChainTypes< - Store = U, - SlotClock = SystemTimeSlotClock, - ForkChoice = BitwiseLMDGhost, - >, -{ - let spec = T::EthSpec::spec(); - - let store = Arc::new(store); - - let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(8, &spec); - let (genesis_state, _keypairs) = state_builder.build(); - - let mut genesis_block = BeaconBlock::empty(&spec); - genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); - - // Slot clock - let slot_clock = SystemTimeSlotClock::new( - spec.genesis_slot, - genesis_state.genesis_time, - spec.seconds_per_slot, - ) - .expect("Unable to load SystemTimeSlotClock"); - // Choose the fork choice - let fork_choice = BitwiseLMDGhost::new(store.clone()); - - // Genesis chain - //TODO: Handle error correctly - BeaconChain::from_genesis( - store, - slot_clock, - genesis_state, - genesis_block, - spec.clone(), - fork_choice, - ) - .expect("Terminate if beacon chain generation fails") -} diff --git a/beacon_node/client/src/client_config.rs b/beacon_node/client/src/client_config.rs index 243848e9f0..166725b61c 100644 --- a/beacon_node/client/src/client_config.rs +++ b/beacon_node/client/src/client_config.rs @@ -1,151 +1,67 @@ use clap::ArgMatches; -use fork_choice::ForkChoiceAlgorithm; use http_server::HttpServerConfig; use network::NetworkConfig; -use slog::error; +use serde_derive::{Deserialize, Serialize}; use std::fs; -use std::net::SocketAddr; -use std::net::{IpAddr, Ipv4Addr}; use std::path::PathBuf; -use types::multiaddr::Protocol; -use types::multiaddr::ToMultiaddr; -use types::Multiaddr; -use types::{ChainSpec, EthSpec, LighthouseTestnetEthSpec}; -#[derive(Debug, Clone)] -pub enum DBType { - Memory, - Disk, -} - -/// Stores the client configuration for this Lighthouse instance. -#[derive(Debug, Clone)] +/// The core configuration of a Lighthouse beacon node. 
+#[derive(Debug, Clone, Serialize, Deserialize)] pub struct ClientConfig { pub data_dir: PathBuf, - pub spec: ChainSpec, - pub net_conf: network::NetworkConfig, - pub fork_choice: ForkChoiceAlgorithm, - pub db_type: DBType, - pub db_name: PathBuf, - pub rpc_conf: rpc::RPCConfig, - pub http_conf: HttpServerConfig, //pub ipc_conf: + pub db_type: String, + db_name: String, + pub network: network::NetworkConfig, + pub rpc: rpc::RPCConfig, + pub http: HttpServerConfig, } impl Default for ClientConfig { - /// Build a new lighthouse configuration from defaults. fn default() -> Self { - let data_dir = { - let home = dirs::home_dir().expect("Unable to determine home dir."); - home.join(".lighthouse/") - }; - fs::create_dir_all(&data_dir) - .unwrap_or_else(|_| panic!("Unable to create {:?}", &data_dir)); - - let default_spec = LighthouseTestnetEthSpec::spec(); - let default_net_conf = NetworkConfig::new(default_spec.boot_nodes.clone()); - Self { - data_dir: data_dir.clone(), - // default to foundation for chain specs - spec: default_spec, - net_conf: default_net_conf, - // default to bitwise LMD Ghost - fork_choice: ForkChoiceAlgorithm::BitwiseLMDGhost, - // default to memory db for now - db_type: DBType::Memory, - // default db name for disk-based dbs - db_name: data_dir.join("chain_db"), - rpc_conf: rpc::RPCConfig::default(), - http_conf: HttpServerConfig::default(), + data_dir: PathBuf::from(".lighthouse"), + db_type: "disk".to_string(), + db_name: "chain_db".to_string(), + // Note: there are no default bootnodes specified. + // Once bootnodes are established, add them here. + network: NetworkConfig::new(vec![]), + rpc: rpc::RPCConfig::default(), + http: HttpServerConfig::default(), } } } impl ClientConfig { - /// Parses the CLI arguments into a `Config` struct. - pub fn parse_args(args: ArgMatches, log: &slog::Logger) -> Result { - let mut config = ClientConfig::default(); + /// Returns the path to which the client may initialize an on-disk database. 
+ pub fn db_path(&self) -> Option { + self.data_dir() + .and_then(|path| Some(path.join(&self.db_name))) + } - /* Network related arguments */ + /// Returns the core path for the client. + pub fn data_dir(&self) -> Option { + let path = dirs::home_dir()?.join(&self.data_dir); + fs::create_dir_all(&path).ok()?; + Some(path) + } - // Custom p2p listen port - if let Some(port_str) = args.value_of("port") { - if let Ok(port) = port_str.parse::() { - config.net_conf.listen_port = port; - // update the listening multiaddrs - for address in &mut config.net_conf.listen_addresses { - address.pop(); - address.append(Protocol::Tcp(port)); - } - } else { - error!(log, "Invalid port"; "port" => port_str); - return Err("Invalid port"); - } - } - // Custom listening address ipv4/ipv6 - // TODO: Handle list of addresses - if let Some(listen_address_str) = args.value_of("listen-address") { - if let Ok(listen_address) = listen_address_str.parse::() { - let multiaddr = SocketAddr::new(listen_address, config.net_conf.listen_port) - .to_multiaddr() - .expect("Invalid listen address format"); - config.net_conf.listen_addresses = vec![multiaddr]; - } else { - error!(log, "Invalid IP Address"; "Address" => listen_address_str); - return Err("Invalid IP Address"); - } - } - - // Custom bootnodes - if let Some(boot_addresses_str) = args.value_of("boot-nodes") { - let mut boot_addresses_split = boot_addresses_str.split(","); - for boot_address in boot_addresses_split { - if let Ok(boot_address) = boot_address.parse::() { - config.net_conf.boot_nodes.append(&mut vec![boot_address]); - } else { - error!(log, "Invalid Bootnode multiaddress"; "Multiaddr" => boot_addresses_str); - return Err("Invalid IP Address"); - } - } - } - - /* Filesystem related arguments */ - - // Custom datadir + /// Apply the following arguments to `self`, replacing values if they are specified in `args`. + /// + /// Returns an error if arguments are obviously invalid. May succeed even if some values are + /// invalid. 
+ pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { if let Some(dir) = args.value_of("datadir") { - config.data_dir = PathBuf::from(dir.to_string()); + self.data_dir = PathBuf::from(dir); }; - /* RPC related arguments */ - - if args.is_present("rpc") { - config.rpc_conf.enabled = true; + if let Some(dir) = args.value_of("db") { + self.db_type = dir.to_string(); } - if let Some(rpc_address) = args.value_of("rpc-address") { - if let Ok(listen_address) = rpc_address.parse::() { - config.rpc_conf.listen_address = listen_address; - } else { - error!(log, "Invalid RPC listen address"; "Address" => rpc_address); - return Err("Invalid RPC listen address"); - } - } + self.network.apply_cli_args(args)?; + self.rpc.apply_cli_args(args)?; + self.http.apply_cli_args(args)?; - if let Some(rpc_port) = args.value_of("rpc-port") { - if let Ok(port) = rpc_port.parse::() { - config.rpc_conf.port = port; - } else { - error!(log, "Invalid RPC port"; "port" => rpc_port); - return Err("Invalid RPC port"); - } - } - - match args.value_of("db") { - Some("disk") => config.db_type = DBType::Disk, - Some("memory") => config.db_type = DBType::Memory, - _ => unreachable!(), // clap prevents this. - }; - - Ok(config) + Ok(()) } } diff --git a/beacon_node/client/src/error.rs b/beacon_node/client/src/error.rs index 618813826b..680ad8277d 100644 --- a/beacon_node/client/src/error.rs +++ b/beacon_node/client/src/error.rs @@ -1,14 +1,9 @@ -// generates error types use network; -use error_chain::{ - error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed, - impl_extract_backtrace, -}; +use error_chain::error_chain; error_chain! 
{ links { Network(network::error::Error, network::error::ErrorKind); } - } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 40be9b7b8e..92ed6e0227 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -6,10 +6,10 @@ pub mod error; pub mod notifier; use beacon_chain::BeaconChain; -use beacon_chain_types::InitialiseBeaconChain; use exit_future::Signal; use futures::{future::Future, Stream}; use network::Service as NetworkService; +use prometheus::Registry; use slog::{error, info, o}; use slot_clock::SlotClock; use std::marker::PhantomData; @@ -19,16 +19,18 @@ use tokio::runtime::TaskExecutor; use tokio::timer::Interval; pub use beacon_chain::BeaconChainTypes; -pub use beacon_chain_types::{TestnetDiskBeaconChainTypes, TestnetMemoryBeaconChainTypes}; -pub use client_config::{ClientConfig, DBType}; +pub use beacon_chain_types::ClientType; +pub use beacon_chain_types::InitialiseBeaconChain; +pub use client_config::ClientConfig; +pub use eth2_config::Eth2Config; /// Main beacon node client service. This provides the connection and initialisation of the clients /// sub-services in multiple threads. pub struct Client { /// Configuration for the lighthouse client. - _config: ClientConfig, + _client_config: ClientConfig, /// The beacon chain for the running client. - _beacon_chain: Arc>, + beacon_chain: Arc>, /// Reference to the network service. pub network: Arc>, /// Signal to terminate the RPC server. @@ -49,12 +51,27 @@ where { /// Generate an instance of the client. Spawn and link all internal sub-processes. 
pub fn new( - config: ClientConfig, + client_config: ClientConfig, + eth2_config: Eth2Config, + store: T::Store, log: slog::Logger, executor: &TaskExecutor, ) -> error::Result { - // generate a beacon chain - let beacon_chain = Arc::new(T::initialise_beacon_chain(&config)); + let metrics_registry = Registry::new(); + let store = Arc::new(store); + let seconds_per_slot = eth2_config.spec.seconds_per_slot; + + // Load a `BeaconChain` from the store, or create a new one if it does not exist. + let beacon_chain = Arc::new(T::initialise_beacon_chain( + store, + eth2_config.spec.clone(), + log.clone(), + )); + // Registry all beacon chain metrics with the global registry. + beacon_chain + .metrics + .register(&metrics_registry) + .expect("Failed to registry metrics"); if beacon_chain.read_slot_clock().is_none() { panic!("Cannot start client before genesis!") @@ -65,7 +82,7 @@ where // If we don't block here we create an initial scenario where we're unable to process any // blocks and we're basically useless. 
{ - let state_slot = beacon_chain.state.read().slot; + let state_slot = beacon_chain.head().beacon_state.slot; let wall_clock_slot = beacon_chain.read_slot_clock().unwrap(); let slots_since_genesis = beacon_chain.slots_since_genesis().unwrap(); info!( @@ -81,13 +98,13 @@ where info!( log, "State initialized"; - "state_slot" => beacon_chain.state.read().slot, + "state_slot" => beacon_chain.head().beacon_state.slot, "wall_clock_slot" => beacon_chain.read_slot_clock().unwrap(), ); // Start the network service, libp2p and syncing threads // TODO: Add beacon_chain reference to network parameters - let network_config = &config.net_conf; + let network_config = &client_config.network; let network_logger = log.new(o!("Service" => "Network")); let (network, network_send) = NetworkService::new( beacon_chain.clone(), @@ -97,9 +114,9 @@ where )?; // spawn the RPC server - let rpc_exit_signal = if config.rpc_conf.enabled { + let rpc_exit_signal = if client_config.rpc.enabled { Some(rpc::start_server( - &config.rpc_conf, + &client_config.rpc, executor, network_send.clone(), beacon_chain.clone(), @@ -112,20 +129,26 @@ where // Start the `http_server` service. // // Note: presently we are ignoring the config and _always_ starting a HTTP server. 
- let http_exit_signal = Some(http_server::start_service( - &config.http_conf, - executor, - network_send, - beacon_chain.clone(), - &log, - )); + let http_exit_signal = if client_config.http.enabled { + Some(http_server::start_service( + &client_config.http, + executor, + network_send, + beacon_chain.clone(), + client_config.db_path().expect("unable to read datadir"), + metrics_registry, + &log, + )) + } else { + None + }; let (slot_timer_exit_signal, exit) = exit_future::signal(); if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() { // set up the validator work interval - start at next slot and proceed every slot let interval = { // Set the interval to start at the next slot, and every slot after - let slot_duration = Duration::from_secs(config.spec.seconds_per_slot); + let slot_duration = Duration::from_secs(seconds_per_slot); //TODO: Handle checked add correctly Interval::new(Instant::now() + duration_to_next_slot, slot_duration) }; @@ -147,8 +170,8 @@ where } Ok(Client { - _config: config, - _beacon_chain: beacon_chain, + _client_config: client_config, + beacon_chain, http_exit_signal, rpc_exit_signal, slot_timer_exit_signal: Some(slot_timer_exit_signal), @@ -159,6 +182,14 @@ where } } +impl Drop for Client { + fn drop(&mut self) { + // Save the beacon chain to it's store before dropping. 
+ let _result = self.beacon_chain.persist(); + dbg!("Saved BeaconChain to store"); + } +} + fn do_state_catchup(chain: &Arc>, log: &slog::Logger) { if let Some(genesis_height) = chain.slots_since_genesis() { let result = chain.catchup_state(); @@ -167,7 +198,7 @@ fn do_state_catchup(chain: &Arc>, log: &slog "best_slot" => chain.head().beacon_block.slot, "latest_block_root" => format!("{}", chain.head().beacon_block_root), "wall_clock_slot" => chain.read_slot_clock().unwrap(), - "state_slot" => chain.state.read().slot, + "state_slot" => chain.head().beacon_state.slot, "slots_since_genesis" => genesis_height, ); diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index d9c43b23c0..cc6393e388 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -6,9 +6,12 @@ edition = "2018" [dependencies] beacon_chain = { path = "../beacon_chain" } +clap = "2.32.0" # SigP repository until PR is merged libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "b3c32d9a821ae6cc89079499cc6e8a6bab0bffc3" } types = { path = "../../eth2/types" } +serde = "1.0" +serde_derive = "1.0" ssz = { path = "../../eth2/utils/ssz" } ssz_derive = { path = "../../eth2/utils/ssz_derive" } slog = "2.4.1" diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index 8f3a000e12..10b140c3b5 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -261,7 +261,7 @@ mod test { #[test] fn ssz_encoding() { - let original = PubsubMessage::Block(BeaconBlock::empty(&FoundationEthSpec::spec())); + let original = PubsubMessage::Block(BeaconBlock::empty(&MainnetEthSpec::default_spec())); let encoded = ssz_encode(&original); diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index 2651006585..ee2add75eb 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -1,20 +1,22 @@ -use 
crate::Multiaddr; +use clap::ArgMatches; use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder}; +use serde_derive::{Deserialize, Serialize}; +use types::multiaddr::{Error as MultiaddrError, Multiaddr}; -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(default)] /// Network configuration for lighthouse. pub struct Config { - //TODO: stubbing networking initial params, change in the future /// IP address to listen on. - pub listen_addresses: Vec, - /// Listen port UDP/TCP. - pub listen_port: u16, + listen_addresses: Vec, /// Gossipsub configuration parameters. + #[serde(skip)] pub gs_config: GossipsubConfig, /// Configuration parameters for node identification protocol. + #[serde(skip)] pub identify_config: IdentifyConfig, /// List of nodes to initially connect to. - pub boot_nodes: Vec, + boot_nodes: Vec, /// Client version pub client_version: String, /// List of topics to subscribe to as strings @@ -25,15 +27,12 @@ impl Default for Config { /// Generate a default network configuration. 
fn default() -> Self { Config { - listen_addresses: vec!["/ip4/127.0.0.1/tcp/9000" - .parse() - .expect("is a correct multi-address")], - listen_port: 9000, + listen_addresses: vec!["/ip4/127.0.0.1/tcp/9000".to_string()], gs_config: GossipsubConfigBuilder::new() .max_gossip_size(4_000_000) .build(), identify_config: IdentifyConfig::default(), - boot_nodes: Vec::new(), + boot_nodes: vec![], client_version: version::version(), topics: vec![String::from("beacon_chain")], } @@ -41,12 +40,34 @@ impl Default for Config { } impl Config { - pub fn new(boot_nodes: Vec) -> Self { + pub fn new(boot_nodes: Vec) -> Self { let mut conf = Config::default(); conf.boot_nodes = boot_nodes; conf } + + pub fn listen_addresses(&self) -> Result, MultiaddrError> { + self.listen_addresses.iter().map(|s| s.parse()).collect() + } + + pub fn boot_nodes(&self) -> Result, MultiaddrError> { + self.boot_nodes.iter().map(|s| s.parse()).collect() + } + + pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { + if let Some(listen_address_str) = args.value_of("listen-address") { + let listen_addresses = listen_address_str.split(',').map(Into::into).collect(); + self.listen_addresses = listen_addresses; + } + + if let Some(boot_addresses_str) = args.value_of("boot-nodes") { + let boot_addresses = boot_addresses_str.split(',').map(Into::into).collect(); + self.boot_nodes = boot_addresses; + } + + Ok(()) + } } /// The configuration parameters for the Identify protocol diff --git a/beacon_node/eth2-libp2p/src/error.rs b/beacon_node/eth2-libp2p/src/error.rs index 163fe575d2..a291e8fec5 100644 --- a/beacon_node/eth2-libp2p/src/error.rs +++ b/beacon_node/eth2-libp2p/src/error.rs @@ -1,8 +1,5 @@ // generates error types -use error_chain::{ - error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed, - impl_extract_backtrace, -}; +use error_chain::error_chain; error_chain! 
{} diff --git a/beacon_node/eth2-libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs index ef73157650..b752b74cbb 100644 --- a/beacon_node/eth2-libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2-libp2p/src/rpc/methods.rs @@ -172,8 +172,8 @@ pub struct BeaconBlockRootsResponse { impl BeaconBlockRootsResponse { /// Returns `true` if each `self.roots.slot[i]` is higher than the preceeding `i`. pub fn slots_are_ascending(&self) -> bool { - for i in 1..self.roots.len() { - if self.roots[i - 1].slot >= self.roots[i].slot { + for window in self.roots.windows(2) { + if window[0].slot >= window[1].slot { return false; } } diff --git a/beacon_node/eth2-libp2p/src/rpc/protocol.rs b/beacon_node/eth2-libp2p/src/rpc/protocol.rs index 82257cc327..de52f964e5 100644 --- a/beacon_node/eth2-libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2-libp2p/src/rpc/protocol.rs @@ -1,6 +1,7 @@ use super::methods::*; use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use ssz::{impl_decode_via_from, impl_encode_via_from, ssz_encode, Decode, Encode}; +use ssz_derive::{Decode, Encode}; use std::hash::{Hash, Hasher}; use std::io; use std::iter; @@ -31,7 +32,7 @@ impl Default for RPCProtocol { } /// A monotonic counter for ordering `RPCRequest`s. -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Copy, Default)] pub struct RequestId(u64); impl RequestId { @@ -41,7 +42,7 @@ impl RequestId { } /// Return the previous id. - pub fn previous(&self) -> Self { + pub fn previous(self) -> Self { Self(self.0 - 1) } } @@ -115,65 +116,67 @@ where } } +/// A helper structed used to obtain SSZ serialization for RPC messages. +#[derive(Encode, Decode, Default)] +struct SszContainer { + /// Note: the `is_request` field is not included in the spec. + /// + /// We are unable to determine a request from a response unless we add some flag to the + /// packet. Here we have added a bool (encoded as 1 byte) which is set to `1` if the + /// message is a request. 
+ is_request: bool, + id: u64, + other: u16, + bytes: Vec, +} + // NOTE! // // This code has not been tested, it is a placeholder until we can update to the new libp2p // spec. fn decode(packet: Vec) -> Result { - let mut builder = ssz::SszDecoderBuilder::new(&packet); + let msg = SszContainer::from_ssz_bytes(&packet)?; - builder.register_type::()?; - builder.register_type::()?; - builder.register_type::()?; - builder.register_type::>()?; - - let mut decoder = builder.build()?; - - let request: bool = decoder.decode_next()?; - let id: RequestId = decoder.decode_next()?; - let method_id: u16 = decoder.decode_next()?; - let bytes: Vec = decoder.decode_next()?; - - if request { - let body = match RPCMethod::from(method_id) { - RPCMethod::Hello => RPCRequest::Hello(HelloMessage::from_ssz_bytes(&bytes)?), - RPCMethod::Goodbye => RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes(&bytes)?), + if msg.is_request { + let body = match RPCMethod::from(msg.other) { + RPCMethod::Hello => RPCRequest::Hello(HelloMessage::from_ssz_bytes(&msg.bytes)?), + RPCMethod::Goodbye => RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes(&msg.bytes)?), RPCMethod::BeaconBlockRoots => { - RPCRequest::BeaconBlockRoots(BeaconBlockRootsRequest::from_ssz_bytes(&bytes)?) - } - RPCMethod::BeaconBlockHeaders => { - RPCRequest::BeaconBlockHeaders(BeaconBlockHeadersRequest::from_ssz_bytes(&bytes)?) + RPCRequest::BeaconBlockRoots(BeaconBlockRootsRequest::from_ssz_bytes(&msg.bytes)?) } + RPCMethod::BeaconBlockHeaders => RPCRequest::BeaconBlockHeaders( + BeaconBlockHeadersRequest::from_ssz_bytes(&msg.bytes)?, + ), RPCMethod::BeaconBlockBodies => { - RPCRequest::BeaconBlockBodies(BeaconBlockBodiesRequest::from_ssz_bytes(&bytes)?) + RPCRequest::BeaconBlockBodies(BeaconBlockBodiesRequest::from_ssz_bytes(&msg.bytes)?) } RPCMethod::BeaconChainState => { - RPCRequest::BeaconChainState(BeaconChainStateRequest::from_ssz_bytes(&bytes)?) 
+ RPCRequest::BeaconChainState(BeaconChainStateRequest::from_ssz_bytes(&msg.bytes)?) } RPCMethod::Unknown => return Err(DecodeError::UnknownRPCMethod), }; Ok(RPCEvent::Request { - id, - method_id, + id: RequestId::from(msg.id), + method_id: msg.other, body, }) } // we have received a response else { - let result = match RPCMethod::from(method_id) { - RPCMethod::Hello => RPCResponse::Hello(HelloMessage::from_ssz_bytes(&bytes)?), + let result = match RPCMethod::from(msg.other) { + RPCMethod::Hello => RPCResponse::Hello(HelloMessage::from_ssz_bytes(&msg.bytes)?), RPCMethod::BeaconBlockRoots => { - RPCResponse::BeaconBlockRoots(BeaconBlockRootsResponse::from_ssz_bytes(&bytes)?) - } - RPCMethod::BeaconBlockHeaders => { - RPCResponse::BeaconBlockHeaders(BeaconBlockHeadersResponse::from_ssz_bytes(&bytes)?) - } - RPCMethod::BeaconBlockBodies => { - RPCResponse::BeaconBlockBodies(BeaconBlockBodiesResponse::from_ssz_bytes(&packet)?) + RPCResponse::BeaconBlockRoots(BeaconBlockRootsResponse::from_ssz_bytes(&msg.bytes)?) } + RPCMethod::BeaconBlockHeaders => RPCResponse::BeaconBlockHeaders( + BeaconBlockHeadersResponse::from_ssz_bytes(&msg.bytes)?, + ), + RPCMethod::BeaconBlockBodies => RPCResponse::BeaconBlockBodies( + BeaconBlockBodiesResponse::from_ssz_bytes(&msg.bytes)?, + ), RPCMethod::BeaconChainState => { - RPCResponse::BeaconChainState(BeaconChainStateResponse::from_ssz_bytes(&packet)?) + RPCResponse::BeaconChainState(BeaconChainStateResponse::from_ssz_bytes(&msg.bytes)?) } // We should never receive a goodbye response; it is invalid. RPCMethod::Goodbye => return Err(DecodeError::UnknownRPCMethod), @@ -181,8 +184,8 @@ fn decode(packet: Vec) -> Result { }; Ok(RPCEvent::Response { - id, - method_id, + id: RequestId::from(msg.id), + method_id: msg.other, result, }) } @@ -208,80 +211,44 @@ impl Encode for RPCEvent { false } - // NOTE! - // - // This code has not been tested, it is a placeholder until we can update to the new libp2p - // spec. 
fn ssz_append(&self, buf: &mut Vec) { - let offset = ::ssz_fixed_len() - + ::ssz_fixed_len() - + as Encode>::ssz_fixed_len(); - - let mut encoder = ssz::SszEncoder::container(buf, offset); - - match self { + let container = match self { RPCEvent::Request { id, method_id, body, - } => { - encoder.append(&true); - encoder.append(id); - encoder.append(method_id); - - // Encode the `body` as a `Vec`. - match body { - RPCRequest::Hello(body) => { - encoder.append(&body.as_ssz_bytes()); - } - RPCRequest::Goodbye(body) => { - encoder.append(&body.as_ssz_bytes()); - } - RPCRequest::BeaconBlockRoots(body) => { - encoder.append(&body.as_ssz_bytes()); - } - RPCRequest::BeaconBlockHeaders(body) => { - encoder.append(&body.as_ssz_bytes()); - } - RPCRequest::BeaconBlockBodies(body) => { - encoder.append(&body.as_ssz_bytes()); - } - RPCRequest::BeaconChainState(body) => { - encoder.append(&body.as_ssz_bytes()); - } - } - } + } => SszContainer { + is_request: true, + id: (*id).into(), + other: *method_id, + bytes: match body { + RPCRequest::Hello(body) => body.as_ssz_bytes(), + RPCRequest::Goodbye(body) => body.as_ssz_bytes(), + RPCRequest::BeaconBlockRoots(body) => body.as_ssz_bytes(), + RPCRequest::BeaconBlockHeaders(body) => body.as_ssz_bytes(), + RPCRequest::BeaconBlockBodies(body) => body.as_ssz_bytes(), + RPCRequest::BeaconChainState(body) => body.as_ssz_bytes(), + }, + }, RPCEvent::Response { id, method_id, result, - } => { - encoder.append(&true); - encoder.append(id); - encoder.append(method_id); + } => SszContainer { + is_request: false, + id: (*id).into(), + other: *method_id, + bytes: match result { + RPCResponse::Hello(response) => response.as_ssz_bytes(), + RPCResponse::BeaconBlockRoots(response) => response.as_ssz_bytes(), + RPCResponse::BeaconBlockHeaders(response) => response.as_ssz_bytes(), + RPCResponse::BeaconBlockBodies(response) => response.as_ssz_bytes(), + RPCResponse::BeaconChainState(response) => response.as_ssz_bytes(), + }, + }, + }; - match result { - 
RPCResponse::Hello(response) => { - encoder.append(&response.as_ssz_bytes()); - } - RPCResponse::BeaconBlockRoots(response) => { - encoder.append(&response.as_ssz_bytes()); - } - RPCResponse::BeaconBlockHeaders(response) => { - encoder.append(&response.as_ssz_bytes()); - } - RPCResponse::BeaconBlockBodies(response) => { - encoder.append(&response.as_ssz_bytes()); - } - RPCResponse::BeaconChainState(response) => { - encoder.append(&response.as_ssz_bytes()); - } - } - } - } - - // Finalize the encoder, writing to `buf`. - encoder.finalize(); + container.ssz_append(buf) } } diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 07a36e408c..18f7ca98ce 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -57,7 +57,10 @@ impl Service { }; // listen on all addresses - for address in &config.listen_addresses { + for address in config + .listen_addresses() + .map_err(|e| format!("Invalid listen multiaddr: {}", e))? + { match Swarm::listen_on(&mut swarm, address.clone()) { Ok(mut listen_addr) => { listen_addr.append(Protocol::P2p(local_peer_id.clone().into())); @@ -68,7 +71,10 @@ impl Service { } // connect to boot nodes - these are currently stored as multiaddrs // Once we have discovery, can set to peerId - for bootnode in config.boot_nodes { + for bootnode in config + .boot_nodes() + .map_err(|e| format!("Invalid boot node multiaddr: {:?}", e))? 
+ { match Swarm::dial_addr(&mut swarm, bootnode.clone()) { Ok(()) => debug!(log, "Dialing bootnode: {}", bootnode), Err(err) => debug!( diff --git a/beacon_node/http_server/Cargo.toml b/beacon_node/http_server/Cargo.toml index fb8bf9f4b7..098c3e1c9d 100644 --- a/beacon_node/http_server/Cargo.toml +++ b/beacon_node/http_server/Cargo.toml @@ -20,7 +20,7 @@ fork_choice = { path = "../../eth2/fork_choice" } grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } persistent = "^0.4" protobuf = "2.0.2" -prometheus = "^0.6" +prometheus = { version = "^0.6", features = ["process"] } clap = "2.32.0" store = { path = "../store" } dirs = "1.0.3" diff --git a/beacon_node/http_server/src/key.rs b/beacon_node/http_server/src/key.rs index 2d27ce9f06..a69da6747f 100644 --- a/beacon_node/http_server/src/key.rs +++ b/beacon_node/http_server/src/key.rs @@ -1,6 +1,9 @@ +use crate::metrics::LocalMetrics; use beacon_chain::{BeaconChain, BeaconChainTypes}; use iron::typemap::Key; +use prometheus::Registry; use std::marker::PhantomData; +use std::path::PathBuf; use std::sync::Arc; pub struct BeaconChainKey { @@ -10,3 +13,21 @@ pub struct BeaconChainKey { impl Key for BeaconChainKey { type Value = Arc>; } + +pub struct MetricsRegistryKey; + +impl Key for MetricsRegistryKey { + type Value = Registry; +} + +pub struct LocalMetricsKey; + +impl Key for LocalMetricsKey { + type Value = LocalMetrics; +} + +pub struct DBPathKey; + +impl Key for DBPathKey { + type Value = PathBuf; +} diff --git a/beacon_node/http_server/src/lib.rs b/beacon_node/http_server/src/lib.rs index 486badaff2..ab1176d612 100644 --- a/beacon_node/http_server/src/lib.rs +++ b/beacon_node/http_server/src/lib.rs @@ -3,39 +3,65 @@ mod key; mod metrics; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use clap::ArgMatches; use futures::Future; use iron::prelude::*; use network::NetworkMessage; +use prometheus::Registry; use router::Router; +use serde_derive::{Deserialize, Serialize}; use 
slog::{info, o, warn}; +use std::path::PathBuf; use std::sync::Arc; use tokio::runtime::TaskExecutor; -#[derive(PartialEq, Clone, Debug)] +#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] pub struct HttpServerConfig { pub enabled: bool, pub listen_address: String, + pub listen_port: String, } impl Default for HttpServerConfig { fn default() -> Self { Self { enabled: false, - listen_address: "127.0.0.1:5051".to_string(), + listen_address: "127.0.0.1".to_string(), + listen_port: "5052".to_string(), } } } +impl HttpServerConfig { + pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { + if args.is_present("http") { + self.enabled = true; + } + + if let Some(listen_address) = args.value_of("http-address") { + self.listen_address = listen_address.to_string(); + } + + if let Some(listen_port) = args.value_of("http-port") { + self.listen_port = listen_port.to_string(); + } + + Ok(()) + } +} + /// Build the `iron` HTTP server, defining the core routes. pub fn create_iron_http_server( beacon_chain: Arc>, + db_path: PathBuf, + metrics_registry: Registry, ) -> Iron { let mut router = Router::new(); // A `GET` request to `/metrics` is handled by the `metrics` module. router.get( "/metrics", - metrics::build_handler(beacon_chain.clone()), + metrics::build_handler(beacon_chain.clone(), db_path, metrics_registry), "metrics", ); @@ -51,6 +77,8 @@ pub fn start_service( executor: &TaskExecutor, _network_chan: crossbeam_channel::Sender, beacon_chain: Arc>, + db_path: PathBuf, + metrics_registry: Registry, log: &slog::Logger, ) -> exit_future::Signal { let log = log.new(o!("Service"=>"HTTP")); @@ -61,7 +89,7 @@ pub fn start_service( let (shutdown_trigger, wait_for_shutdown) = exit_future::signal(); // Create an `iron` http, without starting it yet. - let iron = create_iron_http_server(beacon_chain); + let iron = create_iron_http_server(beacon_chain, db_path, metrics_registry); // Create a HTTP server future. 
// @@ -69,16 +97,14 @@ pub fn start_service( // 2. Build an exit future that will shutdown the server when requested. // 3. Return the exit future, so the caller may shutdown the service when desired. let http_service = { + let listen_address = format!("{}:{}", config.listen_address, config.listen_port); // Start the HTTP server - let server_start_result = iron.http(config.listen_address.clone()); + let server_start_result = iron.http(listen_address.clone()); if server_start_result.is_ok() { - info!(log, "HTTP server running on {}", config.listen_address); + info!(log, "HTTP server running on {}", listen_address); } else { - warn!( - log, - "HTTP server failed to start on {}", config.listen_address - ); + warn!(log, "HTTP server failed to start on {}", listen_address); } // Build a future that will shutdown the HTTP server when the `shutdown_trigger` is diff --git a/beacon_node/http_server/src/metrics.rs b/beacon_node/http_server/src/metrics.rs index eb7816d0eb..1b1ed1f3d4 100644 --- a/beacon_node/http_server/src/metrics.rs +++ b/beacon_node/http_server/src/metrics.rs @@ -1,20 +1,34 @@ -use crate::{key::BeaconChainKey, map_persistent_err_to_500}; +use crate::{ + key::{BeaconChainKey, DBPathKey, LocalMetricsKey, MetricsRegistryKey}, + map_persistent_err_to_500, +}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use iron::prelude::*; use iron::{status::Status, Handler, IronResult, Request, Response}; use persistent::Read; -use prometheus::{Encoder, IntCounter, Opts, Registry, TextEncoder}; -use slot_clock::SlotClock; +use prometheus::{Encoder, Registry, TextEncoder}; +use std::path::PathBuf; use std::sync::Arc; -use types::Slot; + +pub use local_metrics::LocalMetrics; + +mod local_metrics; /// Yields a handler for the metrics endpoint. 
pub fn build_handler( beacon_chain: Arc>, + db_path: PathBuf, + metrics_registry: Registry, ) -> impl Handler { let mut chain = Chain::new(handle_metrics::); + let local_metrics = LocalMetrics::new().unwrap(); + local_metrics.register(&metrics_registry).unwrap(); + chain.link(Read::>::both(beacon_chain)); + chain.link(Read::::both(metrics_registry)); + chain.link(Read::::both(local_metrics)); + chain.link(Read::::both(db_path)); chain } @@ -27,23 +41,28 @@ fn handle_metrics(req: &mut Request) -> IronResul .get::>>() .map_err(map_persistent_err_to_500)?; - let r = Registry::new(); + let r = req + .get::>() + .map_err(map_persistent_err_to_500)?; - let present_slot = if let Ok(Some(slot)) = beacon_chain.slot_clock.present_slot() { - slot - } else { - Slot::new(0) - }; - register_and_set_slot( - &r, - "present_slot", - "direct_slock_clock_reading", - present_slot, - ); + let local_metrics = req + .get::>() + .map_err(map_persistent_err_to_500)?; + + let db_path = req + .get::>() + .map_err(map_persistent_err_to_500)?; + + // Update metrics that are calculated on each scrape. + local_metrics.update(&beacon_chain, &db_path); - // Gather the metrics. let mut buffer = vec![]; let encoder = TextEncoder::new(); + + // Gather `DEFAULT_REGISTRY` metrics. + encoder.encode(&prometheus::gather(), &mut buffer).unwrap(); + + // Gather metrics from our registry. 
let metric_families = r.gather(); encoder.encode(&metric_families, &mut buffer).unwrap(); @@ -51,10 +70,3 @@ fn handle_metrics(req: &mut Request) -> IronResul Ok(Response::with((Status::Ok, prom_string))) } - -fn register_and_set_slot(registry: &Registry, name: &str, help: &str, slot: Slot) { - let counter_opts = Opts::new(name, help); - let counter = IntCounter::with_opts(counter_opts).unwrap(); - registry.register(Box::new(counter.clone())).unwrap(); - counter.inc_by(slot.as_u64() as i64); -} diff --git a/beacon_node/http_server/src/metrics/local_metrics.rs b/beacon_node/http_server/src/metrics/local_metrics.rs new file mode 100644 index 0000000000..fa69ee0c42 --- /dev/null +++ b/beacon_node/http_server/src/metrics/local_metrics.rs @@ -0,0 +1,106 @@ +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use prometheus::{IntGauge, Opts, Registry}; +use slot_clock::SlotClock; +use std::fs::File; +use std::path::PathBuf; +use types::{EthSpec, Slot}; + +// If set to `true` will iterate and sum the balances of all validators in the state for each +// scrape. +const SHOULD_SUM_VALIDATOR_BALANCES: bool = true; + +pub struct LocalMetrics { + present_slot: IntGauge, + present_epoch: IntGauge, + best_slot: IntGauge, + validator_count: IntGauge, + justified_epoch: IntGauge, + finalized_epoch: IntGauge, + validator_balances_sum: IntGauge, + database_size: IntGauge, +} + +impl LocalMetrics { + /// Create a new instance. + pub fn new() -> Result { + Ok(Self { + present_slot: { + let opts = Opts::new("present_slot", "slot_at_time_of_scrape"); + IntGauge::with_opts(opts)? + }, + present_epoch: { + let opts = Opts::new("present_epoch", "epoch_at_time_of_scrape"); + IntGauge::with_opts(opts)? + }, + best_slot: { + let opts = Opts::new("best_slot", "slot_of_block_at_chain_head"); + IntGauge::with_opts(opts)? + }, + validator_count: { + let opts = Opts::new("validator_count", "number_of_validators"); + IntGauge::with_opts(opts)? 
+ }, + justified_epoch: { + let opts = Opts::new("justified_epoch", "state_justified_epoch"); + IntGauge::with_opts(opts)? + }, + finalized_epoch: { + let opts = Opts::new("finalized_epoch", "state_finalized_epoch"); + IntGauge::with_opts(opts)? + }, + validator_balances_sum: { + let opts = Opts::new("validator_balances_sum", "sum_of_all_validator_balances"); + IntGauge::with_opts(opts)? + }, + database_size: { + let opts = Opts::new("database_size", "size_of_on_disk_db_in_mb"); + IntGauge::with_opts(opts)? + }, + }) + } + + /// Registry this instance with the `registry`. + pub fn register(&self, registry: &Registry) -> Result<(), prometheus::Error> { + registry.register(Box::new(self.present_slot.clone()))?; + registry.register(Box::new(self.present_epoch.clone()))?; + registry.register(Box::new(self.best_slot.clone()))?; + registry.register(Box::new(self.validator_count.clone()))?; + registry.register(Box::new(self.finalized_epoch.clone()))?; + registry.register(Box::new(self.justified_epoch.clone()))?; + registry.register(Box::new(self.validator_balances_sum.clone()))?; + registry.register(Box::new(self.database_size.clone()))?; + + Ok(()) + } + + /// Update the metrics in `self` to the latest values. 
+ pub fn update(&self, beacon_chain: &BeaconChain, db_path: &PathBuf) { + let state = &beacon_chain.head().beacon_state; + + let present_slot = beacon_chain + .slot_clock + .present_slot() + .unwrap_or_else(|_| None) + .unwrap_or_else(|| Slot::new(0)); + self.present_slot.set(present_slot.as_u64() as i64); + self.present_epoch + .set(present_slot.epoch(T::EthSpec::slots_per_epoch()).as_u64() as i64); + + self.best_slot.set(state.slot.as_u64() as i64); + self.validator_count + .set(state.validator_registry.len() as i64); + self.justified_epoch + .set(state.current_justified_epoch.as_u64() as i64); + self.finalized_epoch + .set(state.finalized_epoch.as_u64() as i64); + if SHOULD_SUM_VALIDATOR_BALANCES { + self.validator_balances_sum + .set(state.balances.iter().sum::() as i64); + } + let db_size = File::open(db_path) + .and_then(|f| f.metadata()) + .and_then(|m| Ok(m.len())) + .unwrap_or(0); + self.database_size.set(db_size as i64); + } +} diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 9cac126595..ebf71aa4e0 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -9,6 +9,7 @@ sloggers = "0.3.2" [dependencies] beacon_chain = { path = "../beacon_chain" } +store = { path = "../store" } eth2-libp2p = { path = "../eth2-libp2p" } version = { path = "../version" } types = { path = "../../eth2/types" } diff --git a/beacon_node/network/src/beacon_chain.rs b/beacon_node/network/src/beacon_chain.rs deleted file mode 100644 index 6324e3a940..0000000000 --- a/beacon_node/network/src/beacon_chain.rs +++ /dev/null @@ -1,157 +0,0 @@ -use beacon_chain::BeaconChain as RawBeaconChain; -use beacon_chain::{ - parking_lot::RwLockReadGuard, - types::{BeaconState, ChainSpec}, - AttestationValidationError, CheckPoint, -}; -use eth2_libp2p::rpc::HelloMessage; -use types::{ - Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, EthSpec, Hash256, Slot, -}; - -pub use beacon_chain::{BeaconChainError, 
BeaconChainTypes, BlockProcessingOutcome, InvalidBlock}; - -/// The network's API to the beacon chain. -pub trait BeaconChain: Send + Sync { - fn get_spec(&self) -> &ChainSpec; - - fn get_state(&self) -> RwLockReadGuard>; - - fn slot(&self) -> Slot; - - fn head(&self) -> RwLockReadGuard>; - - fn get_block(&self, block_root: &Hash256) -> Result, BeaconChainError>; - - fn best_slot(&self) -> Slot; - - fn best_block_root(&self) -> Hash256; - - fn finalized_head(&self) -> RwLockReadGuard>; - - fn finalized_epoch(&self) -> Epoch; - - fn hello_message(&self) -> HelloMessage; - - fn process_block(&self, block: BeaconBlock) - -> Result; - - fn process_attestation( - &self, - attestation: Attestation, - ) -> Result<(), AttestationValidationError>; - - fn get_block_roots( - &self, - start_slot: Slot, - count: usize, - skip: usize, - ) -> Result, BeaconChainError>; - - fn get_block_headers( - &self, - start_slot: Slot, - count: usize, - skip: usize, - ) -> Result, BeaconChainError>; - - fn get_block_bodies(&self, roots: &[Hash256]) - -> Result, BeaconChainError>; - - fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result; -} - -impl BeaconChain for RawBeaconChain { - fn get_spec(&self) -> &ChainSpec { - &self.spec - } - - fn get_state(&self) -> RwLockReadGuard> { - self.state.read() - } - - fn slot(&self) -> Slot { - self.get_state().slot - } - - fn head(&self) -> RwLockReadGuard> { - self.head() - } - - fn get_block(&self, block_root: &Hash256) -> Result, BeaconChainError> { - self.get_block(block_root) - } - - fn finalized_epoch(&self) -> Epoch { - self.get_state().finalized_epoch - } - - fn finalized_head(&self) -> RwLockReadGuard> { - self.finalized_head() - } - - fn best_slot(&self) -> Slot { - self.head().beacon_block.slot - } - - fn best_block_root(&self) -> Hash256 { - self.head().beacon_block_root - } - - fn hello_message(&self) -> HelloMessage { - let spec = self.get_spec(); - let state = self.get_state(); - - HelloMessage { - network_id: spec.chain_id, 
- latest_finalized_root: state.finalized_root, - latest_finalized_epoch: state.finalized_epoch, - best_root: self.best_block_root(), - best_slot: self.best_slot(), - } - } - - fn process_block( - &self, - block: BeaconBlock, - ) -> Result { - self.process_block(block) - } - - fn process_attestation( - &self, - attestation: Attestation, - ) -> Result<(), AttestationValidationError> { - self.process_attestation(attestation) - } - - fn get_block_roots( - &self, - start_slot: Slot, - count: usize, - skip: usize, - ) -> Result, BeaconChainError> { - self.get_block_roots(start_slot, count, skip) - } - - fn get_block_headers( - &self, - start_slot: Slot, - count: usize, - skip: usize, - ) -> Result, BeaconChainError> { - let roots = self.get_block_roots(start_slot, count, skip)?; - self.get_block_headers(&roots) - } - - fn get_block_bodies( - &self, - roots: &[Hash256], - ) -> Result, BeaconChainError> { - self.get_block_bodies(roots) - } - - fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result { - self.is_new_block_root(beacon_block_root) - } -} diff --git a/beacon_node/network/src/error.rs b/beacon_node/network/src/error.rs index cdd6b62094..fc061ff442 100644 --- a/beacon_node/network/src/error.rs +++ b/beacon_node/network/src/error.rs @@ -1,10 +1,7 @@ // generates error types use eth2_libp2p; -use error_chain::{ - error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed, - impl_extract_backtrace, -}; +use error_chain::error_chain; error_chain! { links { diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index c298e31b4e..b805c1d755 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -1,5 +1,4 @@ /// This crate provides the network server for Lighthouse. 
-pub mod beacon_chain; pub mod error; pub mod message_handler; pub mod service; diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index f6a27ad600..40a396c3b0 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -1,7 +1,7 @@ -use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use crate::error; use crate::service::{NetworkMessage, OutgoingMessage}; use crate::sync::SimpleSync; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use crossbeam_channel::{unbounded as channel, Sender}; use eth2_libp2p::{ behaviour::PubsubMessage, @@ -155,7 +155,7 @@ impl MessageHandler { if self .network_context .outstanding_outgoing_request_ids - .remove(&(peer_id.clone(), id.clone())) + .remove(&(peer_id.clone(), id)) .is_none() { warn!( @@ -250,7 +250,7 @@ impl NetworkContext { let id = self.generate_request_id(&peer_id); self.outstanding_outgoing_request_ids - .insert((peer_id.clone(), id.clone()), Instant::now()); + .insert((peer_id.clone(), id), Instant::now()); self.send_rpc_event( peer_id, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index d87b9e5a9d..9c71a60f7e 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,7 +1,7 @@ -use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use crate::error; use crate::message_handler::{HandlerMessage, MessageHandler}; use crate::NetworkConfig; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use crossbeam_channel::{unbounded as channel, Sender, TryRecvError}; use eth2_libp2p::Service as LibP2PService; use eth2_libp2p::{Libp2pEvent, PeerId}; diff --git a/beacon_node/network/src/sync/import_queue.rs b/beacon_node/network/src/sync/import_queue.rs index 793f4c395e..90c354cfd5 100644 --- a/beacon_node/network/src/sync/import_queue.rs +++ b/beacon_node/network/src/sync/import_queue.rs @@ -1,4 +1,4 @@ -use crate::beacon_chain::{BeaconChain, 
BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::rpc::methods::*; use eth2_libp2p::PeerId; use slog::{debug, error}; @@ -166,7 +166,7 @@ impl ImportQueue { let mut required_bodies: Vec = vec![]; for header in headers { - let block_root = Hash256::from_slice(&header.tree_hash_root()[..]); + let block_root = Hash256::from_slice(&header.canonical_root()[..]); if self.chain_has_not_seen_block(&block_root) { self.insert_header(block_root, header, sender.clone()); @@ -212,7 +212,7 @@ impl ImportQueue { // Case 2: there was no partial with a matching block root. // // A new partial is added. This case permits adding a header without already known the - // root -- this is not possible in the wire protocol however we support it anyway. + // root. self.partials.push(PartialBeaconBlock { slot: header.slot, block_root, @@ -250,7 +250,7 @@ impl ImportQueue { /// /// If the partial already existed, the `inserted` time is set to `now`. fn insert_full_block(&mut self, block: BeaconBlock, sender: PeerId) { - let block_root = Hash256::from_slice(&block.tree_hash_root()[..]); + let block_root = Hash256::from_slice(&block.canonical_root()[..]); let partial = PartialBeaconBlock { slot: block.slot, diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 6ab8ea7d9a..403a8c54b8 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -1,6 +1,6 @@ use super::import_queue::ImportQueue; -use crate::beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome, InvalidBlock}; use crate::message_handler::NetworkContext; +use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::{RPCRequest, RPCResponse, RequestId}; use eth2_libp2p::PeerId; @@ -8,8 +8,10 @@ use slog::{debug, error, info, o, warn}; use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; 
-use tree_hash::TreeHash; -use types::{Attestation, BeaconBlock, Epoch, Hash256, Slot}; +use store::Store; +use types::{ + Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, EthSpec, Hash256, Slot, +}; /// The number of slots that we can import blocks ahead of us, before going into full Sync mode. const SLOT_IMPORT_TOLERANCE: u64 = 100; @@ -21,6 +23,9 @@ const QUEUE_STALE_SECS: u64 = 600; /// Otherwise we queue it. const FUTURE_SLOT_TOLERANCE: u64 = 1; +const SHOULD_FORWARD_GOSSIP_BLOCK: bool = true; +const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false; + /// Keeps track of syncing information for known connected peers. #[derive(Clone, Copy, Debug)] pub struct PeerSyncInfo { @@ -31,51 +36,6 @@ pub struct PeerSyncInfo { best_slot: Slot, } -impl PeerSyncInfo { - /// Returns `true` if the has a different network ID to `other`. - fn has_different_network_id_to(&self, other: Self) -> bool { - self.network_id != other.network_id - } - - /// Returns `true` if the peer has a higher finalized epoch than `other`. - fn has_higher_finalized_epoch_than(&self, other: Self) -> bool { - self.latest_finalized_epoch > other.latest_finalized_epoch - } - - /// Returns `true` if the peer has a higher best slot than `other`. - fn has_higher_best_slot_than(&self, other: Self) -> bool { - self.best_slot > other.best_slot - } -} - -/// The status of a peers view on the chain, relative to some other view of the chain (presumably -/// our view). -#[derive(PartialEq, Clone, Copy, Debug)] -pub enum PeerStatus { - /// The peer is on a completely different chain. - DifferentNetworkId, - /// The peer lists a finalized epoch for which we have a different root. - FinalizedEpochNotInChain, - /// The peer has a higher finalized epoch. - HigherFinalizedEpoch, - /// The peer has a higher best slot. - HigherBestSlot, - /// The peer has the same or lesser view of the chain. We have nothing to request of them. 
- NotInteresting, -} - -impl PeerStatus { - pub fn should_handshake(self) -> bool { - match self { - PeerStatus::DifferentNetworkId => false, - PeerStatus::FinalizedEpochNotInChain => false, - PeerStatus::HigherFinalizedEpoch => true, - PeerStatus::HigherBestSlot => true, - PeerStatus::NotInteresting => true, - } - } -} - impl From for PeerSyncInfo { fn from(hello: HelloMessage) -> PeerSyncInfo { PeerSyncInfo { @@ -90,7 +50,7 @@ impl From for PeerSyncInfo { impl From<&Arc>> for PeerSyncInfo { fn from(chain: &Arc>) -> PeerSyncInfo { - Self::from(chain.hello_message()) + Self::from(hello_message(chain)) } } @@ -151,9 +111,9 @@ impl SimpleSync { /// /// Sends a `Hello` message to the peer. pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) { - info!(self.log, "PeerConnect"; "peer" => format!("{:?}", peer_id)); + info!(self.log, "PeerConnected"; "peer" => format!("{:?}", peer_id)); - network.send_rpc_request(peer_id, RPCRequest::Hello(self.chain.hello_message())); + network.send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); } /// Handle a `Hello` request. @@ -172,7 +132,7 @@ impl SimpleSync { network.send_rpc_response( peer_id.clone(), request_id, - RPCResponse::Hello(self.chain.hello_message()), + RPCResponse::Hello(hello_message(&self.chain)), ); self.process_hello(peer_id, hello, network); @@ -191,51 +151,6 @@ impl SimpleSync { self.process_hello(peer_id, hello, network); } - /// Returns a `PeerStatus` for some peer. 
- fn peer_status(&self, peer: PeerSyncInfo) -> PeerStatus { - let local = PeerSyncInfo::from(&self.chain); - - if peer.has_different_network_id_to(local) { - return PeerStatus::DifferentNetworkId; - } - - if local.has_higher_finalized_epoch_than(peer) { - let peer_finalized_slot = peer - .latest_finalized_epoch - .start_slot(self.chain.get_spec().slots_per_epoch); - - let local_roots = self.chain.get_block_roots(peer_finalized_slot, 1, 0); - - if let Ok(local_roots) = local_roots { - if let Some(local_root) = local_roots.get(0) { - if *local_root != peer.latest_finalized_root { - return PeerStatus::FinalizedEpochNotInChain; - } - } else { - error!( - self.log, - "Cannot get root for peer finalized slot."; - "error" => "empty roots" - ); - } - } else { - error!( - self.log, - "Cannot get root for peer finalized slot."; - "error" => format!("{:?}", local_roots) - ); - } - } - - if peer.has_higher_finalized_epoch_than(local) { - PeerStatus::HigherFinalizedEpoch - } else if peer.has_higher_best_slot_than(local) { - PeerStatus::HigherBestSlot - } else { - PeerStatus::NotInteresting - } - } - /// Process a `Hello` message, requesting new blocks if appropriate. /// /// Disconnects the peer if required. @@ -245,31 +160,64 @@ impl SimpleSync { hello: HelloMessage, network: &mut NetworkContext, ) { - let spec = self.chain.get_spec(); + let spec = &self.chain.spec; let remote = PeerSyncInfo::from(hello); let local = PeerSyncInfo::from(&self.chain); - let remote_status = self.peer_status(remote); - if remote_status.should_handshake() { - info!(self.log, "HandshakeSuccess"; "peer" => format!("{:?}", peer_id)); - self.known_peers.insert(peer_id.clone(), remote); - } else { + // Disconnect nodes who are on a different network. 
+ if local.network_id != remote.network_id { info!( self.log, "HandshakeFailure"; "peer" => format!("{:?}", peer_id), "reason" => "network_id" ); network.disconnect(peer_id.clone(), GoodbyeReason::IrreleventNetwork); - } + // Disconnect nodes if our finalized epoch is greater than thieirs, and their finalized + // epoch is not in our chain. Viz., they are on another chain. + // + // If the local or remote have a `latest_finalized_root == ZERO_HASH`, skips checks about + // the finalized_root. The logic is akward and I think we're better without it. + } else if (local.latest_finalized_epoch >= remote.latest_finalized_epoch) + && (!self + .chain + .rev_iter_block_roots(local.best_slot) + .any(|root| root == remote.latest_finalized_root)) + && (local.latest_finalized_root != spec.zero_hash) + && (remote.latest_finalized_root != spec.zero_hash) + { + info!( + self.log, "HandshakeFailure"; + "peer" => format!("{:?}", peer_id), + "reason" => "wrong_finalized_chain" + ); + network.disconnect(peer_id.clone(), GoodbyeReason::IrreleventNetwork); + // Process handshakes from peers that seem to be on our chain. + } else { + info!(self.log, "HandshakeSuccess"; "peer" => format!("{:?}", peer_id)); + self.known_peers.insert(peer_id.clone(), remote); - // If required, send additional requests. - match remote_status { - PeerStatus::HigherFinalizedEpoch => { - let start_slot = remote + // If we have equal or better finalized epochs and best slots, we require nothing else from + // this peer. + // + // We make an exception when our best slot is 0. Best slot does not indicate wether or + // not there is a block at slot zero. + if (remote.latest_finalized_epoch <= local.latest_finalized_epoch) + && (remote.best_slot <= local.best_slot) + && (local.best_slot > 0) + { + debug!(self.log, "Peer is naive"; "peer" => format!("{:?}", peer_id)); + return; + } + + // If the remote has a higher finalized epoch, request all block roots from our finalized + // epoch through to its best slot. 
+ if remote.latest_finalized_epoch > local.latest_finalized_epoch { + debug!(self.log, "Peer has high finalized epoch"; "peer" => format!("{:?}", peer_id)); + let start_slot = local .latest_finalized_epoch - .start_slot(spec.slots_per_epoch); - let required_slots = start_slot - local.best_slot; + .start_slot(T::EthSpec::slots_per_epoch()); + let required_slots = remote.best_slot - start_slot; self.request_block_roots( peer_id, @@ -279,22 +227,26 @@ impl SimpleSync { }, network, ); - } - PeerStatus::HigherBestSlot => { - let required_slots = remote.best_slot - local.best_slot; + // If the remote has a greater best slot, request the roots between our best slot and their + // best slot. + } else if remote.best_slot > local.best_slot { + debug!(self.log, "Peer has higher best slot"; "peer" => format!("{:?}", peer_id)); + let start_slot = local + .latest_finalized_epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let required_slots = remote.best_slot - start_slot; self.request_block_roots( peer_id, BeaconBlockRootsRequest { - start_slot: local.best_slot + 1, + start_slot, count: required_slots.into(), }, network, ); + } else { + debug!(self.log, "Nothing to request from peer"; "peer" => format!("{:?}", peer_id)); } - PeerStatus::FinalizedEpochNotInChain => {} - PeerStatus::DifferentNetworkId => {} - PeerStatus::NotInteresting => {} } } @@ -311,34 +263,40 @@ impl SimpleSync { "BlockRootsRequest"; "peer" => format!("{:?}", peer_id), "count" => req.count, + "start_slot" => req.start_slot, ); - let roots = match self + let mut roots: Vec = self .chain - .get_block_roots(req.start_slot, req.count as usize, 0) - { - Ok(roots) => roots, - Err(e) => { - // TODO: return RPC error. 
- warn!( - self.log, - "RPCRequest"; "peer" => format!("{:?}", peer_id), - "req" => "BeaconBlockRoots", - "error" => format!("{:?}", e) - ); - return; - } - }; + .rev_iter_block_roots(req.start_slot + req.count) + .skip(1) + .take(req.count as usize) + .collect(); - let roots = roots + if roots.len() as u64 != req.count { + debug!( + self.log, + "BlockRootsRequest"; + "peer" => format!("{:?}", peer_id), + "msg" => "Failed to return all requested hashes", + "requested" => req.count, + "returned" => roots.len(), + ); + } + + roots.reverse(); + + let mut roots: Vec = roots .iter() .enumerate() - .map(|(i, &block_root)| BlockRootSlot { + .map(|(i, block_root)| BlockRootSlot { slot: req.start_slot + Slot::from(i), - block_root, + block_root: *block_root, }) .collect(); + roots.dedup_by_key(|brs| brs.block_root); + network.send_rpc_response( peer_id, request_id, @@ -424,23 +382,29 @@ impl SimpleSync { "count" => req.max_headers, ); - let headers = match self.chain.get_block_headers( - req.start_slot, - req.max_headers as usize, - req.skip_slots as usize, - ) { - Ok(headers) => headers, - Err(e) => { - // TODO: return RPC error. - warn!( - self.log, - "RPCRequest"; "peer" => format!("{:?}", peer_id), - "req" => "BeaconBlockHeaders", - "error" => format!("{:?}", e) - ); - return; - } - }; + let count = req.max_headers; + + // Collect the block roots. + // + // Instead of using `chain.rev_iter_blocks` we collect the roots first. This avoids + // unnecessary block deserialization when `req.skip_slots > 0`. 
+ let mut roots: Vec = self + .chain + .rev_iter_block_roots(req.start_slot + (count - 1)) + .take(count as usize) + .collect(); + + roots.reverse(); + roots.dedup(); + + let headers: Vec = roots + .into_iter() + .step_by(req.skip_slots as usize + 1) + .filter_map(|root| { + let block = self.chain.store.get::(&root).ok()?; + Some(block?.block_header()) + }) + .collect(); network.send_rpc_response( peer_id, @@ -488,27 +452,33 @@ impl SimpleSync { req: BeaconBlockBodiesRequest, network: &mut NetworkContext, ) { + let block_bodies: Vec = req + .block_roots + .iter() + .filter_map(|root| { + if let Ok(Some(block)) = self.chain.store.get::(root) { + Some(block.body) + } else { + debug!( + self.log, + "Peer requested unknown block"; + "peer" => format!("{:?}", peer_id), + "request_root" => format!("{:}", root), + ); + + None + } + }) + .collect(); + debug!( self.log, "BlockBodiesRequest"; "peer" => format!("{:?}", peer_id), - "count" => req.block_roots.len(), + "requested" => req.block_roots.len(), + "returned" => block_bodies.len(), ); - let block_bodies = match self.chain.get_block_bodies(&req.block_roots) { - Ok(bodies) => bodies, - Err(e) => { - // TODO: return RPC error. - warn!( - self.log, - "RPCRequest"; "peer" => format!("{:?}", peer_id), - "req" => "BeaconBlockBodies", - "error" => format!("{:?}", e) - ); - return; - } - }; - network.send_rpc_response( peer_id, request_id, @@ -542,6 +512,8 @@ impl SimpleSync { /// Process a gossip message declaring a new block. /// + /// Attempts to apply to block to the beacon chain. May queue the block for later processing. + /// /// Returns a `bool` which, if `true`, indicates we should forward the block to our peers. 
pub fn on_block_gossip( &mut self, @@ -549,140 +521,35 @@ impl SimpleSync { block: BeaconBlock, network: &mut NetworkContext, ) -> bool { - info!( - self.log, - "NewGossipBlock"; - "peer" => format!("{:?}", peer_id), - ); + if let Some(outcome) = + self.process_block(peer_id.clone(), block.clone(), network, &"gossip") + { + match outcome { + BlockProcessingOutcome::Processed => SHOULD_FORWARD_GOSSIP_BLOCK, + BlockProcessingOutcome::ParentUnknown { .. } => { + self.import_queue + .enqueue_full_blocks(vec![block], peer_id.clone()); - // Ignore any block from a finalized slot. - if self.slot_is_finalized(block.slot) { - warn!( - self.log, "NewGossipBlock"; - "msg" => "new block slot is finalized.", - "block_slot" => block.slot, - ); - return false; - } - - let block_root = Hash256::from_slice(&block.tree_hash_root()); - - // Ignore any block that the chain already knows about. - if self.chain_has_seen_block(&block_root) { - println!("this happened"); - // TODO: Age confirm that we shouldn't forward a block if we already know of it. - return false; - } - - debug!( - self.log, - "NewGossipBlock"; - "peer" => format!("{:?}", peer_id), - "msg" => "processing block", - ); - match self.chain.process_block(block.clone()) { - Ok(BlockProcessingOutcome::InvalidBlock(InvalidBlock::ParentUnknown)) => { - // The block was valid and we processed it successfully. - debug!( - self.log, "NewGossipBlock"; - "msg" => "parent block unknown", - "parent_root" => format!("{}", block.previous_block_root), - "peer" => format!("{:?}", peer_id), - ); - // Queue the block for later processing. - self.import_queue - .enqueue_full_blocks(vec![block], peer_id.clone()); - // Send a hello to learn of the clients best slot so we can then sync the require - // parent(s). - network.send_rpc_request( - peer_id.clone(), - RPCRequest::Hello(self.chain.hello_message()), - ); - // Forward the block onto our peers. 
- // - // Note: this may need to be changed if we decide to only forward blocks if we have - // all required info. - true - } - Ok(BlockProcessingOutcome::InvalidBlock(InvalidBlock::FutureSlot { - present_slot, - block_slot, - })) => { - if block_slot - present_slot > FUTURE_SLOT_TOLERANCE { - // The block is too far in the future, drop it. - warn!( - self.log, "NewGossipBlock"; - "msg" => "future block rejected", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - "peer" => format!("{:?}", peer_id), - ); - // Do not forward the block around to peers. - false - } else { - // The block is in the future, but not too far. - warn!( - self.log, "NewGossipBlock"; - "msg" => "queuing future block", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - "peer" => format!("{:?}", peer_id), - ); - // Queue the block for later processing. - self.import_queue.enqueue_full_blocks(vec![block], peer_id); - // Forward the block around to peers. - true + SHOULD_FORWARD_GOSSIP_BLOCK } - } - Ok(outcome) => { - if outcome.is_invalid() { - // The peer has sent a block which is fundamentally invalid. - warn!( - self.log, "NewGossipBlock"; - "msg" => "invalid block from peer", - "outcome" => format!("{:?}", outcome), - "peer" => format!("{:?}", peer_id), - ); - // Disconnect the peer - network.disconnect(peer_id, GoodbyeReason::Fault); - // Do not forward the block to peers. - false - } else if outcome.sucessfully_processed() { - // The block was valid and we processed it successfully. - info!( - self.log, "NewGossipBlock"; - "msg" => "block import successful", - "peer" => format!("{:?}", peer_id), - ); - // Forward the block to peers - true - } else { - // The block wasn't necessarily invalid but we didn't process it successfully. - // This condition shouldn't be reached. 
- error!( - self.log, "NewGossipBlock"; - "msg" => "unexpected condition in processing block.", - "outcome" => format!("{:?}", outcome), - ); - // Do not forward the block on. - false + BlockProcessingOutcome::FutureSlot { + present_slot, + block_slot, + } if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot => { + self.import_queue + .enqueue_full_blocks(vec![block], peer_id.clone()); + + SHOULD_FORWARD_GOSSIP_BLOCK } - } - Err(e) => { - // We encountered an error whilst processing the block. + // Note: known blocks are forwarded on the gossip network. // - // Blocks should not be able to trigger errors, instead they should be flagged as - // invalid. - error!( - self.log, "NewGossipBlock"; - "msg" => "internal error in processing block.", - "error" => format!("{:?}", e), - ); - // Do not forward the block to peers. - false + // We rely upon the lower layers (libp2p) to stop loops occuring from re-gossiped + // blocks. + BlockProcessingOutcome::BlockIsAlreadyKnown => SHOULD_FORWARD_GOSSIP_BLOCK, + _ => SHOULD_NOT_FORWARD_GOSSIP_BLOCK, } + } else { + SHOULD_NOT_FORWARD_GOSSIP_BLOCK } } @@ -691,19 +558,15 @@ impl SimpleSync { /// Not currently implemented. pub fn on_attestation_gossip( &mut self, - peer_id: PeerId, + _peer_id: PeerId, msg: Attestation, _network: &mut NetworkContext, ) { - info!( - self.log, - "NewAttestationGossip"; - "peer" => format!("{:?}", peer_id), - ); - match self.chain.process_attestation(msg) { - Ok(()) => info!(self.log, "ImportedAttestation"), - Err(e) => warn!(self.log, "InvalidAttestation"; "error" => format!("{:?}", e)), + Ok(()) => info!(self.log, "ImportedAttestation"; "source" => "gossip"), + Err(e) => { + warn!(self.log, "InvalidAttestation"; "source" => "gossip", "error" => format!("{:?}", e)) + } } } @@ -713,55 +576,32 @@ impl SimpleSync { /// the queue. 
pub fn process_import_queue(&mut self, network: &mut NetworkContext) { let mut successful = 0; - let mut invalid = 0; - let mut errored = 0; // Loop through all of the complete blocks in the queue. for (block_root, block, sender) in self.import_queue.complete_blocks() { - match self.chain.process_block(block) { - Ok(outcome) => { - if outcome.is_invalid() { - invalid += 1; - warn!( - self.log, - "InvalidBlock"; - "sender_peer_id" => format!("{:?}", sender), - "reason" => format!("{:?}", outcome), - ); - network.disconnect(sender, GoodbyeReason::Fault); - break; - } + let processing_result = self.process_block(sender, block.clone(), network, &"gossip"); - // If this results to true, the item will be removed from the queue. - if outcome.sucessfully_processed() { - successful += 1; - self.import_queue.remove(block_root); - } else { - debug!( - self.log, - "ProcessImportQueue"; - "msg" => "Block not imported", - "outcome" => format!("{:?}", outcome), - "peer" => format!("{:?}", sender), - ); - } - } - Err(e) => { - errored += 1; - error!(self.log, "BlockProcessingError"; "error" => format!("{:?}", e)); - } + let should_dequeue = match processing_result { + Some(BlockProcessingOutcome::ParentUnknown { .. }) => false, + Some(BlockProcessingOutcome::FutureSlot { + present_slot, + block_slot, + }) if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot => false, + _ => true, + }; + + if processing_result == Some(BlockProcessingOutcome::Processed) { + successful += 1; + } + + if should_dequeue { + self.import_queue.remove(block_root); } } if successful > 0 { info!(self.log, "Imported {} blocks", successful) } - if invalid > 0 { - warn!(self.log, "Rejected {} invalid blocks", invalid) - } - if errored > 0 { - warn!(self.log, "Failed to process {} blocks", errored) - } } /// Request some `BeaconBlockRoots` from the remote peer. @@ -833,17 +673,140 @@ impl SimpleSync { }) } - /// Returns `true` if the given slot is finalized in our chain. 
- fn slot_is_finalized(&self, slot: Slot) -> bool { - slot <= self - .chain - .hello_message() - .latest_finalized_epoch - .start_slot(self.chain.get_spec().slots_per_epoch) - } - /// Generates our current state in the form of a HELLO RPC message. pub fn generate_hello(&self) -> HelloMessage { - self.chain.hello_message() + hello_message(&self.chain) + } + + /// Processes the `block` that was received from `peer_id`. + /// + /// If the block was submitted to the beacon chain without internal error, `Some(outcome)` is + /// returned, otherwise `None` is returned. Note: `Some(_)` does not necessarily indicate that + /// the block was successfully processed or valid. + /// + /// This function performs the following duties: + /// + /// - Attempting to import the block into the beacon chain. + /// - Logging + /// - Requesting unavailable blocks (e.g., if parent is unknown). + /// - Disconnecting faulty nodes. + /// + /// This function does not remove processed blocks from the import queue. + fn process_block( + &mut self, + peer_id: PeerId, + block: BeaconBlock, + network: &mut NetworkContext, + source: &str, + ) -> Option { + let processing_result = self.chain.process_block(block.clone()); + + if let Ok(outcome) = processing_result { + match outcome { + BlockProcessingOutcome::Processed => { + info!( + self.log, "Imported block from network"; + "source" => source, + "slot" => block.slot, + "peer" => format!("{:?}", peer_id), + ); + } + BlockProcessingOutcome::ParentUnknown { parent } => { + // The block was valid and we processed it successfully. + debug!( + self.log, "ParentBlockUnknown"; + "source" => source, + "parent_root" => format!("{}", parent), + "peer" => format!("{:?}", peer_id), + ); + + // Send a hello to learn of the clients best slot so we can then sync the require + // parent(s). + network.send_rpc_request( + peer_id.clone(), + RPCRequest::Hello(hello_message(&self.chain)), + ); + + // Explicitly request the parent block from the peer. 
+ // + // It is likely that this is duplicate work, given we already send a hello + // request. However, I believe there are some edge-cases where the hello + // message doesn't suffice, so we perform this request as well. + self.request_block_headers( + peer_id, + BeaconBlockHeadersRequest { + start_root: parent, + start_slot: block.slot - 1, + max_headers: 1, + skip_slots: 0, + }, + network, + ) + } + BlockProcessingOutcome::FutureSlot { + present_slot, + block_slot, + } => { + if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { + // The block is too far in the future, drop it. + warn!( + self.log, "FutureBlock"; + "source" => source, + "msg" => "block for future slot rejected, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + "peer" => format!("{:?}", peer_id), + ); + network.disconnect(peer_id, GoodbyeReason::Fault); + } else { + // The block is in the future, but not too far. + debug!( + self.log, "QueuedFutureBlock"; + "source" => source, + "msg" => "queuing future block, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + "peer" => format!("{:?}", peer_id), + ); + } + } + _ => { + debug!( + self.log, "InvalidBlock"; + "source" => source, + "msg" => "peer sent invalid block", + "outcome" => format!("{:?}", outcome), + "peer" => format!("{:?}", peer_id), + ); + } + } + + Some(outcome) + } else { + error!( + self.log, "BlockProcessingFailure"; + "source" => source, + "msg" => "unexpected condition in processing block.", + "outcome" => format!("{:?}", processing_result) + ); + + None + } + } +} + +/// Build a `HelloMessage` representing the state of the given `beacon_chain`. 
+fn hello_message(beacon_chain: &BeaconChain) -> HelloMessage { + let spec = &beacon_chain.spec; + let state = &beacon_chain.head().beacon_state; + + HelloMessage { + network_id: spec.chain_id, + latest_finalized_root: state.finalized_root, + latest_finalized_epoch: state.finalized_epoch, + best_root: beacon_chain.head().beacon_block_root, + best_slot: state.slot, } } diff --git a/beacon_node/rpc/Cargo.toml b/beacon_node/rpc/Cargo.toml index a361c94abc..d707cc36d4 100644 --- a/beacon_node/rpc/Cargo.toml +++ b/beacon_node/rpc/Cargo.toml @@ -20,6 +20,8 @@ clap = "2.32.0" store = { path = "../store" } dirs = "1.0.3" futures = "0.1.23" +serde = "1.0" +serde_derive = "1.0" slog = "^2.2.3" slog-term = "^2.4.0" slog-async = "^2.3.0" diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index 6048e42b1b..0f585b7e75 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -1,6 +1,8 @@ -use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2_libp2p::PubsubMessage; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; +use network::NetworkMessage; use protos::services::{ AttestationData as AttestationDataProto, ProduceAttestationDataRequest, ProduceAttestationDataResponse, PublishAttestationRequest, PublishAttestationResponse, @@ -14,6 +16,7 @@ use types::Attestation; #[derive(Clone)] pub struct AttestationServiceInstance { pub chain: Arc>, + pub network_chan: crossbeam_channel::Sender, pub log: slog::Logger, } @@ -34,7 +37,7 @@ impl AttestationService for AttestationServiceInstance { // verify the slot, drop lock on state afterwards { let slot_requested = req.get_slot(); - let state = self.chain.get_state(); + let state = &self.chain.current_state(); // Start by performing some checks // Check that the AttestionData is for the current slot (otherwise it will not be valid) @@ -124,7 +127,7 @@ impl AttestationService 
for AttestationServiceInstance { } }; - match self.chain.process_attestation(attestation) { + match self.chain.process_attestation(attestation.clone()) { Ok(_) => { // Attestation was successfully processed. info!( @@ -133,6 +136,25 @@ impl AttestationService for AttestationServiceInstance { "type" => "valid_attestation", ); + // TODO: Obtain topics from the network service properly. + let topic = types::TopicBuilder::new("beacon_chain".to_string()).build(); + let message = PubsubMessage::Attestation(attestation); + + // Publish the attestation to the p2p network via gossipsub. + self.network_chan + .send(NetworkMessage::Publish { + topics: vec![topic], + message: Box::new(message), + }) + .unwrap_or_else(|e| { + error!( + self.log, + "PublishAttestation"; + "type" => "failed to publish to gossipsub", + "error" => format!("{:?}", e) + ); + }); + resp.set_success(true); } Err(e) => { diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index e553b79e7f..d36cb1f313 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -1,4 +1,4 @@ -use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use crossbeam_channel; use eth2_libp2p::PubsubMessage; use futures::Future; @@ -95,14 +95,12 @@ impl BeaconBlockService for BeaconBlockServiceInstance { Ok(block) => { match self.chain.process_block(block.clone()) { Ok(outcome) => { - if outcome.sucessfully_processed() { + if outcome == BlockProcessingOutcome::Processed { // Block was successfully processed. info!( self.log, - "PublishBeaconBlock"; - "type" => "valid_block", + "Valid block from RPC"; "block_slot" => block.slot, - "outcome" => format!("{:?}", outcome) ); // TODO: Obtain topics from the network service properly. 
@@ -126,12 +124,11 @@ impl BeaconBlockService for BeaconBlockServiceInstance { }); resp.set_success(true); - } else if outcome.is_invalid() { - // Block was invalid. + } else { + // Block was not successfully processed. warn!( self.log, - "PublishBeaconBlock"; - "type" => "invalid_block", + "Invalid block from RPC"; "outcome" => format!("{:?}", outcome) ); @@ -139,17 +136,6 @@ impl BeaconBlockService for BeaconBlockServiceInstance { resp.set_msg( format!("InvalidBlock: {:?}", outcome).as_bytes().to_vec(), ); - } else { - // Some failure during processing. - warn!( - self.log, - "PublishBeaconBlock"; - "type" => "unable_to_import", - "outcome" => format!("{:?}", outcome) - ); - - resp.set_success(false); - resp.set_msg(format!("other: {:?}", outcome).as_bytes().to_vec()); } } Err(e) => { diff --git a/beacon_node/rpc/src/beacon_chain.rs b/beacon_node/rpc/src/beacon_chain.rs deleted file mode 100644 index b0a490137c..0000000000 --- a/beacon_node/rpc/src/beacon_chain.rs +++ /dev/null @@ -1,71 +0,0 @@ -use beacon_chain::BeaconChain as RawBeaconChain; -use beacon_chain::{ - parking_lot::{RwLockReadGuard, RwLockWriteGuard}, - types::{BeaconState, ChainSpec, Signature}, - AttestationValidationError, BlockProductionError, -}; -pub use beacon_chain::{BeaconChainError, BeaconChainTypes, BlockProcessingOutcome}; -use types::{Attestation, AttestationData, BeaconBlock, EthSpec}; - -/// The RPC's API to the beacon chain. 
-pub trait BeaconChain: Send + Sync { - fn get_spec(&self) -> &ChainSpec; - - fn get_state(&self) -> RwLockReadGuard>; - - fn get_mut_state(&self) -> RwLockWriteGuard>; - - fn process_block(&self, block: BeaconBlock) - -> Result; - - fn produce_block( - &self, - randao_reveal: Signature, - ) -> Result<(BeaconBlock, BeaconState), BlockProductionError>; - - fn produce_attestation_data(&self, shard: u64) -> Result; - - fn process_attestation( - &self, - attestation: Attestation, - ) -> Result<(), AttestationValidationError>; -} - -impl BeaconChain for RawBeaconChain { - fn get_spec(&self) -> &ChainSpec { - &self.spec - } - - fn get_state(&self) -> RwLockReadGuard> { - self.state.read() - } - - fn get_mut_state(&self) -> RwLockWriteGuard> { - self.state.write() - } - - fn process_block( - &self, - block: BeaconBlock, - ) -> Result { - self.process_block(block) - } - - fn produce_block( - &self, - randao_reveal: Signature, - ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { - self.produce_block(randao_reveal) - } - - fn produce_attestation_data(&self, shard: u64) -> Result { - self.produce_attestation_data(shard) - } - - fn process_attestation( - &self, - attestation: Attestation, - ) -> Result<(), AttestationValidationError> { - self.process_attestation(attestation) - } -} diff --git a/beacon_node/rpc/src/beacon_node.rs b/beacon_node/rpc/src/beacon_node.rs index a923bbb356..631601ac95 100644 --- a/beacon_node/rpc/src/beacon_node.rs +++ b/beacon_node/rpc/src/beacon_node.rs @@ -1,4 +1,4 @@ -use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use futures::Future; use grpcio::{RpcContext, UnarySink}; use protos::services::{Empty, Fork, NodeInfoResponse}; @@ -22,7 +22,7 @@ impl BeaconNodeService for BeaconNodeServiceInstance { node_info.set_version(version::version()); // get the chain state - let state = self.chain.get_state(); + let state = &self.chain.head().beacon_state; let state_fork = 
state.fork.clone(); let genesis_time = state.genesis_time; @@ -32,10 +32,12 @@ impl BeaconNodeService for BeaconNodeServiceInstance { fork.set_current_version(state_fork.current_version.to_vec()); fork.set_epoch(state_fork.epoch.into()); + let spec = &self.chain.spec; + node_info.set_fork(fork); node_info.set_genesis_time(genesis_time); - node_info.set_genesis_slot(self.chain.get_spec().genesis_slot.as_u64()); - node_info.set_chain_id(u32::from(self.chain.get_spec().chain_id)); + node_info.set_genesis_slot(spec.genesis_slot.as_u64()); + node_info.set_chain_id(u32::from(spec.chain_id)); // send the node_info the requester let error_log = self.log.clone(); diff --git a/beacon_node/rpc/src/config.rs b/beacon_node/rpc/src/config.rs index e21c2f7a89..0f031ddc60 100644 --- a/beacon_node/rpc/src/config.rs +++ b/beacon_node/rpc/src/config.rs @@ -1,7 +1,9 @@ +use clap::ArgMatches; +use serde_derive::{Deserialize, Serialize}; use std::net::Ipv4Addr; /// RPC Configuration -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { /// Enable the RPC server. 
pub enabled: bool, @@ -20,3 +22,23 @@ impl Default for Config { } } } + +impl Config { + pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { + if args.is_present("rpc") { + self.enabled = true; + } + + if let Some(rpc_address) = args.value_of("rpc-address") { + self.listen_address = rpc_address + .parse::() + .map_err(|_| "rpc-address is not IPv4 address")?; + } + + if let Some(rpc_port) = args.value_of("rpc-port") { + self.port = rpc_port.parse::().map_err(|_| "rpc-port is not u16")?; + } + + Ok(()) + } +} diff --git a/beacon_node/rpc/src/lib.rs b/beacon_node/rpc/src/lib.rs index 9646135b68..11de6eb6a6 100644 --- a/beacon_node/rpc/src/lib.rs +++ b/beacon_node/rpc/src/lib.rs @@ -1,15 +1,14 @@ mod attestation; mod beacon_block; -pub mod beacon_chain; mod beacon_node; pub mod config; mod validator; use self::attestation::AttestationServiceInstance; use self::beacon_block::BeaconBlockServiceInstance; -use self::beacon_chain::{BeaconChain, BeaconChainTypes}; use self::beacon_node::BeaconNodeServiceInstance; use self::validator::ValidatorServiceInstance; +use beacon_chain::{BeaconChain, BeaconChainTypes}; pub use config::Config as RPCConfig; use futures::Future; use grpcio::{Environment, ServerBuilder}; @@ -28,7 +27,8 @@ pub fn start_server( network_chan: crossbeam_channel::Sender, beacon_chain: Arc>, log: &slog::Logger, -) -> exit_future::Signal { +) -> exit_future::Signal +{ let log = log.new(o!("Service"=>"RPC")); let env = Arc::new(Environment::new(1)); @@ -47,7 +47,7 @@ pub fn start_server( let beacon_block_service = { let instance = BeaconBlockServiceInstance { chain: beacon_chain.clone(), - network_chan, + network_chan: network_chan.clone(), log: log.clone(), }; create_beacon_block_service(instance) @@ -62,6 +62,7 @@ pub fn start_server( let attestation_service = { let instance = AttestationServiceInstance { chain: beacon_chain.clone(), + network_chan, log: log.clone(), }; create_attestation_service(instance) diff --git 
a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index 16437f2a38..b13303e25c 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -1,4 +1,4 @@ -use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use bls::PublicKey; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; @@ -7,14 +7,13 @@ use protos::services_grpc::ValidatorService; use slog::{trace, warn}; use ssz::Decode; use std::sync::Arc; -use types::{Epoch, RelativeEpoch}; +use types::{Epoch, EthSpec, RelativeEpoch}; #[derive(Clone)] pub struct ValidatorServiceInstance { pub chain: Arc>, pub log: slog::Logger, } -//TODO: Refactor Errors impl ValidatorService for ValidatorServiceInstance { /// For a list of validator public keys, this function returns the slot at which each @@ -29,14 +28,15 @@ impl ValidatorService for ValidatorServiceInstance { let validators = req.get_validators(); trace!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch()); - let spec = self.chain.get_spec(); - let state = self.chain.get_state(); + let spec = &self.chain.spec; + let state = &self.chain.current_state(); let epoch = Epoch::from(req.get_epoch()); let mut resp = GetDutiesResponse::new(); let resp_validators = resp.mut_active_validators(); let relative_epoch = - match RelativeEpoch::from_epoch(state.slot.epoch(spec.slots_per_epoch), epoch) { + match RelativeEpoch::from_epoch(state.slot.epoch(T::EthSpec::slots_per_epoch()), epoch) + { Ok(v) => v, Err(e) => { // incorrect epoch @@ -52,7 +52,7 @@ impl ValidatorService for ValidatorServiceInstance { }; let validator_proposers: Result, _> = epoch - .slot_iter(spec.slots_per_epoch) + .slot_iter(T::EthSpec::slots_per_epoch()) .map(|slot| state.get_beacon_proposer_index(slot, relative_epoch, &spec)) .collect(); let validator_proposers = match validator_proposers { @@ -148,7 +148,7 @@ impl ValidatorService for 
ValidatorServiceInstance { // check if the validator needs to propose a block if let Some(slot) = validator_proposers.iter().position(|&v| val_index == v) { duty.set_block_production_slot( - epoch.start_slot(spec.slots_per_epoch).as_u64() + slot as u64, + epoch.start_slot(T::EthSpec::slots_per_epoch()).as_u64() + slot as u64, ); } else { // no blocks to propose this epoch diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index ef21218827..d6274befc8 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -3,8 +3,15 @@ extern crate slog; mod run; use clap::{App, Arg}; -use client::ClientConfig; -use slog::{error, o, Drain}; +use client::{ClientConfig, Eth2Config}; +use eth2_config::{get_data_dir, read_from_file, write_to_file}; +use slog::{crit, o, Drain}; +use std::path::PathBuf; + +pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; + +pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; +pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; fn main() { let decorator = slog_term::TermDecorator::new().build(); @@ -22,28 +29,22 @@ fn main() { .long("datadir") .value_name("DIR") .help("Data directory for keys and databases.") - .takes_value(true), + .takes_value(true) + .default_value(DEFAULT_DATA_DIR), ) // network related arguments .arg( Arg::with_name("listen-address") .long("listen-address") .value_name("Listen Address") - .help("The Network address to listen for p2p connections.") - .takes_value(true), - ) - .arg( - Arg::with_name("port") - .long("port") - .value_name("PORT") - .help("Network listen port for p2p connections.") + .help("One or more comma-delimited multi-addresses to listen for p2p connections.") .takes_value(true), ) .arg( Arg::with_name("boot-nodes") .long("boot-nodes") .value_name("BOOTNODES") - .help("A list of comma separated multi addresses representing bootnodes to connect to.") + .help("One or more comma-delimited multi-addresses to bootstrap the p2p network.") .takes_value(true), ) // rpc related 
arguments @@ -68,6 +69,28 @@ fn main() { .help("Listen port for RPC endpoint.") .takes_value(true), ) + // HTTP related arguments + .arg( + Arg::with_name("http") + .long("http") + .value_name("HTTP") + .help("Enable the HTTP server.") + .takes_value(false), + ) + .arg( + Arg::with_name("http-address") + .long("http-address") + .value_name("HTTPADDRESS") + .help("Listen address for the HTTP server.") + .takes_value(true), + ) + .arg( + Arg::with_name("http-port") + .long("http-port") + .value_name("HTTPPORT") + .help("Listen port for the HTTP server.") + .takes_value(true), + ) .arg( Arg::with_name("db") .long("db") @@ -77,13 +100,101 @@ fn main() { .possible_values(&["disk", "memory"]) .default_value("memory"), ) + .arg( + Arg::with_name("spec-constants") + .long("spec-constants") + .value_name("TITLE") + .short("s") + .help("The title of the spec constants for chain config.") + .takes_value(true) + .possible_values(&["mainnet", "minimal"]) + .default_value("minimal"), + ) + .arg( + Arg::with_name("recent-genesis") + .long("recent-genesis") + .short("r") + .help("When present, genesis will be within 30 minutes prior. Only for testing"), + ) .get_matches(); - // invalid arguments, panic - let config = ClientConfig::parse_args(matches, &logger).unwrap(); + let data_dir = match get_data_dir(&matches, PathBuf::from(DEFAULT_DATA_DIR)) { + Ok(dir) => dir, + Err(e) => { + crit!(logger, "Failed to initialize data dir"; "error" => format!("{:?}", e)); + return; + } + }; - match run::run_beacon_node(config, &logger) { + let client_config_path = data_dir.join(CLIENT_CONFIG_FILENAME); + + // Attempt to lead the `ClientConfig` from disk. + // + // If file doesn't exist, create a new, default one. 
+ let mut client_config = match read_from_file::(client_config_path.clone()) { + Ok(Some(c)) => c, + Ok(None) => { + let default = ClientConfig::default(); + if let Err(e) = write_to_file(client_config_path, &default) { + crit!(logger, "Failed to write default ClientConfig to file"; "error" => format!("{:?}", e)); + return; + } + default + } + Err(e) => { + crit!(logger, "Failed to load a ChainConfig file"; "error" => format!("{:?}", e)); + return; + } + }; + + // Ensure the `data_dir` in the config matches that supplied to the CLI. + client_config.data_dir = data_dir.clone(); + + // Update the client config with any CLI args. + match client_config.apply_cli_args(&matches) { + Ok(()) => (), + Err(s) => { + crit!(logger, "Failed to parse ClientConfig CLI arguments"; "error" => s); + return; + } + }; + + let eth2_config_path = data_dir.join(ETH2_CONFIG_FILENAME); + + // Attempt to load the `Eth2Config` from file. + // + // If the file doesn't exist, create a default one depending on the CLI flags. + let mut eth2_config = match read_from_file::(eth2_config_path.clone()) { + Ok(Some(c)) => c, + Ok(None) => { + let default = match matches.value_of("spec-constants") { + Some("mainnet") => Eth2Config::mainnet(), + Some("minimal") => Eth2Config::minimal(), + _ => unreachable!(), // Guarded by slog. + }; + if let Err(e) = write_to_file(eth2_config_path, &default) { + crit!(logger, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); + return; + } + default + } + Err(e) => { + crit!(logger, "Failed to load/generate an Eth2Config"; "error" => format!("{:?}", e)); + return; + } + }; + + // Update the eth2 config with any CLI flags. 
+ match eth2_config.apply_cli_args(&matches) { + Ok(()) => (), + Err(s) => { + crit!(logger, "Failed to parse Eth2Config CLI arguments"; "error" => s); + return; + } + }; + + match run::run_beacon_node(client_config, eth2_config, &logger) { Ok(_) => {} - Err(e) => error!(logger, "Beacon node failed because {:?}", e), + Err(e) => crit!(logger, "Beacon node failed to start"; "reason" => format!("{:}", e)), } } diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 6ec65a92d4..834f9a4281 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -1,62 +1,115 @@ use client::{ - error, notifier, BeaconChainTypes, Client, ClientConfig, DBType, TestnetDiskBeaconChainTypes, - TestnetMemoryBeaconChainTypes, + error, notifier, BeaconChainTypes, Client, ClientConfig, ClientType, Eth2Config, + InitialiseBeaconChain, }; use futures::sync::oneshot; use futures::Future; -use slog::info; +use slog::{error, info, warn}; use std::cell::RefCell; +use std::path::Path; +use std::path::PathBuf; +use store::{DiskStore, MemoryStore}; use tokio::runtime::Builder; use tokio::runtime::Runtime; use tokio::runtime::TaskExecutor; use tokio_timer::clock::Clock; +use types::{MainnetEthSpec, MinimalEthSpec}; -pub fn run_beacon_node(config: ClientConfig, log: &slog::Logger) -> error::Result<()> { +pub fn run_beacon_node( + client_config: ClientConfig, + eth2_config: Eth2Config, + log: &slog::Logger, +) -> error::Result<()> { let runtime = Builder::new() .name_prefix("main-") .clock(Clock::system()) .build() .map_err(|e| format!("{:?}", e))?; - // Log configuration - info!(log, "Listening on {:?}", &config.net_conf.listen_addresses; - "data_dir" => &config.data_dir.to_str(), - "port" => &config.net_conf.listen_port); - let executor = runtime.executor(); - match config.db_type { - DBType::Disk => { - info!( - log, - "BeaconNode starting"; - "type" => "TestnetDiskBeaconChainTypes" - ); - let client: Client = - Client::new(config, log.clone(), &executor)?; + let db_path: PathBuf = 
client_config + .db_path() + .ok_or_else::(|| "Unable to access database path".into())?; + let db_type = &client_config.db_type; + let spec_constants = eth2_config.spec_constants.clone(); - run(client, executor, runtime, log) - } - DBType::Memory => { - info!( - log, - "BeaconNode starting"; - "type" => "TestnetMemoryBeaconChainTypes" - ); - let client: Client = - Client::new(config, log.clone(), &executor)?; + let other_client_config = client_config.clone(); - run(client, executor, runtime, log) + warn!( + log, + "This software is EXPERIMENTAL and provides no guarantees or warranties." + ); + + let result = match (db_type.as_str(), spec_constants.as_str()) { + ("disk", "minimal") => run::>( + &db_path, + client_config, + eth2_config, + executor, + runtime, + log, + ), + ("memory", "minimal") => run::>( + &db_path, + client_config, + eth2_config, + executor, + runtime, + log, + ), + ("disk", "mainnet") => run::>( + &db_path, + client_config, + eth2_config, + executor, + runtime, + log, + ), + ("memory", "mainnet") => run::>( + &db_path, + client_config, + eth2_config, + executor, + runtime, + log, + ), + (db_type, spec) => { + error!(log, "Unknown runtime configuration"; "spec_constants" => spec, "db_type" => db_type); + Err("Unknown specification and/or db_type.".into()) } + }; + + if result.is_ok() { + info!( + log, + "Started beacon node"; + "p2p_listen_addresses" => format!("{:?}", &other_client_config.network.listen_addresses()), + "data_dir" => format!("{:?}", other_client_config.data_dir()), + "spec_constants" => &spec_constants, + "db_type" => &other_client_config.db_type, + ); } + + result } -pub fn run( - client: Client, +pub fn run( + db_path: &Path, + client_config: ClientConfig, + eth2_config: Eth2Config, executor: TaskExecutor, mut runtime: Runtime, log: &slog::Logger, -) -> error::Result<()> { +) -> error::Result<()> +where + T: BeaconChainTypes + InitialiseBeaconChain + Clone + Send + Sync + 'static, + T::Store: OpenDatabase, +{ + let store = 
T::Store::open_database(&db_path)?; + + let client: Client = Client::new(client_config, eth2_config, store, log.clone(), &executor)?; + // run service until ctrl-c let (ctrlc_send, ctrlc_oneshot) = oneshot::channel(); let ctrlc_send_c = RefCell::new(Some(ctrlc_send)); @@ -84,3 +137,22 @@ pub fn run( runtime.shutdown_on_idle().wait().unwrap(); Ok(()) } + +/// A convenience trait, providing a method to open a database. +/// +/// Panics if unable to open the database. +pub trait OpenDatabase: Sized { + fn open_database(path: &Path) -> error::Result; +} + +impl OpenDatabase for MemoryStore { + fn open_database(_path: &Path) -> error::Result { + Ok(MemoryStore::open()) + } +} + +impl OpenDatabase for DiskStore { + fn open_database(path: &Path) -> error::Result { + DiskStore::open(path).map_err(|e| format!("Unable to open database: {:?}", e).into()) + } +} diff --git a/beacon_node/store/src/block_at_slot.rs b/beacon_node/store/src/block_at_slot.rs index 4a8abaefda..5a0dd68615 100644 --- a/beacon_node/store/src/block_at_slot.rs +++ b/beacon_node/store/src/block_at_slot.rs @@ -25,15 +25,23 @@ pub fn get_block_at_preceeding_slot( slot: Slot, start_root: Hash256, ) -> Result, Error> { - let mut root = start_root; + Ok(match get_at_preceeding_slot(store, slot, start_root)? { + Some((hash, bytes)) => Some((hash, BeaconBlock::from_ssz_bytes(&bytes)?)), + None => None, + }) +} +fn get_at_preceeding_slot( + store: &T, + slot: Slot, + mut root: Hash256, +) -> Result)>, Error> { loop { if let Some(bytes) = get_block_bytes(store, root)? 
{ let this_slot = read_slot_from_block_bytes(&bytes)?; if this_slot == slot { - let block = BeaconBlock::from_ssz_bytes(&bytes)?; - break Ok(Some((root, block))); + break Ok(Some((root, bytes))); } else if this_slot < slot { break Ok(None); } else { @@ -53,7 +61,7 @@ mod tests { #[test] fn read_slot() { - let spec = FewValidatorsEthSpec::spec(); + let spec = MinimalEthSpec::default_spec(); let test_slot = |slot: Slot| { let mut block = BeaconBlock::empty(&spec); @@ -77,7 +85,7 @@ mod tests { #[test] fn read_previous_block_root() { - let spec = FewValidatorsEthSpec::spec(); + let spec = MinimalEthSpec::default_spec(); let test_root = |root: Hash256| { let mut block = BeaconBlock::empty(&spec); @@ -122,7 +130,7 @@ mod tests { fn chain_without_skips() { let n: usize = 10; let store = MemoryStore::open(); - let spec = FewValidatorsEthSpec::spec(); + let spec = MinimalEthSpec::default_spec(); let slots: Vec = (0..n).collect(); let blocks_and_roots = build_chain(&store, &slots, &spec); @@ -146,7 +154,7 @@ mod tests { #[test] fn chain_with_skips() { let store = MemoryStore::open(); - let spec = FewValidatorsEthSpec::spec(); + let spec = MinimalEthSpec::default_spec(); let slots = vec![0, 1, 2, 5]; diff --git a/beacon_node/store/src/disk_db.rs b/beacon_node/store/src/disk_db.rs index eb2b885c6b..669547ab97 100644 --- a/beacon_node/store/src/disk_db.rs +++ b/beacon_node/store/src/disk_db.rs @@ -1,6 +1,5 @@ extern crate rocksdb; -// use super::stores::COLUMNS; use super::{ClientDB, DBError, DBValue}; use rocksdb::Error as RocksError; use rocksdb::{Options, DB}; diff --git a/beacon_node/store/src/impls.rs b/beacon_node/store/src/impls.rs index 91f8d52de6..418fcade1e 100644 --- a/beacon_node/store/src/impls.rs +++ b/beacon_node/store/src/impls.rs @@ -1,6 +1,8 @@ use crate::*; use ssz::{Decode, Encode}; +mod beacon_state; + impl StoreItem for BeaconBlock { fn db_column() -> DBColumn { DBColumn::BeaconBlock @@ -14,17 +16,3 @@ impl StoreItem for BeaconBlock { 
Self::from_ssz_bytes(bytes).map_err(Into::into) } } - -impl StoreItem for BeaconState { - fn db_column() -> DBColumn { - DBColumn::BeaconState - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &mut [u8]) -> Result { - Self::from_ssz_bytes(bytes).map_err(Into::into) - } -} diff --git a/beacon_node/store/src/impls/beacon_state.rs b/beacon_node/store/src/impls/beacon_state.rs new file mode 100644 index 0000000000..591663fe05 --- /dev/null +++ b/beacon_node/store/src/impls/beacon_state.rs @@ -0,0 +1,64 @@ +use crate::*; +use ssz::{Decode, DecodeError, Encode}; +use ssz_derive::{Decode, Encode}; +use std::convert::TryInto; +use types::beacon_state::{CommitteeCache, CACHED_EPOCHS}; + +/// A container for storing `BeaconState` components. +#[derive(Encode, Decode)] +struct StorageContainer { + state_bytes: Vec, + committee_caches_bytes: Vec>, +} + +impl StorageContainer { + /// Create a new instance for storing a `BeaconState`. + pub fn new(state: &BeaconState) -> Self { + let mut committee_caches_bytes = vec![]; + + for cache in state.committee_caches[..].iter() { + committee_caches_bytes.push(cache.as_ssz_bytes()); + } + + Self { + state_bytes: state.as_ssz_bytes(), + committee_caches_bytes, + } + } +} + +impl TryInto> for StorageContainer { + type Error = Error; + + fn try_into(self) -> Result, Error> { + let mut state: BeaconState = BeaconState::from_ssz_bytes(&self.state_bytes)?; + + for i in 0..CACHED_EPOCHS { + let bytes = &self.committee_caches_bytes.get(i).ok_or_else(|| { + Error::SszDecodeError(DecodeError::BytesInvalid( + "Insufficient committees for BeaconState".to_string(), + )) + })?; + + state.committee_caches[i] = CommitteeCache::from_ssz_bytes(bytes)?; + } + + Ok(state) + } +} + +impl StoreItem for BeaconState { + fn db_column() -> DBColumn { + DBColumn::BeaconState + } + + fn as_store_bytes(&self) -> Vec { + let container = StorageContainer::new(self); + container.as_ssz_bytes() + } + + fn 
from_store_bytes(bytes: &mut [u8]) -> Result { + let container = StorageContainer::from_ssz_bytes(bytes)?; + container.try_into() + } +} diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index 09aec46fa7..699861e3ae 100644 --- a/beacon_node/store/src/leveldb_store.rs +++ b/beacon_node/store/src/leveldb_store.rs @@ -5,10 +5,14 @@ use leveldb::database::Database; use leveldb::error::Error as LevelDBError; use leveldb::options::{Options, ReadOptions, WriteOptions}; use std::path::Path; +use std::sync::Arc; /// A wrapped leveldb database. +#[derive(Clone)] pub struct LevelDB { - db: Database, + // Note: this `Arc` is only included because of an artificial constraint by gRPC. Hopefully we + // can remove this one day. + db: Arc>, } impl LevelDB { @@ -18,7 +22,7 @@ impl LevelDB { options.create_if_missing = true; - let db = Database::open(path, options)?; + let db = Arc::new(Database::open(path, options)?); Ok(Self { db }) } diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs index 086a16c269..048c054f52 100644 --- a/beacon_node/store/src/memory_store.rs +++ b/beacon_node/store/src/memory_store.rs @@ -1,19 +1,23 @@ use super::{Error, Store}; use parking_lot::RwLock; use std::collections::HashMap; +use std::sync::Arc; type DBHashMap = HashMap, Vec>; /// A thread-safe `HashMap` wrapper. +#[derive(Clone)] pub struct MemoryStore { - db: RwLock, + // Note: this `Arc` is only included because of an artificial constraint by gRPC. Hopefully we + // can remove this one day. + db: Arc>, } impl MemoryStore { /// Create a new, empty database. 
pub fn open() -> Self { Self { - db: RwLock::new(HashMap::new()), + db: Arc::new(RwLock::new(HashMap::new())), } } diff --git a/eth2/fork_choice/Cargo.toml b/eth2/fork_choice/Cargo.toml index f2e6825ed9..e37e415e49 100644 --- a/eth2/fork_choice/Cargo.toml +++ b/eth2/fork_choice/Cargo.toml @@ -4,6 +4,10 @@ version = "0.1.0" authors = ["Age Manning "] edition = "2018" +[[bench]] +name = "benches" +harness = false + [dependencies] store = { path = "../../beacon_node/store" } ssz = { path = "../utils/ssz" } @@ -12,6 +16,7 @@ log = "0.4.6" bit-vec = "0.5.0" [dev-dependencies] +criterion = "0.2" hex = "0.3.2" yaml-rust = "0.4.2" bls = { path = "../utils/bls" } diff --git a/eth2/fork_choice/benches/benches.rs b/eth2/fork_choice/benches/benches.rs new file mode 100644 index 0000000000..f311e1ccbb --- /dev/null +++ b/eth2/fork_choice/benches/benches.rs @@ -0,0 +1,75 @@ +use criterion::Criterion; +use criterion::{criterion_group, criterion_main, Benchmark}; +use fork_choice::{test_utils::TestingForkChoiceBuilder, ForkChoice, OptimizedLMDGhost}; +use std::sync::Arc; +use store::MemoryStore; +use types::{ChainSpec, EthSpec, MainnetEthSpec}; + +pub type TestedForkChoice = OptimizedLMDGhost; +pub type TestedEthSpec = MainnetEthSpec; + +/// Helper function to setup a builder and spec. +fn setup( + validator_count: usize, + chain_length: usize, +) -> ( + TestingForkChoiceBuilder, + ChainSpec, +) { + let store = MemoryStore::open(); + let builder: TestingForkChoiceBuilder = + TestingForkChoiceBuilder::new(validator_count, chain_length, Arc::new(store)); + let spec = TestedEthSpec::default_spec(); + + (builder, spec) +} + +/// Benches adding blocks to fork_choice. 
+fn add_block(c: &mut Criterion) { + let validator_count = 16; + let chain_length = 100; + + let (builder, spec) = setup(validator_count, chain_length); + + c.bench( + &format!("{}_blocks", chain_length), + Benchmark::new("add_blocks", move |b| { + b.iter(|| { + let mut fc = builder.build::>(); + for (root, block) in builder.chain.iter().skip(1) { + fc.add_block(block, root, &spec).unwrap(); + } + }) + }) + .sample_size(10), + ); +} + +/// Benches fork choice head finding. +fn find_head(c: &mut Criterion) { + let validator_count = 16; + let chain_length = 64 * 2; + + let (builder, spec) = setup(validator_count, chain_length); + + let mut fc = builder.build::>(); + for (root, block) in builder.chain.iter().skip(1) { + fc.add_block(block, root, &spec).unwrap(); + } + + let head_root = builder.chain.last().unwrap().0; + for i in 0..validator_count { + fc.add_attestation(i as u64, &head_root, &spec).unwrap(); + } + + c.bench( + &format!("{}_blocks", chain_length), + Benchmark::new("find_head", move |b| { + b.iter(|| fc.find_head(&builder.genesis_root(), &spec).unwrap()) + }) + .sample_size(10), + ); +} + +criterion_group!(benches, add_block, find_head); +criterion_main!(benches); diff --git a/eth2/fork_choice/examples/example.rs b/eth2/fork_choice/examples/example.rs new file mode 100644 index 0000000000..a912c3753c --- /dev/null +++ b/eth2/fork_choice/examples/example.rs @@ -0,0 +1,40 @@ +use fork_choice::{test_utils::TestingForkChoiceBuilder, ForkChoice, OptimizedLMDGhost}; +use std::sync::Arc; +use store::{MemoryStore, Store}; +use types::{BeaconBlock, ChainSpec, EthSpec, Hash256, MainnetEthSpec}; + +fn main() { + let validator_count = 16; + let chain_length = 100; + let repetitions = 50; + + let store = MemoryStore::open(); + let builder: TestingForkChoiceBuilder = + TestingForkChoiceBuilder::new(validator_count, chain_length, Arc::new(store)); + + let fork_choosers: Vec> = (0..repetitions) + .into_iter() + .map(|_| builder.build()) + .collect(); + + let spec = 
&MainnetEthSpec::default_spec(); + + println!("Running {} times...", repetitions); + for fc in fork_choosers { + do_thing(fc, &builder.chain, builder.genesis_root(), spec); + } +} + +#[inline(never)] +fn do_thing, S: Store>( + mut fc: F, + chain: &[(Hash256, BeaconBlock)], + genesis_root: Hash256, + spec: &ChainSpec, +) { + for (root, block) in chain.iter().skip(1) { + fc.add_block(block, root, spec).unwrap(); + } + + let _head = fc.find_head(&genesis_root, spec).unwrap(); +} diff --git a/eth2/fork_choice/src/bitwise_lmd_ghost.rs b/eth2/fork_choice/src/bitwise_lmd_ghost.rs index c159def357..3ed57bf4dd 100644 --- a/eth2/fork_choice/src/bitwise_lmd_ghost.rs +++ b/eth2/fork_choice/src/bitwise_lmd_ghost.rs @@ -48,18 +48,6 @@ pub struct BitwiseLMDGhost { } impl BitwiseLMDGhost { - pub fn new(store: Arc) -> Self { - BitwiseLMDGhost { - cache: HashMap::new(), - ancestors: vec![HashMap::new(); 16], - latest_attestation_targets: HashMap::new(), - children: HashMap::new(), - max_known_height: SlotHeight::new(0), - store, - _phantom: PhantomData, - } - } - /// Finds the latest votes weighted by validator balance. Returns a hashmap of block_hash to /// weighted votes. 
pub fn get_latest_votes( @@ -80,7 +68,7 @@ impl BitwiseLMDGhost { .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; let active_validator_indices = - current_state.get_active_validator_indices(block_slot.epoch(spec.slots_per_epoch)); + current_state.get_active_validator_indices(block_slot.epoch(E::slots_per_epoch())); for index in active_validator_indices { let balance = std::cmp::min(current_state.balances[index], spec.max_effective_balance) @@ -130,12 +118,12 @@ impl BitwiseLMDGhost { // not in the cache recursively search for ancestors using a log-lookup if let Some(ancestor) = { - let ancestor_lookup = self.ancestors + let ancestor_lookup = *self.ancestors [log2_int((block_height - target_height - 1u64).as_u64()) as usize] .get(&block_hash) //TODO: Panic if we can't lookup and fork choice fails .expect("All blocks should be added to the ancestor log lookup table"); - self.get_ancestor(*ancestor_lookup, target_height, &spec) + self.get_ancestor(ancestor_lookup, target_height, &spec) } { // add the result to the cache self.cache.insert(cache_key, ancestor); @@ -161,7 +149,7 @@ impl BitwiseLMDGhost { // these have already been weighted by balance for (hash, votes) in latest_votes.iter() { if let Some(ancestor) = self.get_ancestor(*hash, block_height, spec) { - let current_vote_value = current_votes.get(&ancestor).unwrap_or_else(|| &0); + let current_vote_value = *current_votes.get(&ancestor).unwrap_or_else(|| &0); current_votes.insert(ancestor, current_vote_value + *votes); total_vote_count += votes; } @@ -227,7 +215,19 @@ impl BitwiseLMDGhost { } } -impl ForkChoice for BitwiseLMDGhost { +impl ForkChoice for BitwiseLMDGhost { + fn new(store: Arc) -> Self { + BitwiseLMDGhost { + cache: HashMap::new(), + ancestors: vec![HashMap::new(); 16], + latest_attestation_targets: HashMap::new(), + children: HashMap::new(), + max_known_height: SlotHeight::new(0), + store, + _phantom: PhantomData, + } + } + fn add_block( &mut self, block: &BeaconBlock, diff 
--git a/eth2/fork_choice/src/lib.rs b/eth2/fork_choice/src/lib.rs index ffc40e6c6b..f4a1fa5cb6 100644 --- a/eth2/fork_choice/src/lib.rs +++ b/eth2/fork_choice/src/lib.rs @@ -20,9 +20,9 @@ pub mod bitwise_lmd_ghost; pub mod longest_chain; pub mod optimized_lmd_ghost; pub mod slow_lmd_ghost; +pub mod test_utils; -// use store::stores::BeaconBlockAtSlotError; -// use store::DBError; +use std::sync::Arc; use store::Error as DBError; use types::{BeaconBlock, ChainSpec, Hash256}; @@ -34,7 +34,10 @@ pub use slow_lmd_ghost::SlowLMDGhost; /// Defines the interface for Fork Choices. Each Fork choice will define their own data structures /// which can be built in block processing through the `add_block` and `add_attestation` functions. /// The main fork choice algorithm is specified in `find_head -pub trait ForkChoice: Send + Sync { +pub trait ForkChoice: Send + Sync { + /// Create a new `ForkChoice` which reads from `store`. + fn new(store: Arc) -> Self; + /// Called when a block has been added. Allows generic block-level data structures to be /// built for a given fork-choice. fn add_block( @@ -78,22 +81,6 @@ impl From for ForkChoiceError { } } -/* -impl From for ForkChoiceError { - fn from(e: BeaconBlockAtSlotError) -> ForkChoiceError { - match e { - BeaconBlockAtSlotError::UnknownBeaconBlock(hash) => { - ForkChoiceError::MissingBeaconBlock(hash) - } - BeaconBlockAtSlotError::InvalidBeaconBlock(hash) => { - ForkChoiceError::MissingBeaconBlock(hash) - } - BeaconBlockAtSlotError::DBError(string) => ForkChoiceError::StorageError(string), - } - } -} -*/ - /// Fork choice options that are currently implemented. 
#[derive(Debug, Clone)] pub enum ForkChoiceAlgorithm { diff --git a/eth2/fork_choice/src/longest_chain.rs b/eth2/fork_choice/src/longest_chain.rs index 11453cf493..08e47cf393 100644 --- a/eth2/fork_choice/src/longest_chain.rs +++ b/eth2/fork_choice/src/longest_chain.rs @@ -10,16 +10,14 @@ pub struct LongestChain { store: Arc, } -impl LongestChain { - pub fn new(store: Arc) -> Self { +impl ForkChoice for LongestChain { + fn new(store: Arc) -> Self { LongestChain { head_block_hashes: Vec::new(), store, } } -} -impl ForkChoice for LongestChain { fn add_block( &mut self, block: &BeaconBlock, diff --git a/eth2/fork_choice/src/optimized_lmd_ghost.rs b/eth2/fork_choice/src/optimized_lmd_ghost.rs index 01ad4dd29b..7a48c461e4 100644 --- a/eth2/fork_choice/src/optimized_lmd_ghost.rs +++ b/eth2/fork_choice/src/optimized_lmd_ghost.rs @@ -48,18 +48,6 @@ pub struct OptimizedLMDGhost { } impl OptimizedLMDGhost { - pub fn new(store: Arc) -> Self { - OptimizedLMDGhost { - cache: HashMap::new(), - ancestors: vec![HashMap::new(); 16], - latest_attestation_targets: HashMap::new(), - children: HashMap::new(), - max_known_height: SlotHeight::new(0), - store, - _phantom: PhantomData, - } - } - /// Finds the latest votes weighted by validator balance. Returns a hashmap of block_hash to /// weighted votes. 
pub fn get_latest_votes( @@ -80,7 +68,7 @@ impl OptimizedLMDGhost { .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; let active_validator_indices = - current_state.get_active_validator_indices(block_slot.epoch(spec.slots_per_epoch)); + current_state.get_active_validator_indices(block_slot.epoch(E::slots_per_epoch())); for index in active_validator_indices { let balance = std::cmp::min(current_state.balances[index], spec.max_effective_balance) @@ -130,12 +118,12 @@ impl OptimizedLMDGhost { // not in the cache recursively search for ancestors using a log-lookup if let Some(ancestor) = { - let ancestor_lookup = self.ancestors + let ancestor_lookup = *self.ancestors [log2_int((block_height - target_height - 1u64).as_u64()) as usize] .get(&block_hash) //TODO: Panic if we can't lookup and fork choice fails .expect("All blocks should be added to the ancestor log lookup table"); - self.get_ancestor(*ancestor_lookup, target_height, &spec) + self.get_ancestor(ancestor_lookup, target_height, &spec) } { // add the result to the cache self.cache.insert(cache_key, ancestor); @@ -161,7 +149,7 @@ impl OptimizedLMDGhost { // these have already been weighted by balance for (hash, votes) in latest_votes.iter() { if let Some(ancestor) = self.get_ancestor(*hash, block_height, spec) { - let current_vote_value = current_votes.get(&ancestor).unwrap_or_else(|| &0); + let current_vote_value = *current_votes.get(&ancestor).unwrap_or_else(|| &0); current_votes.insert(ancestor, current_vote_value + *votes); total_vote_count += votes; } @@ -198,7 +186,19 @@ impl OptimizedLMDGhost { } } -impl ForkChoice for OptimizedLMDGhost { +impl ForkChoice for OptimizedLMDGhost { + fn new(store: Arc) -> Self { + OptimizedLMDGhost { + cache: HashMap::new(), + ancestors: vec![HashMap::new(); 16], + latest_attestation_targets: HashMap::new(), + children: HashMap::new(), + max_known_height: SlotHeight::new(0), + store, + _phantom: PhantomData, + } + } + fn add_block( &mut self, block: 
&BeaconBlock, diff --git a/eth2/fork_choice/src/slow_lmd_ghost.rs b/eth2/fork_choice/src/slow_lmd_ghost.rs index bde918d7e9..9b7a204002 100644 --- a/eth2/fork_choice/src/slow_lmd_ghost.rs +++ b/eth2/fork_choice/src/slow_lmd_ghost.rs @@ -20,15 +20,6 @@ pub struct SlowLMDGhost { } impl SlowLMDGhost { - pub fn new(store: Arc) -> Self { - SlowLMDGhost { - latest_attestation_targets: HashMap::new(), - children: HashMap::new(), - store, - _phantom: PhantomData, - } - } - /// Finds the latest votes weighted by validator balance. Returns a hashmap of block_hash to /// weighted votes. pub fn get_latest_votes( @@ -49,7 +40,7 @@ impl SlowLMDGhost { .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; let active_validator_indices = - current_state.get_active_validator_indices(block_slot.epoch(spec.slots_per_epoch)); + current_state.get_active_validator_indices(block_slot.epoch(E::slots_per_epoch())); for index in active_validator_indices { let balance = std::cmp::min(current_state.balances[index], spec.max_effective_balance) @@ -92,7 +83,16 @@ impl SlowLMDGhost { } } -impl ForkChoice for SlowLMDGhost { +impl ForkChoice for SlowLMDGhost { + fn new(store: Arc) -> Self { + SlowLMDGhost { + latest_attestation_targets: HashMap::new(), + children: HashMap::new(), + store, + _phantom: PhantomData, + } + } + /// Process when a block is added fn add_block( &mut self, diff --git a/eth2/fork_choice/src/test_utils.rs b/eth2/fork_choice/src/test_utils.rs new file mode 100644 index 0000000000..8ef20108aa --- /dev/null +++ b/eth2/fork_choice/src/test_utils.rs @@ -0,0 +1,91 @@ +use crate::ForkChoice; +use std::marker::PhantomData; +use std::sync::Arc; +use store::Store; +use types::{ + test_utils::{SeedableRng, TestRandom, TestingBeaconStateBuilder, XorShiftRng}, + BeaconBlock, BeaconState, EthSpec, Hash256, Keypair, MainnetEthSpec, +}; + +/// Creates a chain of blocks and produces `ForkChoice` instances with pre-filled stores. 
+pub struct TestingForkChoiceBuilder { + store: Arc, + pub chain: Vec<(Hash256, BeaconBlock)>, + _phantom: PhantomData, +} + +impl TestingForkChoiceBuilder { + pub fn new(validator_count: usize, chain_length: usize, store: Arc) -> Self { + let chain = + get_chain_of_blocks::(chain_length, validator_count, store.clone()); + + Self { + store, + chain, + _phantom: PhantomData, + } + } + + pub fn genesis_root(&self) -> Hash256 { + self.chain[0].0 + } + + /// Return a new `ForkChoice` instance with a chain stored in it's `Store`. + pub fn build>(&self) -> F { + F::new(self.store.clone()) + } +} + +fn get_state(validator_count: usize) -> BeaconState { + let spec = T::default_spec(); + + let builder: TestingBeaconStateBuilder = + TestingBeaconStateBuilder::from_single_keypair(validator_count, &Keypair::random(), &spec); + let (state, _keypairs) = builder.build(); + state +} + +/// Generates a chain of blocks of length `len`. +/// +/// Creates a `BeaconState` for the block and stores it in `Store`, along with the block. +/// +/// Returns the chain of blocks. +fn get_chain_of_blocks( + len: usize, + validator_count: usize, + store: Arc, +) -> Vec<(Hash256, BeaconBlock)> { + let spec = T::default_spec(); + let mut blocks_and_roots: Vec<(Hash256, BeaconBlock)> = vec![]; + let mut unique_hashes = (0..).map(Hash256::from); + let mut random_block = BeaconBlock::random_for_test(&mut XorShiftRng::from_seed([42; 16])); + random_block.previous_block_root = Hash256::zero(); + let beacon_state = get_state::(validator_count); + + for i in 0..len { + let slot = spec.genesis_slot + i as u64; + + // Generate and store the state. + let mut state = beacon_state.clone(); + state.slot = slot; + let state_root = unique_hashes.next().unwrap(); + store.put(&state_root, &state).unwrap(); + + // Generate the block. + let mut block = random_block.clone(); + block.slot = slot; + block.state_root = state_root; + + // Chain all the blocks to their parents. 
+ if i > 0 { + block.previous_block_root = blocks_and_roots[i - 1].0; + } + + // Store the block. + let block_root = unique_hashes.next().unwrap(); + store.put(&block_root, &block).unwrap(); + blocks_and_roots.push((block_root, block)); + } + + blocks_and_roots +} diff --git a/eth2/fork_choice/tests/tests.rs b/eth2/fork_choice/tests/tests.rs index 1ed4faa8e9..39e70a7ddb 100644 --- a/eth2/fork_choice/tests/tests.rs +++ b/eth2/fork_choice/tests/tests.rs @@ -1,20 +1,17 @@ #![cfg(not(debug_assertions))] -// Tests the available fork-choice algorithms - +/// Tests the available fork-choice algorithms pub use beacon_chain::BeaconChain; use bls::Signature; use store::MemoryStore; use store::Store; // use env_logger::{Builder, Env}; -use fork_choice::{ - BitwiseLMDGhost, ForkChoice, ForkChoiceAlgorithm, LongestChain, OptimizedLMDGhost, SlowLMDGhost, -}; +use fork_choice::{BitwiseLMDGhost, ForkChoice, LongestChain, OptimizedLMDGhost, SlowLMDGhost}; use std::collections::HashMap; use std::sync::Arc; use std::{fs::File, io::prelude::*, path::PathBuf}; use types::test_utils::TestingBeaconStateBuilder; use types::{ - BeaconBlock, BeaconBlockBody, Eth1Data, EthSpec, FoundationEthSpec, Hash256, Keypair, Slot, + BeaconBlock, BeaconBlockBody, Eth1Data, EthSpec, Hash256, Keypair, MainnetEthSpec, Slot, }; use yaml_rust::yaml; @@ -25,8 +22,7 @@ fn test_optimized_lmd_ghost() { // set up logging // Builder::from_env(Env::default().default_filter_or("trace")).init(); - test_yaml_vectors( - ForkChoiceAlgorithm::OptimizedLMDGhost, + test_yaml_vectors::>( "tests/lmd_ghost_test_vectors.yaml", 100, ); @@ -37,8 +33,7 @@ fn test_bitwise_lmd_ghost() { // set up logging //Builder::from_env(Env::default().default_filter_or("trace")).init(); - test_yaml_vectors( - ForkChoiceAlgorithm::BitwiseLMDGhost, + test_yaml_vectors::>( "tests/bitwise_lmd_ghost_test_vectors.yaml", 100, ); @@ -46,8 +41,7 @@ fn test_bitwise_lmd_ghost() { #[test] fn test_slow_lmd_ghost() { - test_yaml_vectors( - 
ForkChoiceAlgorithm::SlowLMDGhost, + test_yaml_vectors::>( "tests/lmd_ghost_test_vectors.yaml", 100, ); @@ -55,16 +49,11 @@ fn test_slow_lmd_ghost() { #[test] fn test_longest_chain() { - test_yaml_vectors( - ForkChoiceAlgorithm::LongestChain, - "tests/longest_chain_test_vectors.yaml", - 100, - ); + test_yaml_vectors::>("tests/longest_chain_test_vectors.yaml", 100); } // run a generic test over given YAML test vectors -fn test_yaml_vectors( - fork_choice_algo: ForkChoiceAlgorithm, +fn test_yaml_vectors>( yaml_file_path: &str, emulated_validators: usize, // the number of validators used to give weights. ) { @@ -72,7 +61,7 @@ fn test_yaml_vectors( let test_cases = load_test_cases_from_yaml(yaml_file_path); // default vars - let spec = FoundationEthSpec::spec(); + let spec = MainnetEthSpec::default_spec(); let zero_hash = Hash256::zero(); let eth1_data = Eth1Data { deposit_count: 0, @@ -96,8 +85,7 @@ fn test_yaml_vectors( // process the tests for test_case in test_cases { // setup a fresh test - let (mut fork_choice, store, state_root) = - setup_inital_state(&fork_choice_algo, emulated_validators); + let (mut fork_choice, store, state_root) = setup_inital_state::(emulated_validators); // keep a hashmap of block_id's to block_hashes (random hashes to abstract block_id) //let mut block_id_map: HashMap = HashMap::new(); @@ -206,35 +194,19 @@ fn load_test_cases_from_yaml(file_path: &str) -> Vec { doc["test_cases"].as_vec().unwrap().clone() } -// initialise a single validator and state. All blocks will reference this state root. 
-fn setup_inital_state( - fork_choice_algo: &ForkChoiceAlgorithm, - num_validators: usize, -) -> (Box, Arc, Hash256) { +fn setup_inital_state( + // fork_choice_algo: &ForkChoiceAlgorithm, + num_validators: usize +) -> (T, Arc, Hash256) +where + T: ForkChoice, +{ let store = Arc::new(MemoryStore::open()); - // the fork choice instantiation - let fork_choice: Box = match fork_choice_algo { - ForkChoiceAlgorithm::OptimizedLMDGhost => { - let f: OptimizedLMDGhost = - OptimizedLMDGhost::new(store.clone()); - Box::new(f) - } - ForkChoiceAlgorithm::BitwiseLMDGhost => { - let f: BitwiseLMDGhost = - BitwiseLMDGhost::new(store.clone()); - Box::new(f) - } - ForkChoiceAlgorithm::SlowLMDGhost => { - let f: SlowLMDGhost = SlowLMDGhost::new(store.clone()); - Box::new(f) - } - ForkChoiceAlgorithm::LongestChain => Box::new(LongestChain::new(store.clone())), - }; + let fork_choice = ForkChoice::new(store.clone()); + let spec = MainnetEthSpec::default_spec(); - let spec = FoundationEthSpec::spec(); - - let mut state_builder: TestingBeaconStateBuilder = + let mut state_builder: TestingBeaconStateBuilder = TestingBeaconStateBuilder::from_single_keypair(num_validators, &Keypair::random(), &spec); state_builder.build_caches(&spec).unwrap(); let (state, _keypairs) = state_builder.build(); diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 0affba3f44..ec7d5aa905 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -675,12 +675,12 @@ mod tests { .collect() } - fn test_state(rng: &mut XorShiftRng) -> (ChainSpec, BeaconState) { - let spec = FoundationEthSpec::spec(); + fn test_state(rng: &mut XorShiftRng) -> (ChainSpec, BeaconState) { + let spec = MainnetEthSpec::default_spec(); let mut state = BeaconState::random_for_test(rng); - state.fork = Fork::genesis(&spec); + state.fork = Fork::genesis(MainnetEthSpec::genesis_epoch()); (spec, state) } @@ -721,27 +721,27 @@ mod tests { fn attestation_test_state( num_committees: usize, ) -> 
(BeaconState, Vec, ChainSpec) { - let spec = E::spec(); + let spec = E::default_spec(); let num_validators = - num_committees * spec.slots_per_epoch as usize * spec.target_committee_size; + num_committees * E::slots_per_epoch() as usize * spec.target_committee_size; let mut state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists( num_validators, &spec, ); - let slot_offset = 1000 * spec.slots_per_epoch + spec.slots_per_epoch / 2; + let slot_offset = 1000 * E::slots_per_epoch() + E::slots_per_epoch() / 2; let slot = spec.genesis_slot + slot_offset; - state_builder.teleport_to_slot(slot, &spec); + state_builder.teleport_to_slot(slot); state_builder.build_caches(&spec).unwrap(); let (state, keypairs) = state_builder.build(); - (state, keypairs, FoundationEthSpec::spec()) + (state, keypairs, MainnetEthSpec::default_spec()) } #[test] fn test_attestation_score() { let (ref mut state, ref keypairs, ref spec) = - attestation_test_state::(1); + attestation_test_state::(1); let slot = state.slot - 1; let committees = state @@ -793,7 +793,7 @@ mod tests { #[test] fn attestation_aggregation_insert_get_prune() { let (ref mut state, ref keypairs, ref spec) = - attestation_test_state::(1); + attestation_test_state::(1); let op_pool = OperationPool::new(); @@ -852,7 +852,7 @@ mod tests { // But once we advance to more than an epoch after the attestation, it should prune it // out of existence. 
- state.slot += 2 * spec.slots_per_epoch; + state.slot += 2 * MainnetEthSpec::slots_per_epoch(); op_pool.prune_attestations(state); assert_eq!(op_pool.num_attestations(), 0); } @@ -861,7 +861,7 @@ mod tests { #[test] fn attestation_duplicate() { let (ref mut state, ref keypairs, ref spec) = - attestation_test_state::(1); + attestation_test_state::(1); let op_pool = OperationPool::new(); @@ -898,7 +898,7 @@ mod tests { #[test] fn attestation_pairwise_overlapping() { let (ref mut state, ref keypairs, ref spec) = - attestation_test_state::(1); + attestation_test_state::(1); let op_pool = OperationPool::new(); @@ -946,7 +946,7 @@ mod tests { let big_step_size = 4; let (ref mut state, ref keypairs, ref spec) = - attestation_test_state::(big_step_size); + attestation_test_state::(big_step_size); let op_pool = OperationPool::new(); diff --git a/eth2/state_processing/benches/bench_epoch_processing.rs b/eth2/state_processing/benches/bench_epoch_processing.rs index 9bff3a2e3e..e89305ce44 100644 --- a/eth2/state_processing/benches/bench_epoch_processing.rs +++ b/eth2/state_processing/benches/bench_epoch_processing.rs @@ -17,13 +17,13 @@ pub const SMALL_BENCHING_SAMPLE_SIZE: usize = 10; /// Run the benchmarking suite on a foundation spec with 16,384 validators. pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: usize) { - let spec = ChainSpec::foundation(); + let spec = ChainSpec::mainnet(); let mut builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec); // Set the state to be just before an epoch transition. - let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); + let target_slot = (T::genesis_epoch() + 4).end_slot(T::slots_per_epoch()); builder.teleport_to_slot(target_slot, &spec); // Builds all caches; benches will not contain shuffling/committee building times. 
@@ -38,10 +38,10 @@ pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: u // Assert that the state has an attestations for each committee that is able to include an // attestation in the state. let committees_per_epoch = spec.get_epoch_committee_count(validator_count); - let committees_per_slot = committees_per_epoch / spec.slots_per_epoch; + let committees_per_slot = committees_per_epoch / T::slots_per_epoch(); let previous_epoch_attestations = committees_per_epoch; let current_epoch_attestations = - committees_per_slot * (spec.slots_per_epoch - spec.min_attestation_inclusion_delay); + committees_per_slot * (T::slots_per_epoch() - spec.min_attestation_inclusion_delay); assert_eq!( state.latest_attestations.len() as u64, previous_epoch_attestations + current_epoch_attestations, diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs index 0cf797147f..3d884c3d80 100644 --- a/eth2/state_processing/benches/benches.rs +++ b/eth2/state_processing/benches/benches.rs @@ -25,7 +25,7 @@ pub fn block_processing_worst_case(c: &mut Criterion) { ); // Use the specifications from the Eth2.0 spec. - let spec = ChainSpec::foundation(); + let spec = ChainSpec::mainnet(); // Create a builder for configuring the block and state for benching. let mut bench_builder = BlockBenchingBuilder::new(VALIDATOR_COUNT, &spec); @@ -34,7 +34,7 @@ pub fn block_processing_worst_case(c: &mut Criterion) { bench_builder.maximize_block_operations(&spec); // Set the state and block to be in the last slot of the 4th epoch. - let last_slot_of_epoch = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); + let last_slot_of_epoch = (T::genesis_epoch() + 4).end_slot(T::slots_per_epoch()); bench_builder.set_slot(last_slot_of_epoch, &spec); // Build all the state caches so the build times aren't included in the benches. 
@@ -59,7 +59,7 @@ pub fn block_processing_reasonable_case(c: &mut Criterion) { ); // Use the specifications from the Eth2.0 spec. - let spec = ChainSpec::foundation(); + let spec = ChainSpec::mainnet(); // Create a builder for configuring the block and state for benching. let mut bench_builder = BlockBenchingBuilder::new(VALIDATOR_COUNT, &spec); @@ -67,13 +67,13 @@ pub fn block_processing_reasonable_case(c: &mut Criterion) { // Set the number of included operations to what we might expect normally. bench_builder.num_proposer_slashings = 0; bench_builder.num_attester_slashings = 0; - bench_builder.num_attestations = (spec.shard_count / spec.slots_per_epoch) as usize; + bench_builder.num_attestations = (spec.shard_count / T::slots_per_epoch()) as usize; bench_builder.num_deposits = 2; bench_builder.num_exits = 2; bench_builder.num_transfers = 2; // Set the state and block to be in the last slot of the 4th epoch. - let last_slot_of_epoch = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); + let last_slot_of_epoch = (T::genesis_epoch() + 4).end_slot(T::slots_per_epoch()); bench_builder.set_slot(last_slot_of_epoch, &spec); // Build all the state caches so the build times aren't included in the benches. diff --git a/eth2/state_processing/src/get_genesis_state.rs b/eth2/state_processing/src/get_genesis_state.rs index 0fe78c1ed3..18a1e7c357 100644 --- a/eth2/state_processing/src/get_genesis_state.rs +++ b/eth2/state_processing/src/get_genesis_state.rs @@ -25,8 +25,8 @@ pub fn get_genesis_beacon_state( // Process genesis activations. 
for validator in &mut state.validator_registry { if validator.effective_balance >= spec.max_effective_balance { - validator.activation_eligibility_epoch = spec.genesis_epoch; - validator.activation_epoch = spec.genesis_epoch; + validator.activation_eligibility_epoch = T::genesis_epoch(); + validator.activation_epoch = T::genesis_epoch(); } } diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 016abe8091..c65c6d17b9 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -142,7 +142,7 @@ pub fn verify_block_signature( [state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?]; let domain = spec.get_domain( - block.slot.epoch(spec.slots_per_epoch), + block.slot.epoch(T::slots_per_epoch()), Domain::BeaconProposer, &state.fork, ); @@ -174,7 +174,7 @@ pub fn process_randao( block.body.randao_reveal.verify( &state.current_epoch().tree_hash_root()[..], spec.get_domain( - block.slot.epoch(spec.slots_per_epoch), + block.slot.epoch(T::slots_per_epoch()), Domain::Randao, &state.fork ), diff --git a/eth2/state_processing/src/per_block_processing/block_processing_builder.rs b/eth2/state_processing/src/per_block_processing/block_processing_builder.rs index 35e736d5fe..05a5a2de24 100644 --- a/eth2/state_processing/src/per_block_processing/block_processing_builder.rs +++ b/eth2/state_processing/src/per_block_processing/block_processing_builder.rs @@ -22,8 +22,8 @@ impl BlockProcessingBuilder { } } - pub fn set_slot(&mut self, slot: Slot, spec: &ChainSpec) { - self.state_builder.teleport_to_slot(slot, &spec); + pub fn set_slot(&mut self, slot: Slot) { + self.state_builder.teleport_to_slot(slot); } pub fn build_caches(&mut self, spec: &ChainSpec) { @@ -55,11 +55,13 @@ impl BlockProcessingBuilder { let keypair = &keypairs[proposer_index]; match randao_sk { - Some(sk) => builder.set_randao_reveal(&sk, &state.fork, spec), - None => 
builder.set_randao_reveal(&keypair.sk, &state.fork, spec), + Some(sk) => builder.set_randao_reveal::(&sk, &state.fork, spec), + None => builder.set_randao_reveal::(&keypair.sk, &state.fork, spec), } - let block = self.block_builder.build(&keypair.sk, &state.fork, spec); + let block = self + .block_builder + .build::(&keypair.sk, &state.fork, spec); (block, state) } diff --git a/eth2/state_processing/src/per_block_processing/tests.rs b/eth2/state_processing/src/per_block_processing/tests.rs index 28ed9c4f0c..6c9593c496 100644 --- a/eth2/state_processing/src/per_block_processing/tests.rs +++ b/eth2/state_processing/src/per_block_processing/tests.rs @@ -9,7 +9,7 @@ pub const VALIDATOR_COUNT: usize = 10; #[test] fn valid_block_ok() { - let spec = FoundationEthSpec::spec(); + let spec = MainnetEthSpec::default_spec(); let builder = get_builder(&spec); let (block, mut state) = builder.build(None, None, &spec); @@ -20,7 +20,7 @@ fn valid_block_ok() { #[test] fn invalid_block_header_state_slot() { - let spec = FoundationEthSpec::spec(); + let spec = MainnetEthSpec::default_spec(); let builder = get_builder(&spec); let (mut block, mut state) = builder.build(None, None, &spec); @@ -39,7 +39,7 @@ fn invalid_block_header_state_slot() { #[test] fn invalid_parent_block_root() { - let spec = FoundationEthSpec::spec(); + let spec = MainnetEthSpec::default_spec(); let builder = get_builder(&spec); let invalid_parent_root = Hash256::from([0xAA; 32]); let (block, mut state) = builder.build(None, Some(invalid_parent_root), &spec); @@ -59,14 +59,14 @@ fn invalid_parent_block_root() { #[test] fn invalid_block_signature() { - let spec = FoundationEthSpec::spec(); + let spec = MainnetEthSpec::default_spec(); let builder = get_builder(&spec); let (mut block, mut state) = builder.build(None, None, &spec); // sign the block with a keypair that is not the expected proposer let keypair = Keypair::random(); let message = block.signed_root(); - let epoch = block.slot.epoch(spec.slots_per_epoch); 
+ let epoch = block.slot.epoch(MainnetEthSpec::slots_per_epoch()); let domain = spec.get_domain(epoch, Domain::BeaconProposer, &state.fork); block.signature = Signature::new(&message, domain, &keypair.sk); @@ -82,7 +82,7 @@ fn invalid_block_signature() { #[test] fn invalid_randao_reveal_signature() { - let spec = FoundationEthSpec::spec(); + let spec = MainnetEthSpec::default_spec(); let builder = get_builder(&spec); // sign randao reveal with random keypair @@ -100,12 +100,13 @@ fn invalid_randao_reveal_signature() { ); } -fn get_builder(spec: &ChainSpec) -> (BlockProcessingBuilder) { +fn get_builder(spec: &ChainSpec) -> (BlockProcessingBuilder) { let mut builder = BlockProcessingBuilder::new(VALIDATOR_COUNT, &spec); // Set the state and block to be in the last slot of the 4th epoch. - let last_slot_of_epoch = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); - builder.set_slot(last_slot_of_epoch, &spec); + let last_slot_of_epoch = + (MainnetEthSpec::genesis_epoch() + 4).end_slot(MainnetEthSpec::slots_per_epoch()); + builder.set_slot(last_slot_of_epoch); builder.build_caches(&spec); (builder) diff --git a/eth2/state_processing/src/per_block_processing/validate_attestation.rs b/eth2/state_processing/src/per_block_processing/validate_attestation.rs index 1058c0d218..379b921f4d 100644 --- a/eth2/state_processing/src/per_block_processing/validate_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/validate_attestation.rs @@ -68,7 +68,7 @@ fn validate_attestation_parametric( } ); verify!( - state.slot <= attestation_slot + spec.slots_per_epoch, + state.slot <= attestation_slot + T::slots_per_epoch(), Invalid::IncludedTooLate { state: state.slot, attestation: attestation_slot diff --git a/eth2/state_processing/src/per_block_processing/verify_indexed_attestation.rs b/eth2/state_processing/src/per_block_processing/verify_indexed_attestation.rs index 6581e516de..f06f1e9000 100644 --- 
a/eth2/state_processing/src/per_block_processing/verify_indexed_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/verify_indexed_attestation.rs @@ -49,7 +49,7 @@ fn verify_indexed_attestation_parametric( ); // Check that nobody signed with custody bit 1 (to be removed in phase 1) - if custody_bit_1_indices.len() > 0 { + if !custody_bit_1_indices.is_empty() { invalid!(Invalid::CustodyBitfieldHasSetBits); } @@ -96,7 +96,7 @@ where state .validator_registry .get(validator_idx as usize) - .ok_or(Error::Invalid(Invalid::UnknownValidator(validator_idx))) + .ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(validator_idx))) .map(|validator| { aggregate_pubkey.add(&validator.pubkey); aggregate_pubkey diff --git a/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs b/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs index 98a9a248cc..744427ad98 100644 --- a/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs +++ b/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs @@ -21,8 +21,8 @@ pub fn verify_proposer_slashing( })?; verify!( - proposer_slashing.header_1.slot.epoch(spec.slots_per_epoch) - == proposer_slashing.header_2.slot.epoch(spec.slots_per_epoch), + proposer_slashing.header_1.slot.epoch(T::slots_per_epoch()) + == proposer_slashing.header_2.slot.epoch(T::slots_per_epoch()), Invalid::ProposalEpochMismatch( proposer_slashing.header_1.slot, proposer_slashing.header_2.slot @@ -40,7 +40,7 @@ pub fn verify_proposer_slashing( ); verify!( - verify_header_signature( + verify_header_signature::( &proposer_slashing.header_1, &proposer.pubkey, &state.fork, @@ -49,7 +49,7 @@ pub fn verify_proposer_slashing( Invalid::BadProposal1Signature ); verify!( - verify_header_signature( + verify_header_signature::( &proposer_slashing.header_2, &proposer.pubkey, &state.fork, @@ -66,7 +66,7 @@ pub fn verify_proposer_slashing( /// Returns `true` if the signature is valid. 
/// /// Spec v0.6.1 -fn verify_header_signature( +fn verify_header_signature( header: &BeaconBlockHeader, pubkey: &PublicKey, fork: &Fork, @@ -74,7 +74,7 @@ fn verify_header_signature( ) -> bool { let message = header.signed_root(); let domain = spec.get_domain( - header.slot.epoch(spec.slots_per_epoch), + header.slot.epoch(T::slots_per_epoch()), Domain::BeaconProposer, fork, ); diff --git a/eth2/state_processing/src/per_block_processing/verify_transfer.rs b/eth2/state_processing/src/per_block_processing/verify_transfer.rs index 15c142d90d..de4cef44f0 100644 --- a/eth2/state_processing/src/per_block_processing/verify_transfer.rs +++ b/eth2/state_processing/src/per_block_processing/verify_transfer.rs @@ -101,7 +101,7 @@ fn verify_transfer_parametric( .get(transfer.sender as usize) .ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.sender)))?; - let epoch = state.slot.epoch(spec.slots_per_epoch); + let epoch = state.slot.epoch(T::slots_per_epoch()); // Ensure one of the following is met: // @@ -136,7 +136,7 @@ fn verify_transfer_parametric( // Verify the transfer signature. let message = transfer.signed_root(); let domain = spec.get_domain( - transfer.slot.epoch(spec.slots_per_epoch), + transfer.slot.epoch(T::slots_per_epoch()), Domain::Transfer, &state.fork, ); diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 42522f4010..05ef7f6586 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -42,7 +42,7 @@ pub fn per_epoch_processing( validator_statuses.process_attestations(&state, spec)?; // Justification and finalization. - process_justification_and_finalization(state, &validator_statuses.total_balances, spec)?; + process_justification_and_finalization(state, &validator_statuses.total_balances)?; // Crosslinks. 
let winning_root_for_shards = process_crosslinks(state, spec)?; @@ -84,9 +84,8 @@ pub fn per_epoch_processing( pub fn process_justification_and_finalization( state: &mut BeaconState, total_balances: &TotalBalances, - spec: &ChainSpec, ) -> Result<(), Error> { - if state.current_epoch() == spec.genesis_epoch { + if state.current_epoch() == T::genesis_epoch() { return Ok(()); } @@ -104,14 +103,14 @@ pub fn process_justification_and_finalization( if total_balances.previous_epoch_target_attesters * 3 >= total_balances.previous_epoch * 2 { state.current_justified_epoch = previous_epoch; state.current_justified_root = - *state.get_block_root_at_epoch(state.current_justified_epoch, spec)?; + *state.get_block_root_at_epoch(state.current_justified_epoch)?; state.justification_bitfield |= 2; } // If the current epoch gets justified, fill the last bit. if total_balances.current_epoch_target_attesters * 3 >= total_balances.current_epoch * 2 { state.current_justified_epoch = current_epoch; state.current_justified_root = - *state.get_block_root_at_epoch(state.current_justified_epoch, spec)?; + *state.get_block_root_at_epoch(state.current_justified_epoch)?; state.justification_bitfield |= 1; } @@ -120,22 +119,22 @@ pub fn process_justification_and_finalization( // The 2nd/3rd/4th most recent epochs are all justified, the 2nd using the 4th as source. if (bitfield >> 1) % 8 == 0b111 && old_previous_justified_epoch == current_epoch - 3 { state.finalized_epoch = old_previous_justified_epoch; - state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch, spec)?; + state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch)?; } // The 2nd/3rd most recent epochs are both justified, the 2nd using the 3rd as source. 
if (bitfield >> 1) % 4 == 0b11 && state.previous_justified_epoch == current_epoch - 2 { state.finalized_epoch = old_previous_justified_epoch; - state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch, spec)?; + state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch)?; } // The 1st/2nd/3rd most recent epochs are all justified, the 1st using the 2nd as source. if bitfield % 8 == 0b111 && state.current_justified_epoch == current_epoch - 2 { state.finalized_epoch = old_current_justified_epoch; - state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch, spec)?; + state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch)?; } // The 1st/2nd most recent epochs are both justified, the 1st using the 2nd as source. if bitfield % 4 == 0b11 && state.current_justified_epoch == current_epoch - 1 { state.finalized_epoch = old_current_justified_epoch; - state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch, spec)?; + state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch)?; } Ok(()) @@ -157,7 +156,7 @@ pub fn process_crosslinks( state.previous_crosslinks = state.current_crosslinks.clone(); - for relative_epoch in vec![RelativeEpoch::Previous, RelativeEpoch::Current] { + for &relative_epoch in &[RelativeEpoch::Previous, RelativeEpoch::Current] { let epoch = relative_epoch.into_epoch(state.current_epoch()); for offset in 0..state.get_epoch_committee_count(relative_epoch)? { let shard = @@ -212,7 +211,7 @@ pub fn process_final_updates( } // Update start shard. - state.latest_start_shard = state.next_epoch_start_shard()?; + state.latest_start_shard = state.next_epoch_start_shard(spec)?; // This is a hack to allow us to update index roots and slashed balances for the next epoch. 
// @@ -241,7 +240,7 @@ pub fn process_final_updates( state.slot -= 1; } - if next_epoch.as_u64() % (T::SlotsPerHistoricalRoot::to_u64() / spec.slots_per_epoch) == 0 { + if next_epoch.as_u64() % (T::SlotsPerHistoricalRoot::to_u64() / T::slots_per_epoch()) == 0 { let historical_batch = state.historical_batch(); state .historical_roots diff --git a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs index 7f98f3ae5b..7ddba6f38a 100644 --- a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs +++ b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs @@ -39,7 +39,7 @@ pub fn process_rewards_and_penalties( winning_root_for_shards: &WinningRootHashSet, spec: &ChainSpec, ) -> Result<(), Error> { - if state.current_epoch() == spec.genesis_epoch { + if state.current_epoch() == T::genesis_epoch() { return Ok(()); } diff --git a/eth2/state_processing/src/per_epoch_processing/tests.rs b/eth2/state_processing/src/per_epoch_processing/tests.rs index 9841a5551a..9fdc82c6f3 100644 --- a/eth2/state_processing/src/per_epoch_processing/tests.rs +++ b/eth2/state_processing/src/per_epoch_processing/tests.rs @@ -8,13 +8,14 @@ use types::*; fn runs_without_error() { Builder::from_env(Env::default().default_filter_or("error")).init(); - let spec = FewValidatorsEthSpec::spec(); + let spec = MinimalEthSpec::default_spec(); - let mut builder: TestingBeaconStateBuilder = + let mut builder: TestingBeaconStateBuilder = TestingBeaconStateBuilder::from_deterministic_keypairs(8, &spec); - let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); - builder.teleport_to_slot(target_slot, &spec); + let target_slot = + (MinimalEthSpec::genesis_epoch() + 4).end_slot(MinimalEthSpec::slots_per_epoch()); + builder.teleport_to_slot(target_slot); let (mut state, _keypairs) = builder.build(); diff --git a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs 
b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs index ed1b968d7d..45ef3419bc 100644 --- a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs +++ b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs @@ -223,7 +223,7 @@ impl ValidatorStatuses { if is_from_epoch(a, state.current_epoch()) { status.is_current_epoch_attester = true; - if target_matches_epoch_start_block(a, state, state.current_epoch(), spec)? { + if target_matches_epoch_start_block(a, state, state.current_epoch())? { status.is_current_epoch_target_attester = true; } } else if is_from_epoch(a, state.previous_epoch()) { @@ -233,7 +233,7 @@ impl ValidatorStatuses { let attestation_slot = state.get_attestation_slot(&a.data)?; let inclusion_slot = attestation_slot + a.inclusion_delay; let relative_epoch = - RelativeEpoch::from_slot(state.slot, inclusion_slot, spec.slots_per_epoch)?; + RelativeEpoch::from_slot(state.slot, inclusion_slot, T::slots_per_epoch())?; status.inclusion_info = Some(InclusionInfo { slot: inclusion_slot, distance: a.inclusion_delay, @@ -244,7 +244,7 @@ impl ValidatorStatuses { )?, }); - if target_matches_epoch_start_block(a, state, state.previous_epoch(), spec)? { + if target_matches_epoch_start_block(a, state, state.previous_epoch())? { status.is_previous_epoch_target_attester = true; } @@ -297,7 +297,7 @@ impl ValidatorStatuses { spec: &ChainSpec, ) -> Result<(), BeaconStateError> { // Loop through each slot in the previous epoch. - for slot in state.previous_epoch().slot_iter(spec.slots_per_epoch) { + for slot in state.previous_epoch().slot_iter(T::slots_per_epoch()) { let crosslink_committees_at_slot = state.get_crosslink_committees_at_slot(slot)?; // Loop through each committee in the slot. 
@@ -336,9 +336,8 @@ fn target_matches_epoch_start_block( a: &PendingAttestation, state: &BeaconState, epoch: Epoch, - spec: &ChainSpec, ) -> Result { - let slot = epoch.start_slot(spec.slots_per_epoch); + let slot = epoch.start_slot(T::slots_per_epoch()); let state_boundary_root = *state.get_block_root(slot)?; Ok(a.data.target_root == state_boundary_root) diff --git a/eth2/state_processing/src/per_slot_processing.rs b/eth2/state_processing/src/per_slot_processing.rs index 97645ab8a9..8adfe988bb 100644 --- a/eth2/state_processing/src/per_slot_processing.rs +++ b/eth2/state_processing/src/per_slot_processing.rs @@ -1,5 +1,4 @@ use crate::*; -use tree_hash::SignedRoot; use types::*; #[derive(Debug, PartialEq)] @@ -17,7 +16,7 @@ pub fn per_slot_processing( ) -> Result<(), Error> { cache_state(state, spec)?; - if (state.slot > spec.genesis_slot) && ((state.slot + 1) % spec.slots_per_epoch == 0) { + if (state.slot > spec.genesis_slot) && ((state.slot + 1) % T::slots_per_epoch() == 0) { per_epoch_processing(state, spec)?; } @@ -44,7 +43,7 @@ fn cache_state(state: &mut BeaconState, spec: &ChainSpec) -> Resu // Store the previous slot's post state transition root. state.set_state_root(previous_slot, previous_slot_state_root)?; - let latest_block_root = Hash256::from_slice(&state.latest_block_header.signed_root()[..]); + let latest_block_root = state.latest_block_header.canonical_root(); state.set_block_root(previous_slot, latest_block_root)?; // Set the state slot back to what it should be. 
diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index b5dbf2a4ac..825f50c325 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -1,4 +1,4 @@ -use self::committee_cache::{get_active_validator_indices, CommitteeCache}; +use self::committee_cache::get_active_validator_indices; use self::exit_cache::ExitCache; use crate::test_utils::TestRandom; use crate::*; @@ -15,6 +15,7 @@ use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::{CachedTreeHash, TreeHash}; +pub use self::committee_cache::CommitteeCache; pub use beacon_state_types::*; mod beacon_state_types; @@ -111,7 +112,7 @@ where pub previous_crosslinks: FixedLenVec, pub latest_block_roots: FixedLenVec, #[compare_fields(as_slice)] - latest_state_roots: FixedLenVec, + pub latest_state_roots: FixedLenVec, #[compare_fields(as_slice)] latest_active_index_roots: FixedLenVec, latest_slashed_balances: FixedLenVec, @@ -163,7 +164,7 @@ impl BeaconState { spec: &ChainSpec, ) -> BeaconState { let initial_crosslink = Crosslink { - epoch: spec.genesis_epoch, + epoch: T::genesis_epoch(), previous_crosslink_root: spec.zero_hash, crosslink_data_root: spec.zero_hash, }; @@ -172,7 +173,7 @@ impl BeaconState { // Misc slot: spec.genesis_slot, genesis_time, - fork: Fork::genesis(spec), + fork: Fork::genesis(T::genesis_epoch()), // Validator registry validator_registry: vec![], // Set later in the function. 
@@ -188,12 +189,12 @@ impl BeaconState { // Finality previous_epoch_attestations: vec![], current_epoch_attestations: vec![], - previous_justified_epoch: spec.genesis_epoch, - current_justified_epoch: spec.genesis_epoch, + previous_justified_epoch: T::genesis_epoch(), + current_justified_epoch: T::genesis_epoch(), previous_justified_root: spec.zero_hash, current_justified_root: spec.zero_hash, justification_bitfield: 0, - finalized_epoch: spec.genesis_epoch, + finalized_epoch: T::genesis_epoch(), finalized_root: spec.zero_hash, // Recent state @@ -300,10 +301,10 @@ impl BeaconState { Ok(cache.epoch_start_shard()) } - pub fn next_epoch_start_shard(&self) -> Result { + pub fn next_epoch_start_shard(&self, spec: &ChainSpec) -> Result { let cache = self.cache(RelativeEpoch::Current)?; let active_validator_count = cache.active_validator_count(); - let shard_delta = T::get_shard_delta(active_validator_count); + let shard_delta = T::get_shard_delta(active_validator_count, spec.target_committee_size); Ok((self.latest_start_shard + shard_delta) % T::ShardCount::to_u64()) } @@ -422,7 +423,7 @@ impl BeaconState { }; let effective_balance = self.validator_registry[candidate_index].effective_balance; if (effective_balance * MAX_RANDOM_BYTE) - >= (spec.max_effective_balance * random_byte as u64) + >= (spec.max_effective_balance * u64::from(random_byte)) { break candidate_index; } @@ -453,12 +454,8 @@ impl BeaconState { /// /// Spec v0.6.0 // FIXME(sproul): name swap with get_block_root - pub fn get_block_root_at_epoch( - &self, - epoch: Epoch, - spec: &ChainSpec, - ) -> Result<&Hash256, BeaconStateError> { - self.get_block_root(epoch.start_slot(spec.slots_per_epoch)) + pub fn get_block_root_at_epoch(&self, epoch: Epoch) -> Result<&Hash256, BeaconStateError> { + self.get_block_root(epoch.start_slot(T::slots_per_epoch())) } /// Sets the block root for some given slot. 
diff --git a/eth2/types/src/beacon_state/beacon_state_types.rs b/eth2/types/src/beacon_state/beacon_state_types.rs index b8c3a46ed2..494884cd83 100644 --- a/eth2/types/src/beacon_state/beacon_state_types.rs +++ b/eth2/types/src/beacon_state/beacon_state_types.rs @@ -1,5 +1,5 @@ use crate::*; -use fixed_len_vec::typenum::{Unsigned, U1024, U8, U8192}; +use fixed_len_vec::typenum::{Unsigned, U0, U1024, U64, U8, U8192}; use serde_derive::{Deserialize, Serialize}; use std::fmt::Debug; @@ -9,14 +9,24 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { type LatestRandaoMixesLength: Unsigned + Clone + Sync + Send + Debug + PartialEq; type LatestActiveIndexRootsLength: Unsigned + Clone + Sync + Send + Debug + PartialEq; type LatestSlashedExitLength: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /// Note: `SlotsPerEpoch` is not necessarily required to be a compile-time constant. We include + /// it here just for the convenience of not passing `slots_per_epoch` around all the time. + type SlotsPerEpoch: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type GenesisEpoch: Unsigned + Clone + Sync + Send + Debug + PartialEq; - fn spec() -> ChainSpec; + fn default_spec() -> ChainSpec; + + fn genesis_epoch() -> Epoch { + Epoch::new(Self::GenesisEpoch::to_u64()) + } /// Return the number of committees in one epoch. /// /// Spec v0.6.1 - fn get_epoch_committee_count(active_validator_count: usize) -> usize { - let target_committee_size = Self::spec().target_committee_size; + fn get_epoch_committee_count( + active_validator_count: usize, + target_committee_size: usize, + ) -> usize { let shard_count = Self::shard_count(); let slots_per_epoch = Self::slots_per_epoch() as usize; @@ -32,10 +42,10 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { /// Return the number of shards to increment `state.latest_start_shard` by in a given epoch. 
/// /// Spec v0.6.3 - fn get_shard_delta(active_validator_count: usize) -> u64 { + fn get_shard_delta(active_validator_count: usize, target_committee_size: usize) -> u64 { std::cmp::min( - Self::get_epoch_committee_count(active_validator_count) as u64, - Self::ShardCount::to_u64() - Self::ShardCount::to_u64() / Self::spec().slots_per_epoch, + Self::get_epoch_committee_count(active_validator_count, target_committee_size) as u64, + Self::ShardCount::to_u64() - Self::ShardCount::to_u64() / Self::slots_per_epoch(), ) } @@ -45,21 +55,14 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { /// basic sense. This count is not required to provide any security guarantees regarding /// decentralization, entropy, etc. fn minimum_validator_count() -> usize { - Self::slots_per_epoch() as usize + Self::SlotsPerEpoch::to_usize() } /// Returns the `SLOTS_PER_EPOCH` constant for this specification. /// /// Spec v0.6.1 fn slots_per_epoch() -> u64 { - Self::spec().slots_per_epoch - } - - /// Returns the `SLOTS_PER_EPOCH` constant for this specification. - /// - /// Spec v0.6.1 - fn genesis_epoch() -> Epoch { - Self::spec().genesis_epoch + Self::SlotsPerEpoch::to_u64() } /// Returns the `SHARD_COUNT` constant for this specification. 
@@ -102,54 +105,40 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { /// /// Spec v0.6.1 #[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)] -pub struct FoundationEthSpec; +pub struct MainnetEthSpec; -impl EthSpec for FoundationEthSpec { +impl EthSpec for MainnetEthSpec { type ShardCount = U1024; type SlotsPerHistoricalRoot = U8192; type LatestRandaoMixesLength = U8192; type LatestActiveIndexRootsLength = U8192; type LatestSlashedExitLength = U8192; + type SlotsPerEpoch = U64; + type GenesisEpoch = U0; - fn spec() -> ChainSpec { - ChainSpec::foundation() + fn default_spec() -> ChainSpec { + ChainSpec::mainnet() } } -pub type FoundationBeaconState = BeaconState; +pub type FoundationBeaconState = BeaconState; /// Ethereum Foundation specifications, modified to be suitable for < 1000 validators. #[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)] -pub struct FewValidatorsEthSpec; +pub struct MinimalEthSpec; -impl EthSpec for FewValidatorsEthSpec { +impl EthSpec for MinimalEthSpec { type ShardCount = U8; type SlotsPerHistoricalRoot = U8192; type LatestRandaoMixesLength = U8192; type LatestActiveIndexRootsLength = U8192; type LatestSlashedExitLength = U8192; + type SlotsPerEpoch = U8; + type GenesisEpoch = U0; - fn spec() -> ChainSpec { - ChainSpec::few_validators() + fn default_spec() -> ChainSpec { + ChainSpec::minimal() } } -pub type FewValidatorsBeaconState = BeaconState; - -/// Specifications suitable for a small-scale (< 1000 validators) lighthouse testnet. 
-#[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)] -pub struct LighthouseTestnetEthSpec; - -impl EthSpec for LighthouseTestnetEthSpec { - type ShardCount = U8; - type SlotsPerHistoricalRoot = U8192; - type LatestRandaoMixesLength = U8192; - type LatestActiveIndexRootsLength = U8192; - type LatestSlashedExitLength = U8192; - - fn spec() -> ChainSpec { - ChainSpec::lighthouse_testnet() - } -} - -pub type LighthouseTestnetBeaconState = BeaconState; +pub type MinimalBeaconState = BeaconState; diff --git a/eth2/types/src/beacon_state/committee_cache.rs b/eth2/types/src/beacon_state/committee_cache.rs index b5de99c835..4efceb8ac5 100644 --- a/eth2/types/src/beacon_state/committee_cache.rs +++ b/eth2/types/src/beacon_state/committee_cache.rs @@ -2,6 +2,7 @@ use super::BeaconState; use crate::*; use core::num::NonZeroUsize; use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; use std::ops::Range; use swap_or_not_shuffle::shuffle_list; @@ -9,7 +10,7 @@ mod tests; /// Computes and stores the shuffling for an epoch. Provides various getters to allow callers to /// read the committees for the given epoch. 
-#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] +#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize, Encode, Decode)] pub struct CommitteeCache { initialized_epoch: Option, shuffling: Vec, @@ -44,12 +45,16 @@ impl CommitteeCache { return Err(Error::InsufficientValidators); } - let committee_count = T::get_epoch_committee_count(active_validator_indices.len()) as usize; + let committee_count = T::get_epoch_committee_count( + active_validator_indices.len(), + spec.target_committee_size, + ) as usize; let shuffling_start_shard = match relative_epoch { RelativeEpoch::Current => state.latest_start_shard, RelativeEpoch::Previous => { - let shard_delta = T::get_shard_delta(active_validator_indices.len()); + let shard_delta = + T::get_shard_delta(active_validator_indices.len(), spec.target_committee_size); (state.latest_start_shard + T::ShardCount::to_u64() - shard_delta) % T::ShardCount::to_u64() @@ -57,7 +62,8 @@ impl CommitteeCache { RelativeEpoch::Next => { let current_active_validators = get_active_validator_count(&state.validator_registry, state.current_epoch()); - let shard_delta = T::get_shard_delta(current_active_validators); + let shard_delta = + T::get_shard_delta(current_active_validators, spec.target_committee_size); (state.latest_start_shard + shard_delta) % T::ShardCount::to_u64() } @@ -152,7 +158,6 @@ impl CommitteeCache { let i = self.shuffled_position(validator_index)?; (0..self.committee_count) - .into_iter() .map(|nth_committee| (nth_committee, self.compute_committee_range(nth_committee))) .find(|(_, range)| { if let Some(range) = range { diff --git a/eth2/types/src/beacon_state/committee_cache/tests.rs b/eth2/types/src/beacon_state/committee_cache/tests.rs index 4e31d0fb2c..b7054a7fd1 100644 --- a/eth2/types/src/beacon_state/committee_cache/tests.rs +++ b/eth2/types/src/beacon_state/committee_cache/tests.rs @@ -20,12 +20,12 @@ fn default_values() { } fn new_state(validator_count: usize, slot: Slot) -> BeaconState { - 
let spec = &T::spec(); + let spec = &T::default_spec(); let mut builder = TestingBeaconStateBuilder::from_single_keypair(validator_count, &Keypair::random(), spec); - builder.teleport_to_slot(slot, spec); + builder.teleport_to_slot(slot); let (state, _keypairs) = builder.build(); @@ -34,8 +34,8 @@ fn new_state(validator_count: usize, slot: Slot) -> BeaconState { #[test] fn fails_without_validators() { - let state = new_state::(0, Slot::new(0)); - let spec = &FewValidatorsEthSpec::spec(); + let state = new_state::(0, Slot::new(0)); + let spec = &MinimalEthSpec::default_spec(); assert_eq!( CommitteeCache::initialized(&state, state.current_epoch(), &spec), @@ -45,8 +45,8 @@ fn fails_without_validators() { #[test] fn initializes_with_the_right_epoch() { - let state = new_state::(16, Slot::new(0)); - let spec = &FewValidatorsEthSpec::spec(); + let state = new_state::(16, Slot::new(0)); + let spec = &MinimalEthSpec::default_spec(); let cache = CommitteeCache::default(); assert_eq!(cache.initialized_epoch, None); @@ -63,14 +63,14 @@ fn initializes_with_the_right_epoch() { #[test] fn shuffles_for_the_right_epoch() { - let num_validators = FewValidatorsEthSpec::minimum_validator_count() * 2; + let num_validators = MinimalEthSpec::minimum_validator_count() * 2; let epoch = Epoch::new(100_000_000); - let slot = epoch.start_slot(FewValidatorsEthSpec::slots_per_epoch()); + let slot = epoch.start_slot(MinimalEthSpec::slots_per_epoch()); - let mut state = new_state::(num_validators, slot); - let spec = &FewValidatorsEthSpec::spec(); + let mut state = new_state::(num_validators, slot); + let spec = &MinimalEthSpec::default_spec(); - let distinct_hashes: Vec = (0..FewValidatorsEthSpec::latest_randao_mixes_length()) + let distinct_hashes: Vec = (0..MinimalEthSpec::latest_randao_mixes_length()) .into_iter() .map(|i| Hash256::from(i as u64)) .collect(); @@ -118,17 +118,19 @@ fn shuffles_for_the_right_epoch() { #[test] fn can_start_on_any_shard() { - let num_validators = 
FewValidatorsEthSpec::minimum_validator_count() * 2; + let num_validators = MinimalEthSpec::minimum_validator_count() * 2; let epoch = Epoch::new(100_000_000); - let slot = epoch.start_slot(FewValidatorsEthSpec::slots_per_epoch()); + let slot = epoch.start_slot(MinimalEthSpec::slots_per_epoch()); - let mut state = new_state::(num_validators, slot); - let spec = &FewValidatorsEthSpec::spec(); + let mut state = new_state::(num_validators, slot); + let spec = &MinimalEthSpec::default_spec(); - let shard_delta = FewValidatorsEthSpec::get_shard_delta(num_validators); - let shard_count = FewValidatorsEthSpec::shard_count() as u64; + let target_committee_size = MinimalEthSpec::default_spec().target_committee_size; - for i in 0..FewValidatorsEthSpec::shard_count() as u64 { + let shard_delta = MinimalEthSpec::get_shard_delta(num_validators, target_committee_size); + let shard_count = MinimalEthSpec::shard_count() as u64; + + for i in 0..MinimalEthSpec::shard_count() as u64 { state.latest_start_shard = i; let cache = CommitteeCache::initialized(&state, state.current_epoch(), spec).unwrap(); @@ -156,15 +158,17 @@ impl EthSpec for ExcessShardsEthSpec { type LatestRandaoMixesLength = U8192; type LatestActiveIndexRootsLength = U8192; type LatestSlashedExitLength = U8192; + type SlotsPerEpoch = U8; + type GenesisEpoch = U0; - fn spec() -> ChainSpec { - ChainSpec::few_validators() + fn default_spec() -> ChainSpec { + ChainSpec::minimal() } } #[test] fn starts_on_the_correct_shard() { - let spec = &ExcessShardsEthSpec::spec(); + let spec = &ExcessShardsEthSpec::default_spec(); let num_validators = ExcessShardsEthSpec::shard_count(); @@ -206,14 +210,16 @@ fn starts_on_the_correct_shard() { let previous_shards = ExcessShardsEthSpec::get_epoch_committee_count( get_active_validator_count(&state.validator_registry, previous_epoch), + spec.target_committee_size, ); let current_shards = ExcessShardsEthSpec::get_epoch_committee_count( get_active_validator_count(&state.validator_registry, 
current_epoch), + spec.target_committee_size, + ); + let next_shards = ExcessShardsEthSpec::get_epoch_committee_count( + get_active_validator_count(&state.validator_registry, next_epoch), + spec.target_committee_size, ); - let next_shards = ExcessShardsEthSpec::get_epoch_committee_count(get_active_validator_count( - &state.validator_registry, - next_epoch, - )); assert_eq!( previous_shards as usize, diff --git a/eth2/types/src/beacon_state/tests.rs b/eth2/types/src/beacon_state/tests.rs index 588d24aa8b..316a901512 100644 --- a/eth2/types/src/beacon_state/tests.rs +++ b/eth2/types/src/beacon_state/tests.rs @@ -7,7 +7,7 @@ ssz_tests!(FoundationBeaconState); cached_tree_hash_tests!(FoundationBeaconState); fn test_beacon_proposer_index() { - let spec = T::spec(); + let spec = T::default_spec(); let relative_epoch = RelativeEpoch::Current; // Build a state for testing. @@ -53,7 +53,7 @@ fn test_beacon_proposer_index() { #[test] fn beacon_proposer_index() { - test_beacon_proposer_index::(); + test_beacon_proposer_index::(); } /// Should produce (note the set notation brackets): @@ -61,7 +61,7 @@ fn beacon_proposer_index() { /// (current_epoch - LATEST_ACTIVE_INDEX_ROOTS_LENGTH + ACTIVATION_EXIT_DELAY, current_epoch + /// ACTIVATION_EXIT_DELAY] fn active_index_range(current_epoch: Epoch) -> RangeInclusive { - let delay = T::spec().activation_exit_delay; + let delay = T::default_spec().activation_exit_delay; let start: i32 = current_epoch.as_u64() as i32 - T::latest_active_index_roots() as i32 + delay as i32; @@ -79,7 +79,7 @@ fn active_index_range(current_epoch: Epoch) -> RangeInclusive /// Test getting an active index root at the start and end of the valid range, and one either side /// of that range. 
fn test_active_index(state_slot: Slot) { - let spec = T::spec(); + let spec = T::default_spec(); let builder: TestingBeaconStateBuilder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(16, &spec); let (mut state, _keypairs) = builder.build(); @@ -115,11 +115,11 @@ fn test_active_index(state_slot: Slot) { #[test] fn get_active_index_root_index() { - test_active_index::(Slot::new(0)); + test_active_index::(Slot::new(0)); - let epoch = Epoch::from(FoundationEthSpec::latest_active_index_roots() * 4); - let slot = epoch.start_slot(FoundationEthSpec::slots_per_epoch()); - test_active_index::(slot); + let epoch = Epoch::from(MainnetEthSpec::latest_active_index_roots() * 4); + let slot = epoch.start_slot(MainnetEthSpec::slots_per_epoch()); + test_active_index::(slot); } /// Test that @@ -133,8 +133,8 @@ fn test_cache_initialization<'a, T: EthSpec>( spec: &ChainSpec, ) { let slot = relative_epoch - .into_epoch(state.slot.epoch(spec.slots_per_epoch)) - .start_slot(spec.slots_per_epoch); + .into_epoch(state.slot.epoch(T::slots_per_epoch())) + .start_slot(T::slots_per_epoch()); // Assuming the cache isn't already built, assert that a call to a cache-using function fails. 
assert_eq!( @@ -166,13 +166,14 @@ fn test_cache_initialization<'a, T: EthSpec>( #[test] fn cache_initialization() { - let spec = FewValidatorsEthSpec::spec(); + let spec = MinimalEthSpec::default_spec(); - let builder: TestingBeaconStateBuilder = + let builder: TestingBeaconStateBuilder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(16, &spec); let (mut state, _keypairs) = builder.build(); - state.slot = (spec.genesis_epoch + 1).start_slot(spec.slots_per_epoch); + state.slot = + (MinimalEthSpec::genesis_epoch() + 1).start_slot(MinimalEthSpec::slots_per_epoch()); test_cache_initialization(&mut state, RelativeEpoch::Previous, &spec); test_cache_initialization(&mut state, RelativeEpoch::Current, &spec); @@ -202,7 +203,7 @@ fn tree_hash_cache() { #[cfg(test)] mod committees { use super::*; - use crate::beacon_state::FewValidatorsEthSpec; + use crate::beacon_state::MinimalEthSpec; use swap_or_not_shuffle::shuffle_list; fn execute_committee_consistency_test( @@ -234,7 +235,7 @@ mod committees { (start_shard..start_shard + T::shard_count() as u64).into_iter(); // Loop through all slots in the epoch being tested. 
- for slot in epoch.slot_iter(spec.slots_per_epoch) { + for slot in epoch.slot_iter(T::slots_per_epoch()) { let crosslink_committees = state.get_crosslink_committees_at_slot(slot).unwrap(); // Assert that the number of committees in this slot is consistent with the reported number @@ -290,7 +291,7 @@ mod committees { state_epoch: Epoch, cache_epoch: RelativeEpoch, ) { - let spec = &T::spec(); + let spec = &T::default_spec(); let mut builder = TestingBeaconStateBuilder::from_single_keypair( validator_count, @@ -298,8 +299,8 @@ mod committees { spec, ); - let slot = state_epoch.start_slot(spec.slots_per_epoch); - builder.teleport_to_slot(slot, spec); + let slot = state_epoch.start_slot(T::slots_per_epoch()); + builder.teleport_to_slot(slot); let (mut state, _keypairs): (BeaconState, _) = builder.build(); @@ -325,7 +326,7 @@ mod committees { } fn committee_consistency_test_suite(cached_epoch: RelativeEpoch) { - let spec = T::spec(); + let spec = T::default_spec(); let validator_count = (T::shard_count() * spec.target_committee_size) + 1; @@ -333,29 +334,29 @@ mod committees { committee_consistency_test::( validator_count as usize, - spec.genesis_epoch + 4, + T::genesis_epoch() + 4, cached_epoch, ); committee_consistency_test::( validator_count as usize, - spec.genesis_epoch + T::slots_per_historical_root() as u64 * T::slots_per_epoch() * 4, + T::genesis_epoch() + T::slots_per_historical_root() as u64 * T::slots_per_epoch() * 4, cached_epoch, ); } #[test] fn current_epoch_committee_consistency() { - committee_consistency_test_suite::(RelativeEpoch::Current); + committee_consistency_test_suite::(RelativeEpoch::Current); } #[test] fn previous_epoch_committee_consistency() { - committee_consistency_test_suite::(RelativeEpoch::Previous); + committee_consistency_test_suite::(RelativeEpoch::Previous); } #[test] fn next_epoch_committee_consistency() { - committee_consistency_test_suite::(RelativeEpoch::Next); + committee_consistency_test_suite::(RelativeEpoch::Next); } } diff 
--git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index 20aa6fcdbc..89ea97070c 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -1,7 +1,7 @@ use crate::*; use int_to_bytes::int_to_bytes4; -use serde_derive::Deserialize; -use test_utils::u8_from_hex_str; +use serde_derive::{Deserialize, Serialize}; +use test_utils::{u8_from_hex_str, u8_to_hex_str}; /// Each of the BLS signature domains. /// @@ -18,7 +18,7 @@ pub enum Domain { /// Holds all the "constants" for a BeaconChain. /// /// Spec v0.6.1 -#[derive(PartialEq, Debug, Clone, Deserialize)] +#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] #[serde(default)] pub struct ChainSpec { /* @@ -48,18 +48,19 @@ pub struct ChainSpec { * Initial Values */ pub genesis_slot: Slot, - pub genesis_epoch: Epoch, + // Skipped because serde TOML can't handle u64::max_value, the typical value for this field. + #[serde(skip_serializing)] pub far_future_epoch: Epoch, pub zero_hash: Hash256, - #[serde(deserialize_with = "u8_from_hex_str")] + #[serde(deserialize_with = "u8_from_hex_str", serialize_with = "u8_to_hex_str")] pub bls_withdrawal_prefix_byte: u8, /* * Time parameters */ + pub genesis_time: u64, pub seconds_per_slot: u64, pub min_attestation_inclusion_delay: u64, - pub slots_per_epoch: u64, pub min_seed_lookahead: Epoch, pub activation_exit_delay: u64, pub slots_per_eth1_voting_period: u64, @@ -137,7 +138,7 @@ impl ChainSpec { /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. 
/// /// Spec v0.6.1 - pub(crate) fn foundation() -> Self { + pub fn mainnet() -> Self { Self { /* * Misc @@ -166,7 +167,6 @@ impl ChainSpec { * Initial Values */ genesis_slot: Slot::new(0), - genesis_epoch: Epoch::new(0), far_future_epoch: Epoch::new(u64::max_value()), zero_hash: Hash256::zero(), bls_withdrawal_prefix_byte: 0, @@ -174,9 +174,9 @@ impl ChainSpec { /* * Time parameters */ + genesis_time: u64::from(u32::max_value()), seconds_per_slot: 6, min_attestation_inclusion_delay: 4, - slots_per_epoch: 64, min_seed_lookahead: Epoch::new(1), activation_exit_delay: 4, slots_per_eth1_voting_period: 1_024, @@ -219,47 +219,35 @@ impl ChainSpec { * Boot nodes */ boot_nodes: vec![], - chain_id: 1, // foundation chain id + chain_id: 1, // mainnet chain id } } - /// Returns a `ChainSpec` compatible with the Lighthouse testnet specification. - /// - /// Spec v0.4.0 - pub(crate) fn lighthouse_testnet() -> Self { - /* - * Lighthouse testnet bootnodes - */ + /// Returns a `ChainSpec` compatible with the specification suitable for 8 validators. + pub fn minimal() -> Self { + let genesis_slot = Slot::new(0); + + // Note: these bootnodes are placeholders. + // + // Should be updated once static bootnodes exist. let boot_nodes = vec!["/ip4/127.0.0.1/tcp/9000" .parse() .expect("correct multiaddr")]; Self { boot_nodes, - chain_id: 2, // lighthouse testnet chain id - ..ChainSpec::few_validators() - } - } - - /// Returns a `ChainSpec` compatible with the specification suitable for 8 validators. 
- pub(crate) fn few_validators() -> Self { - let genesis_slot = Slot::new(0); - let slots_per_epoch = 8; - let genesis_epoch = genesis_slot.epoch(slots_per_epoch); - - Self { target_committee_size: 1, + chain_id: 2, // lighthouse testnet chain id genesis_slot, - genesis_epoch, - slots_per_epoch, - ..ChainSpec::foundation() + shuffle_round_count: 10, + ..ChainSpec::mainnet() } } } impl Default for ChainSpec { fn default() -> Self { - Self::foundation() + Self::mainnet() } } @@ -269,12 +257,12 @@ mod tests { use int_to_bytes::int_to_bytes8; #[test] - fn test_foundation_spec_can_be_constructed() { - let _ = ChainSpec::foundation(); + fn test_mainnet_spec_can_be_constructed() { + let _ = ChainSpec::mainnet(); } fn test_domain(domain_type: Domain, raw_domain: u32, spec: &ChainSpec) { - let fork = Fork::genesis(&spec); + let fork = Fork::genesis(Epoch::new(0)); let epoch = Epoch::new(0); let domain = spec.get_domain(epoch, domain_type, &fork); @@ -287,7 +275,7 @@ mod tests { #[test] fn test_get_domain() { - let spec = ChainSpec::foundation(); + let spec = ChainSpec::mainnet(); test_domain(Domain::BeaconProposer, spec.domain_beacon_proposer, &spec); test_domain(Domain::Randao, spec.domain_randao, &spec); diff --git a/eth2/types/src/fork.rs b/eth2/types/src/fork.rs index eb4e183f25..47ff299168 100644 --- a/eth2/types/src/fork.rs +++ b/eth2/types/src/fork.rs @@ -1,6 +1,6 @@ use crate::{ test_utils::{fork_from_hex_str, TestRandom}, - ChainSpec, Epoch, + Epoch, }; use serde_derive::{Deserialize, Serialize}; @@ -36,11 +36,11 @@ impl Fork { /// Initialize the `Fork` from the genesis parameters in the `spec`. 
/// /// Spec v0.6.1 - pub fn genesis(spec: &ChainSpec) -> Self { + pub fn genesis(genesis_epoch: Epoch) -> Self { Self { previous_version: [0; 4], current_version: [0; 4], - epoch: spec.genesis_epoch, + epoch: genesis_epoch, } } @@ -63,13 +63,9 @@ mod tests { cached_tree_hash_tests!(Fork); fn test_genesis(epoch: Epoch) { - let mut spec = ChainSpec::foundation(); + let fork = Fork::genesis(epoch); - spec.genesis_epoch = epoch; - - let fork = Fork::genesis(&spec); - - assert_eq!(fork.epoch, spec.genesis_epoch, "epoch incorrect"); + assert_eq!(fork.epoch, epoch, "epoch incorrect"); assert_eq!( fork.previous_version, fork.current_version, "previous and current are not identical" diff --git a/eth2/types/src/historical_batch.rs b/eth2/types/src/historical_batch.rs index 3480508dc7..0d8916216d 100644 --- a/eth2/types/src/historical_batch.rs +++ b/eth2/types/src/historical_batch.rs @@ -31,7 +31,7 @@ pub struct HistoricalBatch { mod tests { use super::*; - pub type FoundationHistoricalBatch = HistoricalBatch; + pub type FoundationHistoricalBatch = HistoricalBatch; ssz_tests!(FoundationHistoricalBatch); cached_tree_hash_tests!(FoundationHistoricalBatch); diff --git a/eth2/types/src/slot_epoch.rs b/eth2/types/src/slot_epoch.rs index 82cee0d75e..9a7808da4c 100644 --- a/eth2/types/src/slot_epoch.rs +++ b/eth2/types/src/slot_epoch.rs @@ -23,6 +23,7 @@ use std::iter::Iterator; use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; #[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)] +#[serde(transparent)] pub struct Slot(u64); #[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)] @@ -76,7 +77,7 @@ impl Epoch { /// Position of some slot inside an epoch, if any. /// /// E.g., the first `slot` in `epoch` is at position `0`. 
- pub fn position(&self, slot: Slot, slots_per_epoch: u64) -> Option { + pub fn position(self, slot: Slot, slots_per_epoch: u64) -> Option { let start = self.start_slot(slots_per_epoch); let end = self.end_slot(slots_per_epoch); diff --git a/eth2/types/src/slot_epoch_macros.rs b/eth2/types/src/slot_epoch_macros.rs index 1e24f8e997..5e02e40c17 100644 --- a/eth2/types/src/slot_epoch_macros.rs +++ b/eth2/types/src/slot_epoch_macros.rs @@ -184,7 +184,7 @@ macro_rules! impl_display { key: slog::Key, serializer: &mut slog::Serializer, ) -> slog::Result { - self.0.serialize(record, key, serializer) + slog::Value::serialize(&self.0, record, key, serializer) } } }; diff --git a/eth2/types/src/test_utils/builders/testing_attestation_data_builder.rs b/eth2/types/src/test_utils/builders/testing_attestation_data_builder.rs index 2150f5433a..0b4aa29876 100644 --- a/eth2/types/src/test_utils/builders/testing_attestation_data_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_attestation_data_builder.rs @@ -21,7 +21,7 @@ impl TestingAttestationDataBuilder { let previous_epoch = state.previous_epoch(); let is_previous_epoch = - state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch); + state.slot.epoch(T::slots_per_epoch()) != slot.epoch(T::slots_per_epoch()); let source_epoch = if is_previous_epoch { state.previous_justified_epoch @@ -37,11 +37,11 @@ impl TestingAttestationDataBuilder { let target_root = if is_previous_epoch { *state - .get_block_root(previous_epoch.start_slot(spec.slots_per_epoch)) + .get_block_root(previous_epoch.start_slot(T::slots_per_epoch())) .unwrap() } else { *state - .get_block_root(current_epoch.start_slot(spec.slots_per_epoch)) + .get_block_root(current_epoch.start_slot(T::slots_per_epoch())) .unwrap() }; @@ -57,7 +57,7 @@ impl TestingAttestationDataBuilder { }; let source_root = *state - .get_block_root(source_epoch.start_slot(spec.slots_per_epoch)) + .get_block_root(source_epoch.start_slot(T::slots_per_epoch())) .unwrap(); 
let data = AttestationData { diff --git a/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs index 941ad8fdd9..36bbe2d37c 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs @@ -36,9 +36,9 @@ impl TestingBeaconBlockBuilder { /// Signs the block. /// /// Modifying the block after signing may invalidate the signature. - pub fn sign(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { + pub fn sign(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { let message = self.block.signed_root(); - let epoch = self.block.slot.epoch(spec.slots_per_epoch); + let epoch = self.block.slot.epoch(T::slots_per_epoch()); let domain = spec.get_domain(epoch, Domain::BeaconProposer, fork); self.block.signature = Signature::new(&message, domain, sk); } @@ -46,8 +46,8 @@ impl TestingBeaconBlockBuilder { /// Sets the randao to be a signature across the blocks epoch. /// /// Modifying the block's slot after signing may invalidate the signature. - pub fn set_randao_reveal(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { - let epoch = self.block.slot.epoch(spec.slots_per_epoch); + pub fn set_randao_reveal(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { + let epoch = self.block.slot.epoch(T::slots_per_epoch()); let message = epoch.tree_hash_root(); let domain = spec.get_domain(epoch, Domain::Randao, fork); self.block.body.randao_reveal = Signature::new(&message, domain, sk); @@ -59,14 +59,15 @@ impl TestingBeaconBlockBuilder { } /// Inserts a signed, valid `ProposerSlashing` for the validator. 
- pub fn insert_proposer_slashing( + pub fn insert_proposer_slashing( &mut self, validator_index: u64, secret_key: &SecretKey, fork: &Fork, spec: &ChainSpec, ) { - let proposer_slashing = build_proposer_slashing(validator_index, secret_key, fork, spec); + let proposer_slashing = + build_proposer_slashing::(validator_index, secret_key, fork, spec); self.block.body.proposer_slashings.push(proposer_slashing); } @@ -115,7 +116,7 @@ impl TestingBeaconBlockBuilder { // - The slot is too old to be included in a block at this slot. // - The `MAX_ATTESTATIONS`. loop { - if state.slot >= slot + spec.slots_per_epoch { + if state.slot >= slot + T::slots_per_epoch() { break; } @@ -194,7 +195,7 @@ impl TestingBeaconBlockBuilder { builder.set_index(index); builder.sign( &keypair, - state.slot.epoch(spec.slots_per_epoch), + state.slot.epoch(T::slots_per_epoch()), &state.fork, spec, ); @@ -211,7 +212,7 @@ impl TestingBeaconBlockBuilder { spec: &ChainSpec, ) { let mut builder = TestingVoluntaryExitBuilder::new( - state.slot.epoch(spec.slots_per_epoch), + state.slot.epoch(T::slots_per_epoch()), validator_index, ); @@ -234,14 +235,19 @@ impl TestingBeaconBlockBuilder { spec: &ChainSpec, ) { let mut builder = TestingTransferBuilder::new(from, to, amount, state.slot); - builder.sign(keypair, &state.fork, spec); + builder.sign::(keypair, &state.fork, spec); self.block.body.transfers.push(builder.build()) } /// Signs and returns the block, consuming the builder. - pub fn build(mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) -> BeaconBlock { - self.sign(sk, fork, spec); + pub fn build( + mut self, + sk: &SecretKey, + fork: &Fork, + spec: &ChainSpec, + ) -> BeaconBlock { + self.sign::(sk, fork, spec); self.block } @@ -254,7 +260,7 @@ impl TestingBeaconBlockBuilder { /// Builds an `ProposerSlashing` for some `validator_index`. /// /// Signs the message using a `BeaconChainHarness`. 
-fn build_proposer_slashing( +fn build_proposer_slashing( validator_index: u64, secret_key: &SecretKey, fork: &Fork, @@ -265,7 +271,7 @@ fn build_proposer_slashing( Signature::new(message, domain, secret_key) }; - TestingProposerSlashingBuilder::double_vote(validator_index, signer, spec) + TestingProposerSlashingBuilder::double_vote::(validator_index, signer) } /// Builds an `AttesterSlashing` for some `validator_indices`. diff --git a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs index 20ed8a893a..4f09275087 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs @@ -6,7 +6,6 @@ use dirs; use log::debug; use rayon::prelude::*; use std::path::{Path, PathBuf}; -use std::time::SystemTime; pub const KEYPAIRS_FILE: &str = "keypairs.raw_keypairs"; @@ -113,8 +112,8 @@ impl TestingBeaconStateBuilder { pubkey: keypair.pk.clone(), withdrawal_credentials, // All validators start active. - activation_eligibility_epoch: spec.genesis_epoch, - activation_epoch: spec.genesis_epoch, + activation_eligibility_epoch: T::genesis_epoch(), + activation_epoch: T::genesis_epoch(), exit_epoch: spec.far_future_epoch, withdrawable_epoch: spec.far_future_epoch, slashed: false, @@ -123,20 +122,8 @@ impl TestingBeaconStateBuilder { }) .collect(); - // TODO: Testing only. Burn with fire later. - // set genesis to the last 30 minute block. - // this is used for testing only. Allows multiple nodes to connect within a 30min window - // and agree on a genesis - let now = SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs(); - let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0); - // genesis is now the last 30 minute block. 
- let genesis_time = now - secs_after_last_period; - let mut state = BeaconState::genesis( - genesis_time, + spec.genesis_time, Eth1Data { deposit_root: Hash256::zero(), deposit_count: 0, @@ -172,8 +159,8 @@ impl TestingBeaconStateBuilder { } /// Sets the `BeaconState` to be in a slot, calling `teleport_to_epoch` to update the epoch. - pub fn teleport_to_slot(&mut self, slot: Slot, spec: &ChainSpec) { - self.teleport_to_epoch(slot.epoch(spec.slots_per_epoch), spec); + pub fn teleport_to_slot(&mut self, slot: Slot) { + self.teleport_to_epoch(slot.epoch(T::slots_per_epoch())); self.state.slot = slot; } @@ -181,10 +168,10 @@ impl TestingBeaconStateBuilder { /// /// Sets all justification/finalization parameters to be be as "perfect" as possible (i.e., /// highest justified and finalized slots, full justification bitfield, etc). - fn teleport_to_epoch(&mut self, epoch: Epoch, spec: &ChainSpec) { + fn teleport_to_epoch(&mut self, epoch: Epoch) { let state = &mut self.state; - let slot = epoch.start_slot(spec.slots_per_epoch); + let slot = epoch.start_slot(T::slots_per_epoch()); state.slot = slot; @@ -214,8 +201,8 @@ impl TestingBeaconStateBuilder { let current_epoch = state.current_epoch(); let previous_epoch = state.previous_epoch(); - let first_slot = previous_epoch.start_slot(spec.slots_per_epoch).as_u64(); - let last_slot = current_epoch.end_slot(spec.slots_per_epoch).as_u64() + let first_slot = previous_epoch.start_slot(T::slots_per_epoch()).as_u64(); + let last_slot = current_epoch.end_slot(T::slots_per_epoch()).as_u64() - spec.min_attestation_inclusion_delay; let last_slot = std::cmp::min(state.slot.as_u64(), last_slot); diff --git a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs index 458082de2d..67668d130d 100644 --- a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs +++ 
b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs @@ -17,8 +17,9 @@ impl TestingProposerSlashingBuilder { /// - `domain: Domain` /// /// Where domain is a domain "constant" (e.g., `spec.domain_attestation`). - pub fn double_vote(proposer_index: u64, signer: F, spec: &ChainSpec) -> ProposerSlashing + pub fn double_vote(proposer_index: u64, signer: F) -> ProposerSlashing where + T: EthSpec, F: Fn(u64, &[u8], Epoch, Domain) -> Signature, { let slot = Slot::new(0); @@ -40,13 +41,13 @@ impl TestingProposerSlashingBuilder { header_1.signature = { let message = header_1.signed_root(); - let epoch = slot.epoch(spec.slots_per_epoch); + let epoch = slot.epoch(T::slots_per_epoch()); signer(proposer_index, &message[..], epoch, Domain::BeaconProposer) }; header_2.signature = { let message = header_2.signed_root(); - let epoch = slot.epoch(spec.slots_per_epoch); + let epoch = slot.epoch(T::slots_per_epoch()); signer(proposer_index, &message[..], epoch, Domain::BeaconProposer) }; diff --git a/eth2/types/src/test_utils/builders/testing_transfer_builder.rs b/eth2/types/src/test_utils/builders/testing_transfer_builder.rs index 2680f7b664..d3c3da19e6 100644 --- a/eth2/types/src/test_utils/builders/testing_transfer_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_transfer_builder.rs @@ -29,10 +29,10 @@ impl TestingTransferBuilder { /// Signs the transfer. /// /// The keypair must match that of the `from` validator index. 
- pub fn sign(&mut self, keypair: Keypair, fork: &Fork, spec: &ChainSpec) { + pub fn sign(&mut self, keypair: Keypair, fork: &Fork, spec: &ChainSpec) { self.transfer.pubkey = keypair.pk; let message = self.transfer.signed_root(); - let epoch = self.transfer.slot.epoch(spec.slots_per_epoch); + let epoch = self.transfer.slot.epoch(T::slots_per_epoch()); let domain = spec.get_domain(epoch, Domain::Transfer, fork); self.transfer.signature = Signature::new(&message, domain, &keypair.sk); diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index ee8327be86..b5ec7a0270 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -14,5 +14,5 @@ pub use rand::{ RngCore, {prng::XorShiftRng, SeedableRng}, }; -pub use serde_utils::{fork_from_hex_str, graffiti_from_hex_str, u8_from_hex_str}; +pub use serde_utils::{fork_from_hex_str, graffiti_from_hex_str, u8_from_hex_str, u8_to_hex_str}; pub use test_random::TestRandom; diff --git a/eth2/types/src/test_utils/serde_utils.rs b/eth2/types/src/test_utils/serde_utils.rs index 5c0238c0bb..079551b583 100644 --- a/eth2/types/src/test_utils/serde_utils.rs +++ b/eth2/types/src/test_utils/serde_utils.rs @@ -1,5 +1,5 @@ use serde::de::Error; -use serde::{Deserialize, Deserializer}; +use serde::{Deserialize, Deserializer, Serializer}; pub const FORK_BYTES_LEN: usize = 4; pub const GRAFFITI_BYTES_LEN: usize = 32; @@ -13,6 +13,17 @@ where u8::from_str_radix(&s.as_str()[2..], 16).map_err(D::Error::custom) } +#[allow(clippy::trivially_copy_pass_by_ref)] // Serde requires the `byte` to be a ref. 
+pub fn u8_to_hex_str(byte: &u8, serializer: S) -> Result +where + S: Serializer, +{ + let mut hex: String = "0x".to_string(); + hex.push_str(&hex::encode(&[*byte])); + + serializer.serialize_str(&hex) +} + pub fn fork_from_hex_str<'de, D>(deserializer: D) -> Result<[u8; FORK_BYTES_LEN], D::Error> where D: Deserializer<'de>, diff --git a/eth2/utils/eth2_config/Cargo.toml b/eth2/utils/eth2_config/Cargo.toml new file mode 100644 index 0000000000..5af385e2d6 --- /dev/null +++ b/eth2/utils/eth2_config/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "eth2_config" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +clap = "2.32.0" +dirs = "1.0.3" +serde = "1.0" +serde_derive = "1.0" +toml = "^0.5" +types = { path = "../../types" } diff --git a/eth2/utils/eth2_config/src/lib.rs b/eth2/utils/eth2_config/src/lib.rs new file mode 100644 index 0000000000..9d50a95c18 --- /dev/null +++ b/eth2/utils/eth2_config/src/lib.rs @@ -0,0 +1,119 @@ +use clap::ArgMatches; +use serde_derive::{Deserialize, Serialize}; +use std::fs; +use std::fs::File; +use std::io::prelude::*; +use std::path::PathBuf; +use std::time::SystemTime; +use types::ChainSpec; + +/// The core configuration of a Lighthouse beacon node. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(default)] +pub struct Eth2Config { + pub spec_constants: String, + pub spec: ChainSpec, +} + +impl Default for Eth2Config { + fn default() -> Self { + Self { + spec_constants: "minimal".to_string(), + spec: ChainSpec::minimal(), + } + } +} + +impl Eth2Config { + pub fn mainnet() -> Self { + Self { + spec_constants: "mainnet".to_string(), + spec: ChainSpec::mainnet(), + } + } + + pub fn minimal() -> Self { + Self { + spec_constants: "minimal".to_string(), + spec: ChainSpec::minimal(), + } + } +} + +impl Eth2Config { + /// Apply the following arguments to `self`, replacing values if they are specified in `args`. + /// + /// Returns an error if arguments are obviously invalid. 
May succeed even if some values are + /// invalid. + pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { + if args.is_present("recent-genesis") { + self.spec.genesis_time = recent_genesis_time() + } + + Ok(()) + } +} + +/// Returns the system time, mod 30 minutes. +/// +/// Used for easily creating testnets. +fn recent_genesis_time() -> u64 { + let now = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(); + let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0); + // genesis is now the last 30 minute block. + now - secs_after_last_period +} + +/// Write a configuration to file. +pub fn write_to_file(path: PathBuf, config: &T) -> Result<(), String> +where + T: Default + serde::de::DeserializeOwned + serde::Serialize, +{ + if let Ok(mut file) = File::create(path.clone()) { + let toml_encoded = toml::to_string(&config).map_err(|e| { + format!( + "Failed to write configuration to {:?}. Error: {:?}", + path, e + ) + })?; + file.write_all(toml_encoded.as_bytes()) + .unwrap_or_else(|_| panic!("Unable to write to {:?}", path)); + } + + Ok(()) +} + +/// Attempts to load a TOML-encoded configuration from file. Returns `Ok(None)` if the +/// file does not exist; no default file is written. +pub fn read_from_file(path: PathBuf) -> Result, String> +where + T: Default + serde::de::DeserializeOwned + serde::Serialize, +{ + if let Ok(mut file) = File::open(path.clone()) { + let mut contents = String::new(); + file.read_to_string(&mut contents) + .map_err(|e| format!("Unable to read {:?}. 
Error: {:?}", path, e))?; + + let config = toml::from_str(&contents) + .map_err(|e| format!("Unable to parse {:?}: {:?}", path, e))?; + + Ok(Some(config)) + } else { + Ok(None) + } +} + +pub fn get_data_dir(args: &ArgMatches, default_data_dir: PathBuf) -> Result { + if let Some(data_dir) = args.value_of("datadir") { + Ok(PathBuf::from(data_dir)) + } else { + let path = dirs::home_dir() + .ok_or_else(|| "Unable to locate home directory")? + .join(&default_data_dir); + fs::create_dir_all(&path).map_err(|_| "Unable to create data_dir")?; + Ok(path) + } +} diff --git a/eth2/utils/fixed_len_vec/src/impls.rs b/eth2/utils/fixed_len_vec/src/impls.rs index e1c54c1f78..691c8ee89a 100644 --- a/eth2/utils/fixed_len_vec/src/impls.rs +++ b/eth2/utils/fixed_len_vec/src/impls.rs @@ -100,7 +100,7 @@ where } fn from_ssz_bytes(bytes: &[u8]) -> Result { - if bytes.len() == 0 { + if bytes.is_empty() { Ok(FixedLenVec::from(vec![])) } else if T::is_ssz_fixed_len() { bytes diff --git a/eth2/utils/slot_clock/src/lib.rs b/eth2/utils/slot_clock/src/lib.rs index fd5a2d1d7d..7b86684fa4 100644 --- a/eth2/utils/slot_clock/src/lib.rs +++ b/eth2/utils/slot_clock/src/lib.rs @@ -6,9 +6,14 @@ pub use crate::testing_slot_clock::{Error as TestingSlotClockError, TestingSlotC use std::time::Duration; pub use types::Slot; -pub trait SlotClock: Send + Sync { +pub trait SlotClock: Send + Sync + Sized { type Error; + /// Create a new `SlotClock`. + /// + /// Construction is infallible; implementations should handle `slot_duration_seconds == 0` (e.g., by erroring from `present_slot`).
+ fn new(genesis_slot: Slot, genesis_seconds: u64, slot_duration_seconds: u64) -> Self; + fn present_slot(&self) -> Result, Self::Error>; fn duration_to_next_slot(&self) -> Result, Self::Error>; diff --git a/eth2/utils/slot_clock/src/system_time_slot_clock.rs b/eth2/utils/slot_clock/src/system_time_slot_clock.rs index 4dfc6b37da..7c184b02bf 100644 --- a/eth2/utils/slot_clock/src/system_time_slot_clock.rs +++ b/eth2/utils/slot_clock/src/system_time_slot_clock.rs @@ -18,31 +18,25 @@ pub struct SystemTimeSlotClock { slot_duration_seconds: u64, } -impl SystemTimeSlotClock { - /// Create a new `SystemTimeSlotClock`. - /// - /// Returns an Error if `slot_duration_seconds == 0`. - pub fn new( - genesis_slot: Slot, - genesis_seconds: u64, - slot_duration_seconds: u64, - ) -> Result { - if slot_duration_seconds == 0 { - Err(Error::SlotDurationIsZero) - } else { - Ok(Self { - genesis_slot, - genesis_seconds, - slot_duration_seconds, - }) - } - } -} - impl SlotClock for SystemTimeSlotClock { type Error = Error; + /// Create a new `SystemTimeSlotClock`. + /// + /// Returns an Error if `slot_duration_seconds == 0`. + fn new(genesis_slot: Slot, genesis_seconds: u64, slot_duration_seconds: u64) -> Self { + Self { + genesis_slot, + genesis_seconds, + slot_duration_seconds, + } + } + fn present_slot(&self) -> Result, Error> { + if self.slot_duration_seconds == 0 { + return Err(Error::SlotDurationIsZero); + } + let syslot_time = SystemTime::now(); let duration_since_epoch = syslot_time.duration_since(SystemTime::UNIX_EPOCH)?; let duration_since_genesis = diff --git a/eth2/utils/slot_clock/src/testing_slot_clock.rs b/eth2/utils/slot_clock/src/testing_slot_clock.rs index b5c36dfa0a..fc9b7201bb 100644 --- a/eth2/utils/slot_clock/src/testing_slot_clock.rs +++ b/eth2/utils/slot_clock/src/testing_slot_clock.rs @@ -8,30 +8,28 @@ pub enum Error {} /// Determines the present slot based upon the present system time. 
pub struct TestingSlotClock { - slot: RwLock, + slot: RwLock, } impl TestingSlotClock { - /// Create a new `TestingSlotClock`. - /// - /// Returns an Error if `slot_duration_seconds == 0`. - pub fn new(slot: u64) -> TestingSlotClock { - TestingSlotClock { - slot: RwLock::new(slot), - } - } - pub fn set_slot(&self, slot: u64) { - *self.slot.write().expect("TestingSlotClock poisoned.") = slot; + *self.slot.write().expect("TestingSlotClock poisoned.") = Slot::from(slot); } } impl SlotClock for TestingSlotClock { type Error = Error; + /// Create a new `TestingSlotClock` at `genesis_slot`. + fn new(genesis_slot: Slot, _genesis_seconds: u64, _slot_duration_seconds: u64) -> Self { + TestingSlotClock { + slot: RwLock::new(genesis_slot), + } + } + fn present_slot(&self) -> Result, Error> { let slot = *self.slot.read().expect("TestingSlotClock poisoned."); - Ok(Some(Slot::new(slot))) + Ok(Some(slot)) } /// Always returns a duration of 1 second. @@ -46,7 +44,9 @@ mod tests { #[test] fn test_slot_now() { - let clock = TestingSlotClock::new(10); + let null = 0; + + let clock = TestingSlotClock::new(Slot::new(10), null, null); assert_eq!(clock.present_slot(), Ok(Some(Slot::new(10)))); clock.set_slot(123); assert_eq!(clock.present_slot(), Ok(Some(Slot::new(123)))); diff --git a/eth2/utils/ssz/src/decode.rs b/eth2/utils/ssz/src/decode.rs index 891104733c..77144092b1 100644 --- a/eth2/utils/ssz/src/decode.rs +++ b/eth2/utils/ssz/src/decode.rs @@ -102,9 +102,7 @@ impl<'a> SszDecoderBuilder<'a> { .and_then(|o| Some(o.offset)) .unwrap_or_else(|| BYTES_PER_LENGTH_OFFSET); - if previous_offset > offset { - return Err(DecodeError::OutOfBoundsByte { i: offset }); - } else if offset > self.bytes.len() { + if (previous_offset > offset) || (offset > self.bytes.len()) { return Err(DecodeError::OutOfBoundsByte { i: offset }); } @@ -220,6 +218,12 @@ impl<'a> SszDecoder<'a> { } } +/// Reads a `BYTES_PER_LENGTH_OFFSET`-byte union index from `bytes`, where `bytes.len() >= +/// 
BYTES_PER_LENGTH_OFFSET`. +pub fn read_union_index(bytes: &[u8]) -> Result { + read_offset(bytes) +} + /// Reads a `BYTES_PER_LENGTH_OFFSET`-byte length from `bytes`, where `bytes.len() >= /// BYTES_PER_LENGTH_OFFSET`. fn read_offset(bytes: &[u8]) -> Result { diff --git a/eth2/utils/ssz/src/decode/impls.rs b/eth2/utils/ssz/src/decode/impls.rs index ccb99fa2f0..0965ee3e54 100644 --- a/eth2/utils/ssz/src/decode/impls.rs +++ b/eth2/utils/ssz/src/decode/impls.rs @@ -1,4 +1,5 @@ use super::*; +use core::num::NonZeroUsize; use ethereum_types::{H256, U128, U256}; macro_rules! impl_decodable_for_uint { @@ -54,16 +55,68 @@ impl Decode for bool { match bytes[0] { 0b0000_0000 => Ok(false), 0b0000_0001 => Ok(true), - _ => { - return Err(DecodeError::BytesInvalid( - format!("Out-of-range for boolean: {}", bytes[0]).to_string(), - )); - } + _ => Err(DecodeError::BytesInvalid( + format!("Out-of-range for boolean: {}", bytes[0]).to_string(), + )), } } } } +impl Decode for NonZeroUsize { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let x = usize::from_ssz_bytes(bytes)?; + + if x == 0 { + Err(DecodeError::BytesInvalid( + "NonZeroUsize cannot be zero.".to_string(), + )) + } else { + // `unwrap` is safe here as `NonZeroUsize::new()` succeeds if `x > 0` and this path + // never executes when `x == 0`. + Ok(NonZeroUsize::new(x).unwrap()) + } + } +} + +/// The SSZ union type. 
+impl Decode for Option { + fn is_ssz_fixed_len() -> bool { + false + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + if bytes.len() < BYTES_PER_LENGTH_OFFSET { + return Err(DecodeError::InvalidByteLength { + len: bytes.len(), + expected: BYTES_PER_LENGTH_OFFSET, + }); + } + + let (index_bytes, value_bytes) = bytes.split_at(BYTES_PER_LENGTH_OFFSET); + + let index = read_union_index(index_bytes)?; + if index == 0 { + Ok(None) + } else if index == 1 { + Ok(Some(T::from_ssz_bytes(value_bytes)?)) + } else { + Err(DecodeError::BytesInvalid(format!( + "{} is not a valid union index for Option", + index + ))) + } + } +} + impl Decode for H256 { fn is_ssz_fixed_len() -> bool { true @@ -164,7 +217,7 @@ impl Decode for Vec { } fn from_ssz_bytes(bytes: &[u8]) -> Result { - if bytes.len() == 0 { + if bytes.is_empty() { Ok(vec![]) } else if T::is_ssz_fixed_len() { bytes diff --git a/eth2/utils/ssz/src/encode.rs b/eth2/utils/ssz/src/encode.rs index 257ece2a25..6ceb08debc 100644 --- a/eth2/utils/ssz/src/encode.rs +++ b/eth2/utils/ssz/src/encode.rs @@ -126,6 +126,13 @@ impl<'a> SszEncoder<'a> { } } +/// Encode `index` as a little-endian byte vec of `BYTES_PER_LENGTH_OFFSET` length. +/// +/// If `len` is larger than `2 ^ BYTES_PER_LENGTH_OFFSET`, a `debug_assert` is raised. +pub fn encode_union_index(index: usize) -> Vec { + encode_length(index) +} + /// Encode `len` as a little-endian byte vec of `BYTES_PER_LENGTH_OFFSET` length. /// /// If `len` is larger than `2 ^ BYTES_PER_LENGTH_OFFSET`, a `debug_assert` is raised. diff --git a/eth2/utils/ssz/src/encode/impls.rs b/eth2/utils/ssz/src/encode/impls.rs index 0d6891c5e2..04492a1f2d 100644 --- a/eth2/utils/ssz/src/encode/impls.rs +++ b/eth2/utils/ssz/src/encode/impls.rs @@ -1,4 +1,5 @@ use super::*; +use core::num::NonZeroUsize; use ethereum_types::{H256, U128, U256}; macro_rules! 
impl_encodable_for_uint { @@ -25,6 +26,23 @@ impl_encodable_for_uint!(u32, 32); impl_encodable_for_uint!(u64, 64); impl_encodable_for_uint!(usize, 64); +/// The SSZ "union" type. +impl Encode for Option { + fn is_ssz_fixed_len() -> bool { + false + } + + fn ssz_append(&self, buf: &mut Vec) { + match self { + None => buf.append(&mut encode_union_index(0)), + Some(t) => { + buf.append(&mut encode_union_index(1)); + t.ssz_append(buf); + } + } + } +} + impl Encode for Vec { fn is_ssz_fixed_len() -> bool { false @@ -63,6 +81,20 @@ impl Encode for bool { } } +impl Encode for NonZeroUsize { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.get().ssz_append(buf) + } +} + impl Encode for H256 { fn is_ssz_fixed_len() -> bool { true @@ -168,6 +200,25 @@ mod tests { ); } + #[test] + fn ssz_encode_option_u16() { + assert_eq!(Some(65535_u16).as_ssz_bytes(), vec![1, 0, 0, 0, 255, 255]); + + let none: Option = None; + assert_eq!(none.as_ssz_bytes(), vec![0, 0, 0, 0]); + } + + #[test] + fn ssz_encode_option_vec_u16() { + assert_eq!( + Some(vec![0_u16, 1]).as_ssz_bytes(), + vec![1, 0, 0, 0, 0, 0, 1, 0] + ); + + let none: Option> = None; + assert_eq!(none.as_ssz_bytes(), vec![0, 0, 0, 0]); + } + #[test] fn ssz_encode_u8() { assert_eq!(0_u8.as_ssz_bytes(), vec![0]); diff --git a/eth2/utils/ssz/tests/tests.rs b/eth2/utils/ssz/tests/tests.rs index ed318d9244..9447cf5372 100644 --- a/eth2/utils/ssz/tests/tests.rs +++ b/eth2/utils/ssz/tests/tests.rs @@ -276,7 +276,7 @@ mod round_trip { fn offsets_decreasing() { let bytes = vec![ // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 - // | offset | ofset | offset | variable + // | offset | offset | offset | variable 01, 00, 14, 00, 00, 00, 15, 00, 00, 00, 14, 00, 00, 00, 00, 00, ]; @@ -285,4 +285,65 @@ mod round_trip { Err(DecodeError::OutOfBoundsByte { i: 14 }) ); } + + #[derive(Debug, PartialEq, Encode, Decode)] + struct 
TwoVariableLenOptions { + a: u16, + b: Option, + c: Option>, + d: Option>, + } + + #[test] + fn two_variable_len_options_encoding() { + let s = TwoVariableLenOptions { + a: 42, + b: None, + c: Some(vec![0]), + d: None, + }; + + let bytes = vec![ + // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 + // | option | offset | offset | option = vec![ + TwoVariableLenOptions { + a: 42, + b: Some(12), + c: Some(vec![0]), + d: Some(vec![1]), + }, + TwoVariableLenOptions { + a: 42, + b: Some(12), + c: Some(vec![0]), + d: None, + }, + TwoVariableLenOptions { + a: 42, + b: None, + c: Some(vec![0]), + d: None, + }, + TwoVariableLenOptions { + a: 42, + b: None, + c: None, + d: None, + }, + ]; + + round_trip(vec); + } } diff --git a/tests/ef_tests/src/case_result.rs b/tests/ef_tests/src/case_result.rs index cd40ac8cea..88fd353a14 100644 --- a/tests/ef_tests/src/case_result.rs +++ b/tests/ef_tests/src/case_result.rs @@ -28,13 +28,10 @@ pub fn compare_beacon_state_results_without_caches( result: &mut Result, E>, expected: &mut Option>, ) -> Result<(), Error> { - match (result.as_mut(), expected.as_mut()) { - (Ok(ref mut result), Some(ref mut expected)) => { - result.drop_all_caches(); - expected.drop_all_caches(); - } - _ => (), - }; + if let (Ok(ref mut result), Some(ref mut expected)) = (result.as_mut(), expected.as_mut()) { + result.drop_all_caches(); + expected.drop_all_caches(); + } compare_result_detailed(&result, &expected) } diff --git a/tests/ef_tests/src/cases.rs b/tests/ef_tests/src/cases.rs index 46a1578d14..3801ba6a75 100644 --- a/tests/ef_tests/src/cases.rs +++ b/tests/ef_tests/src/cases.rs @@ -78,7 +78,7 @@ where impl YamlDecode for Cases { /// Decodes a YAML list of test cases - fn yaml_decode(yaml: &String) -> Result { + fn yaml_decode(yaml: &str) -> Result { let mut p = 0; let mut elems: Vec<&str> = yaml .match_indices("\n- ") diff --git a/tests/ef_tests/src/cases/bls_aggregate_pubkeys.rs b/tests/ef_tests/src/cases/bls_aggregate_pubkeys.rs index 
6cd37ec36e..6e38743f2f 100644 --- a/tests/ef_tests/src/cases/bls_aggregate_pubkeys.rs +++ b/tests/ef_tests/src/cases/bls_aggregate_pubkeys.rs @@ -10,8 +10,8 @@ pub struct BlsAggregatePubkeys { } impl YamlDecode for BlsAggregatePubkeys { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } diff --git a/tests/ef_tests/src/cases/bls_aggregate_sigs.rs b/tests/ef_tests/src/cases/bls_aggregate_sigs.rs index 5b69a6134e..eeecab82cd 100644 --- a/tests/ef_tests/src/cases/bls_aggregate_sigs.rs +++ b/tests/ef_tests/src/cases/bls_aggregate_sigs.rs @@ -10,8 +10,8 @@ pub struct BlsAggregateSigs { } impl YamlDecode for BlsAggregateSigs { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } diff --git a/tests/ef_tests/src/cases/bls_g2_compressed.rs b/tests/ef_tests/src/cases/bls_g2_compressed.rs index b03821430c..8478a0ff62 100644 --- a/tests/ef_tests/src/cases/bls_g2_compressed.rs +++ b/tests/ef_tests/src/cases/bls_g2_compressed.rs @@ -16,8 +16,8 @@ pub struct BlsG2Compressed { } impl YamlDecode for BlsG2Compressed { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } @@ -52,13 +52,13 @@ impl Case for BlsG2Compressed { } // Converts a vector to u64 (from big endian) -fn bytes_to_u64(array: &Vec) -> u64 { +fn bytes_to_u64(array: &[u8]) -> u64 { let mut result: u64 = 0; for (i, value) in array.iter().rev().enumerate() { if i == 8 { break; } - result += u64::pow(2, i as u32 * 8) * (*value as u64); + result += u64::pow(2, i as u32 * 8) * u64::from(*value); } result } diff --git a/tests/ef_tests/src/cases/bls_g2_uncompressed.rs b/tests/ef_tests/src/cases/bls_g2_uncompressed.rs index 
93b1e1c51e..962b6aac39 100644 --- a/tests/ef_tests/src/cases/bls_g2_uncompressed.rs +++ b/tests/ef_tests/src/cases/bls_g2_uncompressed.rs @@ -16,8 +16,8 @@ pub struct BlsG2Uncompressed { } impl YamlDecode for BlsG2Uncompressed { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } @@ -56,13 +56,13 @@ impl Case for BlsG2Uncompressed { } // Converts a vector to u64 (from big endian) -fn bytes_to_u64(array: &Vec) -> u64 { +fn bytes_to_u64(array: &[u8]) -> u64 { let mut result: u64 = 0; for (i, value) in array.iter().rev().enumerate() { if i == 8 { break; } - result += u64::pow(2, i as u32 * 8) * (*value as u64); + result += u64::pow(2, i as u32 * 8) * u64::from(*value); } result } diff --git a/tests/ef_tests/src/cases/bls_priv_to_pub.rs b/tests/ef_tests/src/cases/bls_priv_to_pub.rs index c558d01428..d72a43bbbc 100644 --- a/tests/ef_tests/src/cases/bls_priv_to_pub.rs +++ b/tests/ef_tests/src/cases/bls_priv_to_pub.rs @@ -10,8 +10,8 @@ pub struct BlsPrivToPub { } impl YamlDecode for BlsPrivToPub { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } diff --git a/tests/ef_tests/src/cases/bls_sign_msg.rs b/tests/ef_tests/src/cases/bls_sign_msg.rs index a361f2523c..e62c3550fa 100644 --- a/tests/ef_tests/src/cases/bls_sign_msg.rs +++ b/tests/ef_tests/src/cases/bls_sign_msg.rs @@ -17,8 +17,8 @@ pub struct BlsSign { } impl YamlDecode for BlsSign { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } @@ -46,13 +46,13 @@ impl Case for BlsSign { } // Converts a vector to u64 (from big endian) -fn bytes_to_u64(array: &Vec) -> u64 { +fn bytes_to_u64(array: &[u8]) -> u64 { let mut result: u64 = 0; 
for (i, value) in array.iter().rev().enumerate() { if i == 8 { break; } - result += u64::pow(2, i as u32 * 8) * (*value as u64); + result += u64::pow(2, i as u32 * 8) * u64::from(*value); } result } diff --git a/tests/ef_tests/src/cases/epoch_processing_crosslinks.rs b/tests/ef_tests/src/cases/epoch_processing_crosslinks.rs index fa530f9add..bf1564b97c 100644 --- a/tests/ef_tests/src/cases/epoch_processing_crosslinks.rs +++ b/tests/ef_tests/src/cases/epoch_processing_crosslinks.rs @@ -14,8 +14,8 @@ pub struct EpochProcessingCrosslinks { } impl YamlDecode for EpochProcessingCrosslinks { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } @@ -29,9 +29,9 @@ impl Case for EpochProcessingCrosslinks { let mut expected = self.post.clone(); // Processing requires the epoch cache. - state.build_all_caches(&E::spec()).unwrap(); + state.build_all_caches(&E::default_spec()).unwrap(); - let mut result = process_crosslinks(&mut state, &E::spec()).map(|_| state); + let mut result = process_crosslinks(&mut state, &E::default_spec()).map(|_| state); compare_beacon_state_results_without_caches(&mut result, &mut expected) } diff --git a/tests/ef_tests/src/cases/epoch_processing_registry_updates.rs b/tests/ef_tests/src/cases/epoch_processing_registry_updates.rs index d91a7a4c3e..02311656e3 100644 --- a/tests/ef_tests/src/cases/epoch_processing_registry_updates.rs +++ b/tests/ef_tests/src/cases/epoch_processing_registry_updates.rs @@ -14,8 +14,8 @@ pub struct EpochProcessingRegistryUpdates { } impl YamlDecode for EpochProcessingRegistryUpdates { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } @@ -27,7 +27,7 @@ impl Case for EpochProcessingRegistryUpdates { fn result(&self, _case_index: usize) -> Result<(), Error> { let mut 
state = self.pre.clone(); let mut expected = self.post.clone(); - let spec = &E::spec(); + let spec = &E::default_spec(); // Processing requires the epoch cache. state.build_all_caches(spec).unwrap(); diff --git a/tests/ef_tests/src/cases/operations_attestation.rs b/tests/ef_tests/src/cases/operations_attestation.rs index 85813192aa..1db0f6d020 100644 --- a/tests/ef_tests/src/cases/operations_attestation.rs +++ b/tests/ef_tests/src/cases/operations_attestation.rs @@ -17,8 +17,8 @@ pub struct OperationsAttestation { } impl YamlDecode for OperationsAttestation { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(&yaml).unwrap()) } } @@ -28,6 +28,8 @@ impl Case for OperationsAttestation { } fn result(&self, _case_index: usize) -> Result<(), Error> { + let spec = &E::default_spec(); + self.bls_setting.unwrap_or_default().check()?; let mut state = self.pre.clone(); @@ -35,9 +37,9 @@ impl Case for OperationsAttestation { let mut expected = self.post.clone(); // Processing requires the epoch cache. 
- state.build_all_caches(&E::spec()).unwrap(); + state.build_all_caches(spec).unwrap(); - let result = process_attestations(&mut state, &[attestation], &E::spec()); + let result = process_attestations(&mut state, &[attestation], spec); let mut result = result.and_then(|_| Ok(state)); diff --git a/tests/ef_tests/src/cases/operations_attester_slashing.rs b/tests/ef_tests/src/cases/operations_attester_slashing.rs index 2966311dfe..fd74350099 100644 --- a/tests/ef_tests/src/cases/operations_attester_slashing.rs +++ b/tests/ef_tests/src/cases/operations_attester_slashing.rs @@ -17,8 +17,8 @@ pub struct OperationsAttesterSlashing { } impl YamlDecode for OperationsAttesterSlashing { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } @@ -35,9 +35,10 @@ impl Case for OperationsAttesterSlashing { let mut expected = self.post.clone(); // Processing requires the epoch cache. 
- state.build_all_caches(&E::spec()).unwrap(); + state.build_all_caches(&E::default_spec()).unwrap(); - let result = process_attester_slashings(&mut state, &[attester_slashing], &E::spec()); + let result = + process_attester_slashings(&mut state, &[attester_slashing], &E::default_spec()); let mut result = result.and_then(|_| Ok(state)); diff --git a/tests/ef_tests/src/cases/operations_block_header.rs b/tests/ef_tests/src/cases/operations_block_header.rs index ac1c103545..359c4eb452 100644 --- a/tests/ef_tests/src/cases/operations_block_header.rs +++ b/tests/ef_tests/src/cases/operations_block_header.rs @@ -17,8 +17,8 @@ pub struct OperationsBlockHeader { } impl YamlDecode for OperationsBlockHeader { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } @@ -28,16 +28,18 @@ impl Case for OperationsBlockHeader { } fn result(&self, _case_index: usize) -> Result<(), Error> { + let spec = &E::default_spec(); + self.bls_setting.unwrap_or_default().check()?; let mut state = self.pre.clone(); let mut expected = self.post.clone(); // Processing requires the epoch cache. 
- state.build_all_caches(&E::spec()).unwrap(); + state.build_all_caches(spec).unwrap(); let mut result = - process_block_header(&mut state, &self.block, &E::spec(), true).map(|_| state); + process_block_header(&mut state, &self.block, spec, true).map(|_| state); compare_beacon_state_results_without_caches(&mut result, &mut expected) } diff --git a/tests/ef_tests/src/cases/operations_deposit.rs b/tests/ef_tests/src/cases/operations_deposit.rs index 3d0ad88293..7478708b08 100644 --- a/tests/ef_tests/src/cases/operations_deposit.rs +++ b/tests/ef_tests/src/cases/operations_deposit.rs @@ -17,8 +17,8 @@ pub struct OperationsDeposit { } impl YamlDecode for OperationsDeposit { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } @@ -34,7 +34,7 @@ impl Case for OperationsDeposit { let deposit = self.deposit.clone(); let mut expected = self.post.clone(); - let result = process_deposits(&mut state, &[deposit], &E::spec()); + let result = process_deposits(&mut state, &[deposit], &E::default_spec()); let mut result = result.and_then(|_| Ok(state)); diff --git a/tests/ef_tests/src/cases/operations_exit.rs b/tests/ef_tests/src/cases/operations_exit.rs index 1eb3aa4811..013021c04d 100644 --- a/tests/ef_tests/src/cases/operations_exit.rs +++ b/tests/ef_tests/src/cases/operations_exit.rs @@ -17,8 +17,8 @@ pub struct OperationsExit { } impl YamlDecode for OperationsExit { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } @@ -35,9 +35,9 @@ impl Case for OperationsExit { let mut expected = self.post.clone(); // Exit processing requires the epoch cache. 
- state.build_all_caches(&E::spec()).unwrap(); + state.build_all_caches(&E::default_spec()).unwrap(); - let result = process_exits(&mut state, &[exit], &E::spec()); + let result = process_exits(&mut state, &[exit], &E::default_spec()); let mut result = result.and_then(|_| Ok(state)); diff --git a/tests/ef_tests/src/cases/operations_proposer_slashing.rs b/tests/ef_tests/src/cases/operations_proposer_slashing.rs index 892fff9f9c..7ddb971635 100644 --- a/tests/ef_tests/src/cases/operations_proposer_slashing.rs +++ b/tests/ef_tests/src/cases/operations_proposer_slashing.rs @@ -17,8 +17,8 @@ pub struct OperationsProposerSlashing { } impl YamlDecode for OperationsProposerSlashing { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } @@ -35,9 +35,10 @@ impl Case for OperationsProposerSlashing { let mut expected = self.post.clone(); // Processing requires the epoch cache. 
- state.build_all_caches(&E::spec()).unwrap(); + state.build_all_caches(&E::default_spec()).unwrap(); - let result = process_proposer_slashings(&mut state, &[proposer_slashing], &E::spec()); + let result = + process_proposer_slashings(&mut state, &[proposer_slashing], &E::default_spec()); let mut result = result.and_then(|_| Ok(state)); diff --git a/tests/ef_tests/src/cases/operations_transfer.rs b/tests/ef_tests/src/cases/operations_transfer.rs index a488846d49..8456017b5a 100644 --- a/tests/ef_tests/src/cases/operations_transfer.rs +++ b/tests/ef_tests/src/cases/operations_transfer.rs @@ -17,8 +17,8 @@ pub struct OperationsTransfer { } impl YamlDecode for OperationsTransfer { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } @@ -35,9 +35,9 @@ impl Case for OperationsTransfer { let mut expected = self.post.clone(); // Transfer processing requires the epoch cache. - state.build_all_caches(&E::spec()).unwrap(); + state.build_all_caches(&E::default_spec()).unwrap(); - let mut spec = E::spec(); + let mut spec = E::default_spec(); spec.max_transfers = 1; let result = process_transfers(&mut state, &[transfer], &spec); diff --git a/tests/ef_tests/src/cases/sanity_blocks.rs b/tests/ef_tests/src/cases/sanity_blocks.rs index c0ea2df702..91824f48d9 100644 --- a/tests/ef_tests/src/cases/sanity_blocks.rs +++ b/tests/ef_tests/src/cases/sanity_blocks.rs @@ -17,8 +17,8 @@ pub struct SanityBlocks { } impl YamlDecode for SanityBlocks { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } @@ -42,10 +42,10 @@ impl Case for SanityBlocks { let mut state = self.pre.clone(); let mut expected = self.post.clone(); - let spec = &E::spec(); + let spec = &E::default_spec(); // Processing requires the epoch cache. 
- state.build_all_caches(&E::spec()).unwrap(); + state.build_all_caches(spec).unwrap(); let mut result = self .blocks diff --git a/tests/ef_tests/src/cases/sanity_slots.rs b/tests/ef_tests/src/cases/sanity_slots.rs index 9b02428c26..779a90c709 100644 --- a/tests/ef_tests/src/cases/sanity_slots.rs +++ b/tests/ef_tests/src/cases/sanity_slots.rs @@ -15,8 +15,8 @@ pub struct SanitySlots { } impl YamlDecode for SanitySlots { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } @@ -28,10 +28,10 @@ impl Case for SanitySlots { fn result(&self, _case_index: usize) -> Result<(), Error> { let mut state = self.pre.clone(); let mut expected = self.post.clone(); - let spec = &E::spec(); + let spec = &E::default_spec(); // Processing requires the epoch cache. - state.build_all_caches(&E::spec()).unwrap(); + state.build_all_caches(spec).unwrap(); let mut result = (0..self.slots) .try_for_each(|_| per_slot_processing(&mut state, spec)) diff --git a/tests/ef_tests/src/cases/shuffling.rs b/tests/ef_tests/src/cases/shuffling.rs index ef8a1b934c..d7ff40e596 100644 --- a/tests/ef_tests/src/cases/shuffling.rs +++ b/tests/ef_tests/src/cases/shuffling.rs @@ -14,8 +14,8 @@ pub struct Shuffling { } impl YamlDecode for Shuffling { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } @@ -24,13 +24,12 @@ impl Case for Shuffling { if self.count == 0 { compare_result::<_, Error>(&Ok(vec![]), &Some(self.shuffled.clone()))?; } else { - let spec = T::spec(); + let spec = T::default_spec(); let seed = hex::decode(&self.seed[2..]) .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; // Test get_permuted_index let shuffling = (0..self.count) - .into_iter() .map(|i| { get_permutated_index(i, self.count, &seed, 
spec.shuffle_round_count).unwrap() }) diff --git a/tests/ef_tests/src/cases/ssz_generic.rs b/tests/ef_tests/src/cases/ssz_generic.rs index 09aba39f1c..ca49d21060 100644 --- a/tests/ef_tests/src/cases/ssz_generic.rs +++ b/tests/ef_tests/src/cases/ssz_generic.rs @@ -15,8 +15,8 @@ pub struct SszGeneric { } impl YamlDecode for SszGeneric { - fn yaml_decode(yaml: &String) -> Result { - Ok(serde_yaml::from_str(&yaml.as_str()).unwrap()) + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) } } @@ -45,11 +45,7 @@ impl Case for SszGeneric { } /// Execute a `ssz_generic` test case. -fn ssz_generic_test( - should_be_ok: bool, - ssz: &String, - value: &Option, -) -> Result<(), Error> +fn ssz_generic_test(should_be_ok: bool, ssz: &str, value: &Option) -> Result<(), Error> where T: Decode + YamlDecode + Debug + PartialEq, { diff --git a/tests/ef_tests/src/cases/ssz_static.rs b/tests/ef_tests/src/cases/ssz_static.rs index 374b90bd27..3365a51e13 100644 --- a/tests/ef_tests/src/cases/ssz_static.rs +++ b/tests/ef_tests/src/cases/ssz_static.rs @@ -55,7 +55,7 @@ where } impl YamlDecode for SszStatic { - fn yaml_decode(yaml: &String) -> Result { + fn yaml_decode(yaml: &str) -> Result { serde_yaml::from_str(yaml).map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) } } diff --git a/tests/ef_tests/src/doc.rs b/tests/ef_tests/src/doc.rs index f7b67fc3ad..183f2781fb 100644 --- a/tests/ef_tests/src/doc.rs +++ b/tests/ef_tests/src/doc.rs @@ -2,11 +2,11 @@ use crate::case_result::CaseResult; use crate::cases::*; use crate::doc_header::DocHeader; use crate::error::Error; -use crate::eth_specs::{MainnetEthSpec, MinimalEthSpec}; use crate::yaml_decode::{yaml_split_header_and_cases, YamlDecode}; use crate::EfTest; use serde_derive::Deserialize; use std::{fs::File, io::prelude::*, path::PathBuf}; +use types::{MainnetEthSpec, MinimalEthSpec}; #[derive(Debug, Deserialize)] pub struct Doc { @@ -190,7 +190,7 @@ pub fn print_results( ); println!("Title: {}", 
header.title); println!("File: {:?}", doc.path); - println!(""); + println!(); println!( "{} tests, {} failed, {} skipped (known failure), {} skipped (bls), {} passed.", results.len(), @@ -199,7 +199,7 @@ pub fn print_results( skipped_bls.len(), results.len() - skipped_bls.len() - skipped_known_failures.len() - failed.len() ); - println!(""); + println!(); for case in skipped_known_failures { println!("-------"); @@ -220,5 +220,5 @@ pub fn print_results( ); println!("{}", error.message()); } - println!(""); + println!(); } diff --git a/tests/ef_tests/src/lib.rs b/tests/ef_tests/src/lib.rs index be12d45c3a..fdd4e7b859 100644 --- a/tests/ef_tests/src/lib.rs +++ b/tests/ef_tests/src/lib.rs @@ -12,7 +12,6 @@ mod cases; mod doc; mod doc_header; mod error; -mod eth_specs; mod yaml_decode; /// Defined where an object can return the results of some test(s) adhering to the Ethereum diff --git a/tests/ef_tests/src/yaml_decode.rs b/tests/ef_tests/src/yaml_decode.rs index 974df8311a..c89dd92a9e 100644 --- a/tests/ef_tests/src/yaml_decode.rs +++ b/tests/ef_tests/src/yaml_decode.rs @@ -8,14 +8,14 @@ pub use utils::*; pub trait YamlDecode: Sized { /// Decode an object from the test specification YAML. - fn yaml_decode(string: &String) -> Result; + fn yaml_decode(string: &str) -> Result; } /// Basic types can general be decoded with the `parse` fn if they implement `str::FromStr`. macro_rules! impl_via_parse { ($ty: ty) => { impl YamlDecode for $ty { - fn yaml_decode(string: &String) -> Result { + fn yaml_decode(string: &str) -> Result { string .parse::() .map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) @@ -34,7 +34,7 @@ impl_via_parse!(u64); macro_rules! impl_via_from_dec_str { ($ty: ty) => { impl YamlDecode for $ty { - fn yaml_decode(string: &String) -> Result { + fn yaml_decode(string: &str) -> Result { Self::from_dec_str(string).map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) } } @@ -48,7 +48,7 @@ impl_via_from_dec_str!(U256); macro_rules! 
impl_via_serde_yaml { ($ty: ty) => { impl YamlDecode for $ty { - fn yaml_decode(string: &String) -> Result { + fn yaml_decode(string: &str) -> Result { serde_yaml::from_str(string) .map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) } diff --git a/tests/ef_tests/src/yaml_decode/utils.rs b/tests/ef_tests/src/yaml_decode/utils.rs index 059d3b5d21..7b6caac728 100644 --- a/tests/ef_tests/src/yaml_decode/utils.rs +++ b/tests/ef_tests/src/yaml_decode/utils.rs @@ -3,7 +3,7 @@ pub fn yaml_split_header_and_cases(mut yaml: String) -> (String, String) { // + 1 to skip the \n we used for matching. let mut test_cases = yaml.split_off(test_cases_start + 1); - let end_of_first_line = test_cases.find("\n").unwrap(); + let end_of_first_line = test_cases.find('\n').unwrap(); let test_cases = test_cases.split_off(end_of_first_line + 1); (yaml, test_cases) diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 559460c8bc..1784bdcb1e 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -15,19 +15,22 @@ path = "src/lib.rs" [dependencies] bls = { path = "../eth2/utils/bls" } ssz = { path = "../eth2/utils/ssz" } +eth2_config = { path = "../eth2/utils/eth2_config" } tree_hash = { path = "../eth2/utils/tree_hash" } clap = "2.32.0" -dirs = "1.0.3" grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } protobuf = "2.0.2" protos = { path = "../protos" } slot_clock = { path = "../eth2/utils/slot_clock" } types = { path = "../eth2/types" } +serde = "1.0" +serde_derive = "1.0" slog = "^2.2.3" slog-term = "^2.4.0" slog-async = "^2.3.0" tokio = "0.1.18" tokio-timer = "0.2.10" +toml = "^0.5" error-chain = "0.12.0" bincode = "^1.1.2" futures = "0.1.25" diff --git a/validator_client/eth2_config.toml b/validator_client/eth2_config.toml new file mode 100644 index 0000000000..49d4e1bd38 --- /dev/null +++ b/validator_client/eth2_config.toml @@ -0,0 +1,47 @@ +spec_constants = "minimal" + +[spec] +target_committee_size = 1 
+max_indices_per_attestation = 4096 +min_per_epoch_churn_limit = 4 +churn_limit_quotient = 65536 +base_rewards_per_epoch = 5 +shuffle_round_count = 10 +deposit_contract_tree_depth = 32 +min_deposit_amount = 1000000000 +max_effective_balance = 32000000000 +ejection_balance = 16000000000 +effective_balance_increment = 1000000000 +genesis_slot = 0 +zero_hash = "0x0000000000000000000000000000000000000000000000000000000000000000" +bls_withdrawal_prefix_byte = "0x00" +genesis_time = 4294967295 +seconds_per_slot = 6 +min_attestation_inclusion_delay = 4 +min_seed_lookahead = 1 +activation_exit_delay = 4 +slots_per_eth1_voting_period = 1024 +slots_per_historical_root = 8192 +min_validator_withdrawability_delay = 256 +persistent_committee_period = 2048 +max_crosslink_epochs = 64 +min_epochs_to_inactivity_penalty = 4 +base_reward_quotient = 32 +whistleblowing_reward_quotient = 512 +proposer_reward_quotient = 8 +inactivity_penalty_quotient = 33554432 +min_slashing_penalty_quotient = 32 +max_proposer_slashings = 16 +max_attester_slashings = 1 +max_attestations = 128 +max_deposits = 16 +max_voluntary_exits = 16 +max_transfers = 0 +domain_beacon_proposer = 0 +domain_randao = 1 +domain_attestation = 2 +domain_deposit = 3 +domain_voluntary_exit = 4 +domain_transfer = 5 +boot_nodes = ["/ip4/127.0.0.1/tcp/9000"] +chain_id = 2 diff --git a/validator_client/src/attestation_producer/mod.rs b/validator_client/src/attestation_producer/mod.rs index 0a65c1f1e3..d59f383ef4 100644 --- a/validator_client/src/attestation_producer/mod.rs +++ b/validator_client/src/attestation_producer/mod.rs @@ -39,6 +39,8 @@ pub struct AttestationProducer<'a, B: BeaconNodeAttestation, S: Signer> { pub beacon_node: Arc, /// The signer to sign the block. pub signer: &'a S, + /// Used for caclulating epoch. 
+ pub slots_per_epoch: u64, } impl<'a, B: BeaconNodeAttestation, S: Signer> AttestationProducer<'a, B, S> { @@ -78,7 +80,7 @@ impl<'a, B: BeaconNodeAttestation, S: Signer> AttestationProducer<'a, B, S> { /// The slash-protection code is not yet implemented. There is zero protection against /// slashing. pub fn produce_attestation(&mut self) -> Result { - let epoch = self.duty.slot.epoch(self.spec.slots_per_epoch); + let epoch = self.duty.slot.epoch(self.slots_per_epoch); let attestation = self .beacon_node diff --git a/validator_client/src/block_producer/mod.rs b/validator_client/src/block_producer/mod.rs index fc01b81265..212db1f8eb 100644 --- a/validator_client/src/block_producer/mod.rs +++ b/validator_client/src/block_producer/mod.rs @@ -48,6 +48,8 @@ pub struct BlockProducer<'a, B: BeaconNodeBlock, S: Signer> { pub beacon_node: Arc, /// The signer to sign the block. pub signer: &'a S, + /// Used for caclulating epoch. + pub slots_per_epoch: u64, } impl<'a, B: BeaconNodeBlock, S: Signer> BlockProducer<'a, B, S> { @@ -84,7 +86,7 @@ impl<'a, B: BeaconNodeBlock, S: Signer> BlockProducer<'a, B, S> { /// The slash-protection code is not yet implemented. There is zero protection against /// slashing. 
pub fn produce_block(&mut self) -> Result { - let epoch = self.slot.epoch(self.spec.slots_per_epoch); + let epoch = self.slot.epoch(self.slots_per_epoch); let message = epoch.tree_hash_root(); let randao_reveal = match self.signer.sign_message( @@ -181,14 +183,14 @@ mod tests { pub fn polling() { let mut rng = XorShiftRng::from_seed([42; 16]); - let spec = Arc::new(ChainSpec::foundation()); + let spec = Arc::new(ChainSpec::mainnet()); let slot_clock = Arc::new(TestingSlotClock::new(0)); let beacon_node = Arc::new(SimulatedBeaconNode::default()); let signer = Arc::new(LocalSigner::new(Keypair::random())); - let mut epoch_map = EpochMap::new(spec.slots_per_epoch); + let mut epoch_map = EpochMap::new(T::slots_per_epoch()); let produce_slot = Slot::new(100); - let produce_epoch = produce_slot.epoch(spec.slots_per_epoch); + let produce_epoch = produce_slot.epoch(T::slots_per_epoch()); epoch_map.map.insert(produce_epoch, produce_slot); let epoch_map = Arc::new(epoch_map); @@ -233,7 +235,7 @@ mod tests { ); // In an epoch without known duties... - let slot = (produce_epoch.as_u64() + 1) * spec.slots_per_epoch; + let slot = (produce_epoch.as_u64() + 1) * T::slots_per_epoch(); slot_clock.set_slot(slot); assert_eq!( block_proposer.poll(), diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 1e9450d599..d7664c1613 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -1,24 +1,23 @@ use bincode; use bls::Keypair; use clap::ArgMatches; +use serde_derive::{Deserialize, Serialize}; use slog::{debug, error, info}; use std::fs; use std::fs::File; use std::io::{Error, ErrorKind}; use std::path::PathBuf; -use types::{ - ChainSpec, EthSpec, FewValidatorsEthSpec, FoundationEthSpec, LighthouseTestnetEthSpec, -}; +use types::{EthSpec, MainnetEthSpec}; /// Stores the core configuration for this validator instance. 
-#[derive(Clone)] +#[derive(Clone, Serialize, Deserialize)] pub struct Config { /// The data directory, which stores all validator databases pub data_dir: PathBuf, /// The server at which the Beacon Node can be contacted pub server: String, - /// The chain specification that we are connecting to - pub spec: ChainSpec, + /// The number of slots per epoch. + pub slots_per_epoch: u64, } const DEFAULT_PRIVATE_KEY_FILENAME: &str = "private.key"; @@ -26,67 +25,36 @@ const DEFAULT_PRIVATE_KEY_FILENAME: &str = "private.key"; impl Default for Config { /// Build a new configuration from defaults. fn default() -> Self { - let data_dir = { - let home = dirs::home_dir().expect("Unable to determine home directory."); - home.join(".lighthouse-validator") - }; - - let server = "localhost:5051".to_string(); - - let spec = FoundationEthSpec::spec(); - Self { - data_dir, - server, - spec, + data_dir: PathBuf::from(".lighthouse-validator"), + server: "localhost:5051".to_string(), + slots_per_epoch: MainnetEthSpec::slots_per_epoch(), } } } impl Config { - /// Build a new configuration from defaults, which are overrided by arguments provided. - pub fn parse_args(args: &ArgMatches, log: &slog::Logger) -> Result { - let mut config = Config::default(); - - // Use the specified datadir, or default in the home directory + /// Apply the following arguments to `self`, replacing values if they are specified in `args`. + /// + /// Returns an error if arguments are obviously invalid. May succeed even if some values are + /// invalid. 
+ pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { if let Some(datadir) = args.value_of("datadir") { - config.data_dir = PathBuf::from(datadir); - info!(log, "Using custom data dir: {:?}", &config.data_dir); + self.data_dir = PathBuf::from(datadir); }; - fs::create_dir_all(&config.data_dir) - .unwrap_or_else(|_| panic!("Unable to create {:?}", &config.data_dir)); - if let Some(srv) = args.value_of("server") { - //TODO: Validate the server value, to ensure it makes sense. - config.server = srv.to_string(); - info!(log, "Using custom server: {:?}", &config.server); + self.server = srv.to_string(); }; - // TODO: Permit loading a custom spec from file. - if let Some(spec_str) = args.value_of("spec") { - info!(log, "Using custom spec: {:?}", spec_str); - config.spec = match spec_str { - "foundation" => FoundationEthSpec::spec(), - "few_validators" => FewValidatorsEthSpec::spec(), - "lighthouse_testnet" => LighthouseTestnetEthSpec::spec(), - // Should be impossible due to clap's `possible_values(..)` function. - _ => unreachable!(), - }; - }; - // Log configuration - info!(log, ""; - "data_dir" => &config.data_dir.to_str(), - "server" => &config.server); - - Ok(config) + Ok(()) } /// Try to load keys from validator_dir, returning None if none are found or an error. #[allow(dead_code)] pub fn fetch_keys(&self, log: &slog::Logger) -> Option> { let key_pairs: Vec = fs::read_dir(&self.data_dir) - .unwrap() + .ok()? 
.filter_map(|validator_dir| { let validator_dir = validator_dir.ok()?; diff --git a/validator_client/src/duties/mod.rs b/validator_client/src/duties/mod.rs index b2ddfd0b09..f0269a41f6 100644 --- a/validator_client/src/duties/mod.rs +++ b/validator_client/src/duties/mod.rs @@ -163,8 +163,8 @@ mod tests { #[test] pub fn polling() { - let spec = Arc::new(ChainSpec::foundation()); - let duties_map = Arc::new(EpochDutiesMap::new(spec.slots_per_epoch)); + let spec = Arc::new(ChainSpec::mainnet()); + let duties_map = Arc::new(EpochDutiesMap::new(T::slots_per_epoch())); let keypair = Keypair::random(); let slot_clock = Arc::new(TestingSlotClock::new(0)); let beacon_node = Arc::new(TestBeaconNode::default()); diff --git a/validator_client/src/error.rs b/validator_client/src/error.rs index 29d7ba8829..97500f900b 100644 --- a/validator_client/src/error.rs +++ b/validator_client/src/error.rs @@ -1,9 +1,6 @@ use slot_clock; -use error_chain::{ - error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed, - impl_extract_backtrace, -}; +use error_chain::error_chain; error_chain! 
{ links { } diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 038399936d..f749154385 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -9,9 +9,16 @@ mod signer; use crate::config::Config as ValidatorClientConfig; use crate::service::Service as ValidatorService; use clap::{App, Arg}; +use eth2_config::{get_data_dir, read_from_file, write_to_file, Eth2Config}; use protos::services_grpc::ValidatorServiceClient; -use slog::{error, info, o, Drain}; -use types::Keypair; +use slog::{crit, error, info, o, Drain}; +use std::path::PathBuf; +use types::{Keypair, MainnetEthSpec, MinimalEthSpec}; + +pub const DEFAULT_SPEC: &str = "minimal"; +pub const DEFAULT_DATA_DIR: &str = ".lighthouse-validator"; +pub const CLIENT_CONFIG_FILENAME: &str = "validator-client.toml"; +pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; fn main() { // Logging @@ -32,6 +39,14 @@ fn main() { .help("Data directory for keys and databases.") .takes_value(true), ) + .arg( + Arg::with_name("eth2-spec") + .long("eth2-spec") + .short("e") + .value_name("TOML_FILE") + .help("Path to Ethereum 2.0 specifications file.") + .takes_value(true), + ) .arg( Arg::with_name("server") .long("server") @@ -40,24 +55,125 @@ fn main() { .takes_value(true), ) .arg( - Arg::with_name("spec") - .long("spec") - .value_name("spec") + Arg::with_name("spec-constants") + .long("spec-constants") + .value_name("TITLE") .short("s") - .help("Configuration of Beacon Chain") + .help("The title of the spec constants for chain config.") .takes_value(true) - .possible_values(&["foundation", "few_validators", "lighthouse_testnet"]) - .default_value("lighthouse_testnet"), + .possible_values(&["mainnet", "minimal"]) + .default_value("minimal"), ) .get_matches(); - let config = ValidatorClientConfig::parse_args(&matches, &log) - .expect("Unable to build a configuration for the validator client."); + let data_dir = match get_data_dir(&matches, PathBuf::from(DEFAULT_DATA_DIR)) { + 
Ok(dir) => dir, + Err(e) => { + crit!(log, "Failed to initialize data dir"; "error" => format!("{:?}", e)); + return; + } + }; + + let client_config_path = data_dir.join(CLIENT_CONFIG_FILENAME); + + // Attempt to lead the `ClientConfig` from disk. + // + // If file doesn't exist, create a new, default one. + let mut client_config = match read_from_file::( + client_config_path.clone(), + ) { + Ok(Some(c)) => c, + Ok(None) => { + let default = ValidatorClientConfig::default(); + if let Err(e) = write_to_file(client_config_path.clone(), &default) { + crit!(log, "Failed to write default ClientConfig to file"; "error" => format!("{:?}", e)); + return; + } + default + } + Err(e) => { + crit!(log, "Failed to load a ChainConfig file"; "error" => format!("{:?}", e)); + return; + } + }; + + // Ensure the `data_dir` in the config matches that supplied to the CLI. + client_config.data_dir = data_dir.clone(); + + // Update the client config with any CLI args. + match client_config.apply_cli_args(&matches) { + Ok(()) => (), + Err(s) => { + crit!(log, "Failed to parse ClientConfig CLI arguments"; "error" => s); + return; + } + }; + + let eth2_config_path: PathBuf = matches + .value_of("eth2-spec") + .and_then(|s| Some(PathBuf::from(s))) + .unwrap_or_else(|| data_dir.join(ETH2_CONFIG_FILENAME)); + + // Attempt to load the `Eth2Config` from file. + // + // If the file doesn't exist, create a default one depending on the CLI flags. + let mut eth2_config = match read_from_file::(eth2_config_path.clone()) { + Ok(Some(c)) => c, + Ok(None) => { + let default = match matches.value_of("spec-constants") { + Some("mainnet") => Eth2Config::mainnet(), + Some("minimal") => Eth2Config::minimal(), + _ => unreachable!(), // Guarded by slog. 
+ }; + if let Err(e) = write_to_file(eth2_config_path, &default) { + crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); + return; + } + default + } + Err(e) => { + crit!(log, "Failed to instantiate an Eth2Config"; "error" => format!("{:?}", e)); + return; + } + }; + + // Update the eth2 config with any CLI flags. + match eth2_config.apply_cli_args(&matches) { + Ok(()) => (), + Err(s) => { + crit!(log, "Failed to parse Eth2Config CLI arguments"; "error" => s); + return; + } + }; + + info!( + log, + "Starting validator client"; + "datadir" => client_config.data_dir.to_str(), + "spec_constants" => ð2_config.spec_constants, + ); + + let result = match eth2_config.spec_constants.as_str() { + "mainnet" => ValidatorService::::start::( + client_config, + eth2_config, + log.clone(), + ), + "minimal" => ValidatorService::::start::( + client_config, + eth2_config, + log.clone(), + ), + other => { + crit!(log, "Unknown spec constants"; "title" => other); + return; + } + }; // start the validator service. // this specifies the GRPC and signer type to use as the duty manager beacon node. 
- match ValidatorService::::start(config, log.clone()) { + match result { Ok(_) => info!(log, "Validator client shutdown successfully."), - Err(e) => error!(log, "Validator exited due to: {}", e.to_string()), + Err(e) => crit!(log, "Validator client exited with error"; "error" => e.to_string()), } } diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index a340f99fc2..8dbb82b37f 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -16,6 +16,7 @@ use crate::error as error_chain; use crate::error::ErrorKind; use crate::signer::Signer; use bls::Keypair; +use eth2_config::Eth2Config; use grpcio::{ChannelBuilder, EnvBuilder}; use protos::services::Empty; use protos::services_grpc::{ @@ -31,7 +32,7 @@ use tokio::prelude::*; use tokio::runtime::Builder; use tokio::timer::Interval; use tokio_timer::clock::Clock; -use types::{ChainSpec, Epoch, Fork, Slot}; +use types::{ChainSpec, Epoch, EthSpec, Fork, Slot}; /// A fixed amount of time after a slot to perform operations. This gives the node time to complete /// per-slot processes. @@ -47,6 +48,7 @@ pub struct Service { slot_clock: SystemTimeSlotClock, /// The current slot we are processing. current_slot: Slot, + slots_per_epoch: u64, /// The chain specification for this clients instance. spec: Arc, /// The duties manager which maintains the state of when to perform actions. @@ -65,8 +67,9 @@ impl Service { /// /// This tries to connect to a beacon node. Once connected, it initialised the gRPC clients /// and returns an instance of the service. - fn initialize_service( - config: ValidatorConfig, + fn initialize_service( + client_config: ValidatorConfig, + eth2_config: Eth2Config, log: slog::Logger, ) -> error_chain::Result> { // initialise the beacon node client to check for a connection @@ -74,7 +77,7 @@ impl Service { let env = Arc::new(EnvBuilder::new().build()); // Beacon node gRPC beacon node endpoints. 
let beacon_node_client = { - let ch = ChannelBuilder::new(env.clone()).connect(&config.server); + let ch = ChannelBuilder::new(env.clone()).connect(&client_config.server); BeaconNodeServiceClient::new(ch) }; @@ -102,12 +105,12 @@ impl Service { return Err("Genesis time in the future".into()); } // verify the node's chain id - if config.spec.chain_id != info.chain_id as u8 { + if eth2_config.spec.chain_id != info.chain_id as u8 { error!( log, "Beacon Node's genesis time is in the future. No work to do.\n Exiting" ); - return Err(format!("Beacon node has the wrong chain id. Expected chain id: {}, node's chain id: {}", config.spec.chain_id, info.chain_id).into()); + return Err(format!("Beacon node has the wrong chain id. Expected chain id: {}, node's chain id: {}", eth2_config.spec.chain_id, info.chain_id).into()); } break info; } @@ -135,7 +138,7 @@ impl Service { // Beacon node gRPC beacon block endpoints. let beacon_block_client = { - let ch = ChannelBuilder::new(env.clone()).connect(&config.server); + let ch = ChannelBuilder::new(env.clone()).connect(&client_config.server); let beacon_block_service_client = Arc::new(BeaconBlockServiceClient::new(ch)); // a wrapper around the service client to implement the beacon block node trait Arc::new(BeaconBlockGrpcClient::new(beacon_block_service_client)) @@ -143,34 +146,42 @@ impl Service { // Beacon node gRPC validator endpoints. let validator_client = { - let ch = ChannelBuilder::new(env.clone()).connect(&config.server); + let ch = ChannelBuilder::new(env.clone()).connect(&client_config.server); Arc::new(ValidatorServiceClient::new(ch)) }; //Beacon node gRPC attester endpoints. 
let attestation_client = { - let ch = ChannelBuilder::new(env.clone()).connect(&config.server); + let ch = ChannelBuilder::new(env.clone()).connect(&client_config.server); Arc::new(AttestationServiceClient::new(ch)) }; // build the validator slot clock - let slot_clock = - SystemTimeSlotClock::new(genesis_slot, genesis_time, config.spec.seconds_per_slot) - .expect("Unable to instantiate SystemTimeSlotClock."); + let slot_clock = SystemTimeSlotClock::new( + genesis_slot, + genesis_time, + eth2_config.spec.seconds_per_slot, + ); let current_slot = slot_clock .present_slot() .map_err(ErrorKind::SlotClockError)? - .expect("Genesis must be in the future"); + .ok_or_else::(|| { + "Genesis is not in the past. Exiting.".into() + })?; /* Generate the duties manager */ // Load generated keypairs - let keypairs = match config.fetch_keys(&log) { + let keypairs = match client_config.fetch_keys(&log) { Some(kps) => Arc::new(kps), - None => panic!("No key pairs found, cannot start validator client without at least one. Try running `./account_manager generate` first.") + None => { + return Err("Unable to locate validator key pairs, nothing to do.".into()); + } }; + let slots_per_epoch = T::slots_per_epoch(); + // TODO: keypairs are randomly generated; they should be loaded from a file or generated. // https://github.com/sigp/lighthouse/issues/160 //let keypairs = Arc::new(generate_deterministic_keypairs(8)); @@ -178,7 +189,7 @@ impl Service { // Builds a mapping of Epoch -> Map(PublicKey, EpochDuty) // where EpochDuty contains slot numbers and attestation data that each validator needs to // produce work on. - let duties_map = RwLock::new(EpochDutiesMap::new(config.spec.slots_per_epoch)); + let duties_map = RwLock::new(EpochDutiesMap::new(slots_per_epoch)); // builds a manager which maintains the list of current duties for all known validators // and can check when a validator needs to perform a task. 
@@ -189,12 +200,13 @@ impl Service { beacon_node: validator_client, }); - let spec = Arc::new(config.spec); + let spec = Arc::new(eth2_config.spec); Ok(Service { fork, slot_clock, current_slot, + slots_per_epoch, spec, duties_manager, beacon_block_client, @@ -205,10 +217,17 @@ impl Service { /// Initialise the service then run the core thread. // TODO: Improve handling of generic BeaconNode types, to stub grpcClient - pub fn start(config: ValidatorConfig, log: slog::Logger) -> error_chain::Result<()> { + pub fn start( + client_config: ValidatorConfig, + eth2_config: Eth2Config, + log: slog::Logger, + ) -> error_chain::Result<()> { // connect to the node and retrieve its properties and initialize the gRPC clients - let mut service = - Service::::initialize_service(config, log)?; + let mut service = Service::::initialize_service::( + client_config, + eth2_config, + log, + )?; // we have connected to a node and established its parameters. Spin up the core service @@ -223,7 +242,9 @@ impl Service { .slot_clock .duration_to_next_slot() .map_err(|e| format!("System clock error: {:?}", e))? - .expect("Cannot start before genesis"); + .ok_or_else::(|| { + "Genesis is not in the past. Exiting.".into() + })?; // set up the validator work interval - start at next slot and proceed every slot let interval = { @@ -272,10 +293,12 @@ impl Service { error!(self.log, "SystemTimeError {:?}", e); return Err("Could not read system time".into()); } - Ok(slot) => slot.expect("Genesis is in the future"), + Ok(slot) => slot.ok_or_else::(|| { + "Genesis is not in the past. Exiting.".into() + })?, }; - let current_epoch = current_slot.epoch(self.spec.slots_per_epoch); + let current_epoch = current_slot.epoch(self.slots_per_epoch); // this is a fatal error. If the slot clock repeats, there is something wrong with // the timer, terminate immediately. 
@@ -292,7 +315,7 @@ impl Service { fn check_for_duties(&mut self) { let cloned_manager = self.duties_manager.clone(); let cloned_log = self.log.clone(); - let current_epoch = self.current_slot.epoch(self.spec.slots_per_epoch); + let current_epoch = self.current_slot.epoch(self.slots_per_epoch); // spawn a new thread separate to the runtime // TODO: Handle thread termination/timeout // TODO: Add duties thread back in, with channel to process duties in duty change. @@ -317,6 +340,7 @@ impl Service { let spec = self.spec.clone(); let beacon_node = self.beacon_block_client.clone(); let log = self.log.clone(); + let slots_per_epoch = self.slots_per_epoch; std::thread::spawn(move || { info!(log, "Producing a block"; "Validator"=> format!("{}", signers[signer_index])); let signer = &signers[signer_index]; @@ -326,6 +350,7 @@ impl Service { spec, beacon_node, signer, + slots_per_epoch, }; block_producer.handle_produce_block(log); }); @@ -338,6 +363,7 @@ impl Service { let spec = self.spec.clone(); let beacon_node = self.attestation_client.clone(); let log = self.log.clone(); + let slots_per_epoch = self.slots_per_epoch; std::thread::spawn(move || { info!(log, "Producing an attestation"; "Validator"=> format!("{}", signers[signer_index])); let signer = &signers[signer_index]; @@ -347,6 +373,7 @@ impl Service { spec, beacon_node, signer, + slots_per_epoch, }; attestation_producer.handle_produce_attestation(log); });