diff --git a/Cargo.toml b/Cargo.toml index 23ae186571..7bf700ee5f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,6 +10,7 @@ members = [ "eth2/utils/deposit_contract", "eth2/utils/eth2_config", "eth2/utils/eth2_interop_keypairs", + "eth2/utils/eth2_testnet_config", "eth2/utils/logging", "eth2/utils/eth2_hashing", "eth2/utils/lighthouse_metrics", diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 729adb3353..7caf1da06b 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -23,3 +23,6 @@ eth2_ssz_derive = { path = "../eth2/utils/ssz_derive" } hex = "0.4" validator_client = { path = "../validator_client" } rayon = "1.2.0" +eth2_testnet_config = { path = "../eth2/utils/eth2_testnet_config" } +web3 = "0.8.0" +futures = "0.1.25" diff --git a/account_manager/src/cli.rs b/account_manager/src/cli.rs index 01d6376cdb..d56898ebdc 100644 --- a/account_manager/src/cli.rs +++ b/account_manager/src/cli.rs @@ -10,6 +10,56 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .subcommand( SubCommand::with_name("new") .about("Create a new Ethereum 2.0 validator.") + .arg( + Arg::with_name("deposit-value") + .short("v") + .long("deposit-value") + .value_name("GWEI") + .takes_value(true) + .default_value("32000000000") + .help("The deposit amount in Gwei (not Wei). Default is 32 ETH."), + ) + .arg( + Arg::with_name("send-deposits") + .long("send-deposits") + .help("If present, submit validator deposits to an eth1 endpoint / + defined by the --eth1-endpoint. 
Requires either the / + --deposit-contract or --testnet-dir flag.") + ) + .arg( + Arg::with_name("eth1-endpoint") + .short("e") + .long("eth1-endpoint") + .value_name("HTTP_SERVER") + .takes_value(true) + .default_value("http://localhost:8545") + .help("The URL to the Eth1 JSON-RPC HTTP API (e.g., Geth/Parity-Ethereum)."), + ) + .arg( + Arg::with_name("account-index") + .short("i") + .long("account-index") + .value_name("INDEX") + .takes_value(true) + .default_value("0") + .help("The eth1 accounts[] index which will send the transaction"), + ) + .arg( + Arg::with_name("password") + .short("p") + .long("password") + .value_name("FILE") + .takes_value(true) + .help("The password file to unlock the eth1 account (see --index)"), + ) + .arg( + Arg::with_name("testnet-dir") + .long("testnet-dir") + .value_name("DIRECTORY") + .takes_value(true) + .help("The directory from which to read the deposit contract / + address. Defaults to the current Lighthouse testnet."), + ) .subcommand( SubCommand::with_name("insecure") .about("Produce insecure, ephemeral validators. 
DO NOT USE TO STORE VALUE.") @@ -25,7 +75,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("last") .index(2) .value_name("INDEX") - .help("Index of the first validator") + .help("Index of the last validator") .takes_value(true) .required(true), ), diff --git a/account_manager/src/lib.rs b/account_manager/src/lib.rs index 37d2d7692a..e6d02d614c 100644 --- a/account_manager/src/lib.rs +++ b/account_manager/src/lib.rs @@ -1,20 +1,30 @@ mod cli; use clap::ArgMatches; -use environment::RuntimeContext; +use deposit_contract::DEPOSIT_GAS; +use environment::{Environment, RuntimeContext}; +use eth2_testnet_config::Eth2TestnetConfig; +use futures::{future, Future, IntoFuture, Stream}; use rayon::prelude::*; -use slog::{crit, info}; +use slog::{crit, error, info, Logger}; use std::fs; +use std::fs::File; +use std::io::Read; use std::path::PathBuf; use types::{ChainSpec, EthSpec}; use validator_client::validator_directory::{ValidatorDirectory, ValidatorDirectoryBuilder}; +use web3::{ + transports::Http, + types::{Address, TransactionRequest, U256}, + Web3, +}; pub use cli::cli_app; /// Run the account manager, logging an error if the operation did not succeed. -pub fn run(matches: &ArgMatches, context: RuntimeContext) { - let log = context.log.clone(); - match run_account_manager(matches, context) { +pub fn run(matches: &ArgMatches, mut env: Environment) { + let log = env.core_context().log.clone(); + match run_account_manager(matches, env) { Ok(()) => (), Err(e) => crit!(log, "Account manager failed"; "error" => e), } @@ -23,26 +33,24 @@ pub fn run(matches: &ArgMatches, context: RuntimeContext) { /// Run the account manager, returning an error if the operation did not succeed. fn run_account_manager( matches: &ArgMatches, - context: RuntimeContext, + mut env: Environment, ) -> Result<(), String> { + let context = env.core_context(); let log = context.log.clone(); + // If the `datadir` was not provided, default to the home directory. 
If the home directory is + // not known, use the current directory. let datadir = matches .value_of("datadir") .map(PathBuf::from) .unwrap_or_else(|| { - let mut default_dir = match dirs::home_dir() { - Some(v) => v, - None => { - panic!("Failed to find a home directory"); - } - }; - default_dir.push(".lighthouse"); - default_dir.push("validators"); - default_dir + dirs::home_dir() + .unwrap_or_else(|| PathBuf::from(".")) + .join(".lighthouse") + .join("validators") }); - fs::create_dir_all(&datadir).map_err(|e| format!("Failed to initialize datadir: {}", e))?; + fs::create_dir_all(&datadir).map_err(|e| format!("Failed to create datadir: {}", e))?; info!( log, @@ -52,7 +60,7 @@ fn run_account_manager( match matches.subcommand() { ("validator", Some(matches)) => match matches.subcommand() { - ("new", Some(matches)) => run_new_validator_subcommand(matches, datadir, context)?, + ("new", Some(matches)) => run_new_validator_subcommand(matches, datadir, env)?, _ => { return Err("Invalid 'validator new' command. See --help.".to_string()); } @@ -77,8 +85,9 @@ enum KeygenMethod { fn run_new_validator_subcommand( matches: &ArgMatches, datadir: PathBuf, - context: RuntimeContext, + mut env: Environment, ) -> Result<(), String> { + let context = env.core_context(); let log = context.log.clone(); let methods: Vec = match matches.subcommand() { @@ -110,7 +119,119 @@ fn run_new_validator_subcommand( } }; - let validators = make_validators(datadir.clone(), &methods, context.eth2_config.spec)?; + let deposit_value = matches + .value_of("deposit-value") + .ok_or_else(|| "No deposit-value".to_string())? 
+ .parse::() + .map_err(|e| format!("Unable to parse deposit-value: {}", e))?; + + let validators = make_validators( + datadir.clone(), + &methods, + deposit_value, + &context.eth2_config.spec, + )?; + + if matches.is_present("send-deposits") { + let eth1_endpoint = matches + .value_of("eth1-endpoint") + .ok_or_else(|| "No eth1-endpoint".to_string())?; + let account_index = matches + .value_of("account-index") + .ok_or_else(|| "No account-index".to_string())? + .parse::() + .map_err(|e| format!("Unable to parse account-index: {}", e))?; + + // If supplied, load the eth1 account password from file. + let password = if let Some(password_path) = matches.value_of("password") { + Some( + File::open(password_path) + .map_err(|e| format!("Unable to open password file: {:?}", e)) + .and_then(|mut file| { + let mut password = String::new(); + file.read_to_string(&mut password) + .map_err(|e| format!("Unable to read password file to string: {:?}", e)) + .map(|_| password) + }) + .map(|password| { + // Trim the line feed from the end of the password file, if present. + if password.ends_with("\n") { + password[0..password.len() - 1].to_string() + } else { + password + } + })?, + ) + } else { + None + }; + + info!( + log, + "Submitting validator deposits"; + "eth1_node_http_endpoint" => eth1_endpoint + ); + + // Load the testnet configuration from disk, or use the default testnet. + let eth2_testnet_config: Eth2TestnetConfig = if let Some(testnet_dir_str) = + matches.value_of("testnet-dir") + { + let testnet_dir = testnet_dir_str + .parse::() + .map_err(|e| format!("Unable to parse testnet-dir: {}", e))?; + + if !testnet_dir.exists() { + return Err(format!( + "Testnet directory at {:?} does not exist", + testnet_dir + )); + } + + info!( + log, + "Loading deposit contract address"; + "testnet_dir" => format!("{:?}", &testnet_dir) + ); + + Eth2TestnetConfig::load(testnet_dir.clone()) + .map_err(|e| format!("Failed to load testnet dir at {:?}: {}", testnet_dir, e))? 
+ } else { + info!( + log, + "Using Lighthouse testnet deposit contract"; + ); + + Eth2TestnetConfig::hard_coded() + .map_err(|e| format!("Failed to load hard_coded testnet dir: {}", e))? + }; + + // Convert from `types::Address` to `web3::types::Address`. + let deposit_contract = Address::from_slice( + eth2_testnet_config + .deposit_contract_address()? + .as_fixed_bytes(), + ); + + if let Err(()) = env.runtime().block_on(deposit_validators( + context.clone(), + eth1_endpoint.to_string(), + deposit_contract, + validators.clone(), + account_index, + deposit_value, + password, + )) { + error!( + log, + "Created validators but could not submit deposits"; + ) + } else { + info!( + log, + "Validator deposits complete"; + ); + } + } info!( log, @@ -126,14 +247,15 @@ fn run_new_validator_subcommand( fn make_validators( datadir: PathBuf, methods: &[KeygenMethod], - spec: ChainSpec, + deposit_value: u64, + spec: &ChainSpec, ) -> Result, String> { methods .par_iter() .map(|method| { let mut builder = ValidatorDirectoryBuilder::default() .spec(spec.clone()) - .full_deposit_amount()?; + .custom_deposit_amount(deposit_value); builder = match method { KeygenMethod::Insecure(index) => builder.insecure_keypairs(*index), @@ -148,3 +270,172 @@ fn make_validators( }) .collect() } + +/// For each `ValidatorDirectory`, submit a deposit transaction to the `eth1_endpoint`. +/// +/// Returns success as soon as the eth1 endpoint accepts the transaction (i.e., does not wait for +/// transaction success/revert). 
+fn deposit_validators( + context: RuntimeContext, + eth1_endpoint: String, + deposit_contract: Address, + validators: Vec, + account_index: usize, + deposit_value: u64, + password: Option, +) -> impl Future { + let log_1 = context.log.clone(); + let log_2 = context.log.clone(); + + Http::new(ð1_endpoint) + .map_err(move |e| { + error!( + log_1, + "Failed to start web3 HTTP transport"; + "error" => format!("{:?}", e) + ) + }) + .into_future() + /* + * Loop through the validator directories and submit the deposits. + */ + .and_then(move |(event_loop, transport)| { + let web3 = Web3::new(transport); + + futures::stream::iter_ok(validators) + .for_each(move |validator| { + let web3 = web3.clone(); + let log = log_2.clone(); + let password = password.clone(); + + deposit_validator( + web3, + deposit_contract, + &validator, + deposit_value, + account_index, + password, + log, + ) + }) + .map(|_| event_loop) + }) + // Web3 gives errors if the event loop is dropped whilst performing requests. + .map(|event_loop| drop(event_loop)) +} + +/// For the given `ValidatorDirectory`, submit a deposit transaction to the `web3` node. +/// +/// Returns success as soon as the eth1 endpoint accepts the transaction (i.e., does not wait for +/// transaction success/revert). 
+fn deposit_validator( + web3: Web3, + deposit_contract: Address, + validator: &ValidatorDirectory, + deposit_amount: u64, + account_index: usize, + password_opt: Option, + log: Logger, +) -> impl Future { + validator + .voting_keypair + .clone() + .ok_or_else(|| error!(log, "Validator does not have voting keypair")) + .and_then(|voting_keypair| { + validator + .deposit_data + .clone() + .ok_or_else(|| error!(log, "Validator does not have deposit data")) + .map(|deposit_data| (voting_keypair, deposit_data)) + }) + .into_future() + .and_then(move |(voting_keypair, deposit_data)| { + let pubkey_1 = voting_keypair.pk.clone(); + let pubkey_2 = voting_keypair.pk.clone(); + + let web3_1 = web3.clone(); + let web3_2 = web3.clone(); + + let log_1 = log.clone(); + let log_2 = log.clone(); + + web3.eth() + .accounts() + .map_err(|e| format!("Failed to get accounts: {:?}", e)) + .and_then(move |accounts| { + accounts + .get(account_index) + .cloned() + .ok_or_else(|| "Insufficient accounts for deposit".to_string()) + }) + /* + * If a password was supplied, unlock the account. + */ + .and_then(move |from_address| { + let future: Box + Send> = + if let Some(password) = password_opt { + // Unlock for only a single transaction. + let duration = None; + + let future = web3_1 + .personal() + .unlock_account(from_address, &password, duration) + .then(move |result| match result { + Ok(true) => Ok(from_address), + Ok(false) => { + Err("Eth1 node refused to unlock account. Check password." + .to_string()) + } + Err(e) => Err(format!("Eth1 unlock request failed: {:?}", e)), + }); + + Box::new(future) + } else { + Box::new(future::ok(from_address)) + }; + + future + }) + /* + * Submit the deposit transaction. 
+ */ + .and_then(move |from| { + let tx_request = TransactionRequest { + from, + to: Some(deposit_contract), + gas: Some(U256::from(DEPOSIT_GAS)), + gas_price: None, + value: Some(U256::from(from_gwei(deposit_amount))), + data: Some(deposit_data.into()), + nonce: None, + condition: None, + }; + + web3_2 + .eth() + .send_transaction(tx_request) + .map_err(|e| format!("Failed to call deposit fn: {:?}", e)) + }) + .map(move |tx| { + info!( + log_1, + "Validator deposit successful"; + "eth1_tx_hash" => format!("{:?}", tx), + "validator_voting_pubkey" => format!("{:?}", pubkey_1) + ) + }) + .map_err(move |e| { + error!( + log_2, + "Validator deposit_failed"; + "error" => e, + "validator_voting_pubkey" => format!("{:?}", pubkey_2) + ) + }) + }) +} + +/// Converts gwei to wei. +fn from_gwei(gwei: u64) -> U256 { + U256::from(gwei) * U256::exp10(9) +} diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 57bf4f7bcc..ae99c8ce66 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -34,3 +34,6 @@ logging = { path = "../eth2/utils/logging" } futures = "0.1.29" environment = { path = "../lighthouse/environment" } genesis = { path = "genesis" } +eth2_testnet_config = { path = "../eth2/utils/eth2_testnet_config" } +eth2-libp2p = { path = "./eth2-libp2p" } +eth2_ssz = { path = "../eth2/utils/ssz" } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 7feb8f2f96..3cdcb7f9aa 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3,6 +3,7 @@ use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::events::{EventHandler, EventKind}; use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; +use crate::head_tracker::HeadTracker; use crate::metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; use lmd_ghost::LmdGhost; @@ -126,6 
+127,8 @@ pub struct BeaconChain { pub fork_choice: ForkChoice, /// A handler for events generated by the beacon chain. pub event_handler: T::EventHandler, + /// Used to track the heads of the beacon chain. + pub(crate) head_tracker: HeadTracker, /// Logging to CLI, etc. pub(crate) log: Logger, } @@ -137,10 +140,35 @@ impl BeaconChain { pub fn persist(&self) -> Result<(), Error> { let timer = metrics::start_timer(&metrics::PERSIST_CHAIN); + let canonical_head = self.head(); + + let finalized_checkpoint = { + let beacon_block_root = canonical_head.beacon_state.finalized_checkpoint.root; + let beacon_block = self + .store + .get::>(&beacon_block_root)? + .ok_or_else(|| Error::MissingBeaconBlock(beacon_block_root))?; + let beacon_state_root = beacon_block.state_root; + let beacon_state = self + .store + .get_state(&beacon_state_root, Some(beacon_block.slot))? + .ok_or_else(|| Error::MissingBeaconState(beacon_state_root))?; + + CheckPoint { + beacon_block_root, + beacon_block, + beacon_state_root, + beacon_state, + } + }; + let p: PersistedBeaconChain = PersistedBeaconChain { - canonical_head: self.canonical_head.read().clone(), + canonical_head, + finalized_checkpoint, op_pool: PersistedOperationPool::from_operation_pool(&self.op_pool), genesis_block_root: self.genesis_block_root, + ssz_head_tracker: self.head_tracker.to_ssz_container(), + fork_choice: self.fork_choice.as_ssz_container(), }; let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes()); @@ -331,10 +359,17 @@ impl BeaconChain { self.canonical_head.read().clone() } + /// Returns the current heads of the `BeaconChain`. For the canonical head, see `Self::head`. + /// + /// Returns `(block_root, block_slot)`. + pub fn heads(&self) -> Vec<(Hash256, Slot)> { + self.head_tracker.heads() + } + /// Returns the `BeaconState` at the given slot. /// - /// Returns `None` when the state is not found in the database or there is an error skipping - /// to a future state. 
+ /// Returns `None` when the state is not found in the database or there is an error skipping + /// to a future state. pub fn state_at_slot(&self, slot: Slot) -> Result, Error> { let head_state = self.head().beacon_state; @@ -1219,6 +1254,8 @@ impl BeaconChain { metrics::stop_timer(db_write_timer); + self.head_tracker.register_block(block_root, &block); + let fork_choice_register_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE_REGISTER); @@ -1324,6 +1361,11 @@ impl BeaconChain { let (proposer_slashings, attester_slashings) = self.op_pool.get_slashings(&state, &self.spec); + let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; + let deposits = eth1_chain + .deposits_for_block_inclusion(&state, ð1_data, &self.spec)? + .into(); + let mut block = BeaconBlock { slot: state.slot, parent_root, @@ -1332,14 +1374,12 @@ impl BeaconChain { signature: Signature::empty_signature(), body: BeaconBlockBody { randao_reveal, - eth1_data: eth1_chain.eth1_data_for_block_production(&state, &self.spec)?, + eth1_data, graffiti, proposer_slashings: proposer_slashings.into(), attester_slashings: attester_slashings.into(), attestations: self.op_pool.get_attestations(&state, &self.spec).into(), - deposits: eth1_chain - .deposits_for_block_inclusion(&state, &self.spec)? 
- .into(), + deposits, voluntary_exits: self.op_pool.get_voluntary_exits(&state, &self.spec).into(), }, }; @@ -1596,6 +1636,23 @@ impl BeaconChain { } } +impl Drop for BeaconChain { + fn drop(&mut self) { + if let Err(e) = self.persist() { + error!( + self.log, + "Failed to persist BeaconChain on drop"; + "error" => format!("{:?}", e) + ) + } else { + info!( + self.log, + "Saved beacon chain state"; + ) + } + } +} + fn write_state(prefix: &str, state: &BeaconState, log: &Logger) { if WRITE_BLOCK_PROCESSING_SSZ { let root = Hash256::from_slice(&state.tree_hash_root()); diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index b8e3b868ff..98a4aa3c2e 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,5 +1,6 @@ use crate::eth1_chain::CachingEth1Backend; use crate::events::NullEventHandler; +use crate::head_tracker::HeadTracker; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; use crate::{ BeaconChain, BeaconChainTypes, CheckPoint, Eth1Chain, Eth1ChainBackend, EventHandler, @@ -88,6 +89,8 @@ pub struct BeaconChainBuilder { eth1_chain: Option>, event_handler: Option, slot_clock: Option, + persisted_beacon_chain: Option>, + head_tracker: Option, spec: ChainSpec, log: Option, } @@ -128,6 +131,8 @@ where eth1_chain: None, event_handler: None, slot_clock: None, + persisted_beacon_chain: None, + head_tracker: None, spec: TEthSpec::default_spec(), log: None, } @@ -208,11 +213,17 @@ where self.op_pool = Some( p.op_pool + .clone() .into_operation_pool(&p.canonical_head.beacon_state, &self.spec), ); - self.finalized_checkpoint = Some(p.canonical_head); + self.finalized_checkpoint = Some(p.finalized_checkpoint.clone()); self.genesis_block_root = Some(p.genesis_block_root); + self.head_tracker = Some( + HeadTracker::from_ssz_container(&p.ssz_head_tracker) + .map_err(|e| format!("Failed to decode head tracker for database: {:?}", e))?, + ); + 
self.persisted_beacon_chain = Some(p); Ok(self) } @@ -264,28 +275,6 @@ where Ok(self.empty_op_pool()) } - /// Sets the `BeaconChain` fork choice backend. - /// - /// Requires the store and state to have been specified earlier in the build chain. - pub fn fork_choice_backend(mut self, backend: TLmdGhost) -> Result { - let store = self - .store - .clone() - .ok_or_else(|| "reduced_tree_fork_choice requires a store")?; - let genesis_block_root = self - .genesis_block_root - .ok_or_else(|| "fork_choice_backend requires a genesis_block_root")?; - - self.fork_choice = Some(ForkChoice::new( - store, - backend, - genesis_block_root, - self.spec.genesis_slot, - )); - - Ok(self) - } - /// Sets the `BeaconChain` eth1 backend. pub fn eth1_backend(mut self, backend: Option) -> Self { self.eth1_chain = backend.map(Eth1Chain::new); @@ -337,19 +326,24 @@ where >, String, > { - let mut canonical_head = self - .finalized_checkpoint - .ok_or_else(|| "Cannot build without a state".to_string())?; + let log = self + .log + .ok_or_else(|| "Cannot build without a logger".to_string())?; + + // If this beacon chain is being loaded from disk, use the stored head. Otherwise, just use + // the finalized checkpoint (which is probably genesis). + let mut canonical_head = if let Some(persisted_beacon_chain) = self.persisted_beacon_chain { + persisted_beacon_chain.canonical_head + } else { + self.finalized_checkpoint + .ok_or_else(|| "Cannot build without a state".to_string())? 
+ }; canonical_head .beacon_state .build_all_caches(&self.spec) .map_err(|e| format!("Failed to build state caches: {:?}", e))?; - let log = self - .log - .ok_or_else(|| "Cannot build without a logger".to_string())?; - if canonical_head.beacon_block.state_root != canonical_head.beacon_state_root { return Err("beacon_block.state_root != beacon_state".to_string()); } @@ -379,6 +373,7 @@ where event_handler: self .event_handler .ok_or_else(|| "Cannot build without an event handler".to_string())?, + head_tracker: self.head_tracker.unwrap_or_default(), log: log.clone(), }; @@ -414,27 +409,43 @@ where TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, { - /// Initializes a new, empty (no recorded votes or blocks) fork choice, using the - /// `ThreadSafeReducedTree` backend. + /// Initializes a fork choice with the `ThreadSafeReducedTree` backend. /// - /// Requires the store and state to be initialized. - pub fn empty_reduced_tree_fork_choice(self) -> Result { + /// If this builder is being "resumed" from disk, then rebuild the last fork choice stored to + /// the database. Otherwise, create a new, empty fork choice. + pub fn reduced_tree_fork_choice(mut self) -> Result { let store = self .store .clone() .ok_or_else(|| "reduced_tree_fork_choice requires a store")?; - let finalized_checkpoint = &self - .finalized_checkpoint - .as_ref() - .expect("should have finalized checkpoint"); - let backend = ThreadSafeReducedTree::new( - store.clone(), - &finalized_checkpoint.beacon_block, - finalized_checkpoint.beacon_block_root, - ); + let fork_choice = if let Some(persisted_beacon_chain) = &self.persisted_beacon_chain { + ForkChoice::from_ssz_container( + persisted_beacon_chain.fork_choice.clone(), + store.clone(), + ) + .map_err(|e| format!("Unable to decode fork choice from db: {:?}", e))? 
+ } else { + let finalized_checkpoint = &self + .finalized_checkpoint + .as_ref() + .ok_or_else(|| "fork_choice_backend requires a finalized_checkpoint")?; + let genesis_block_root = self + .genesis_block_root + .ok_or_else(|| "fork_choice_backend requires a genesis_block_root")?; - self.fork_choice_backend(backend) + let backend = ThreadSafeReducedTree::new( + store.clone(), + &finalized_checkpoint.beacon_block, + finalized_checkpoint.beacon_block_root, + ); + + ForkChoice::new(store, backend, genesis_block_root, self.spec.genesis_slot) + }; + + self.fork_choice = Some(fork_choice); + + Ok(self) } } @@ -611,7 +622,7 @@ mod test { .null_event_handler() .testing_slot_clock(Duration::from_secs(1)) .expect("should configure testing slot clock") - .empty_reduced_tree_fork_choice() + .reduced_tree_fork_choice() .expect("should add fork choice to builder") .build() .expect("should build"); diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 5082ddefaf..16278cf54c 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -4,7 +4,8 @@ use exit_future::Exit; use futures::Future; use integer_sqrt::IntegerSquareRoot; use rand::prelude::*; -use slog::{crit, Logger}; +use slog::{crit, debug, error, trace, Logger}; +use state_processing::per_block_processing::get_new_eth1_data; use std::collections::HashMap; use std::iter::DoubleEndedIterator; use std::iter::FromIterator; @@ -33,7 +34,7 @@ pub enum Error { /// voting period. UnableToGetPreviousStateRoot(BeaconStateError), /// The state required to find the previous eth1 block was not found in the store. - PreviousStateNotInDB, + PreviousStateNotInDB(Hash256), /// There was an error accessing an object in the database. StoreError(StoreError), /// The eth1 head block at the start of the eth1 voting period is unknown. @@ -89,16 +90,21 @@ where /// Returns a list of `Deposits` that may be included in a block. 
/// /// Including all of the returned `Deposits` in a block should _not_ cause it to become - /// invalid. + /// invalid (i.e., this function should respect the maximum). + /// + /// `eth1_data_vote` is the `Eth1Data` that the block producer would include in their + /// block. This vote may change the `state.eth1_data` value, which would change the deposit + /// count and therefore change the output of this function. pub fn deposits_for_block_inclusion( &self, state: &BeaconState, + eth1_data_vote: &Eth1Data, spec: &ChainSpec, ) -> Result, Error> { if self.use_dummy_backend { - DummyEth1ChainBackend::default().queued_deposits(state, spec) + DummyEth1ChainBackend::default().queued_deposits(state, eth1_data_vote, spec) } else { - self.backend.queued_deposits(state, spec) + self.backend.queued_deposits(state, eth1_data_vote, spec) } } } @@ -119,6 +125,7 @@ pub trait Eth1ChainBackend: Sized + Send + Sync { fn queued_deposits( &self, beacon_state: &BeaconState, + eth1_data_vote: &Eth1Data, spec: &ChainSpec, ) -> Result, Error>; } @@ -131,6 +138,7 @@ pub trait Eth1ChainBackend: Sized + Send + Sync { pub struct DummyEth1ChainBackend(PhantomData); impl Eth1ChainBackend for DummyEth1ChainBackend { + /// Produce some deterministic junk based upon the current epoch. fn eth1_data(&self, state: &BeaconState, _spec: &ChainSpec) -> Result { let current_epoch = state.current_epoch(); let slots_per_voting_period = T::slots_per_eth1_voting_period() as u64; @@ -146,7 +154,13 @@ impl Eth1ChainBackend for DummyEth1ChainBackend { }) } - fn queued_deposits(&self, _: &BeaconState, _: &ChainSpec) -> Result, Error> { + /// The dummy back-end never produces deposits. 
+ fn queued_deposits( + &self, + _: &BeaconState, + _: &Eth1Data, + _: &ChainSpec, + ) -> Result, Error> { Ok(vec![]) } } @@ -201,24 +215,102 @@ impl CachingEth1Backend { impl Eth1ChainBackend for CachingEth1Backend { fn eth1_data(&self, state: &BeaconState, spec: &ChainSpec) -> Result { + // Note: we do not return random junk if this function call fails as it would be caused by + // an internal error. let prev_eth1_hash = eth1_block_hash_at_start_of_voting_period(self.store.clone(), state)?; + let period = T::SlotsPerEth1VotingPeriod::to_u64(); + let eth1_follow_distance = spec.eth1_follow_distance; + let voting_period_start_slot = (state.slot / period) * period; + let voting_period_start_seconds = slot_start_seconds::( + state.genesis_time, + spec.milliseconds_per_slot, + voting_period_start_slot, + ); + let blocks = self.core.blocks().read(); - let eth1_data = eth1_data_sets(blocks.iter(), state, prev_eth1_hash, spec) - .map(|(new_eth1_data, all_eth1_data)| { - collect_valid_votes(state, new_eth1_data, all_eth1_data) - }) - .and_then(find_winning_vote) - .unwrap_or_else(|| { - crit!( - self.log, - "Unable to cast valid vote for Eth1Data"; - "hint" => "check connection to eth1 node", - "reason" => "no votes", - ); - random_eth1_data() - }); + let (new_eth1_data, all_eth1_data) = if let Some(sets) = eth1_data_sets( + blocks.iter(), + prev_eth1_hash, + voting_period_start_seconds, + spec, + &self.log, + ) { + sets + } else { + // The algorithm was unable to find the `new_eth1_data` and `all_eth1_data` sets. + // + // This is likely because the caches are empty or the previous eth1 block hash is not + // in the cache. + // + // This situation can also be caused when a testnet does not have an adequate delay + // between the eth1 genesis block and the eth2 genesis block. This delay needs to be at + // least `2 * ETH1_FOLLOW_DISTANCE`. 
+ crit!( + self.log, + "Unable to find eth1 data sets"; + "lowest_block_number" => self.core.lowest_block_number(), + "earliest_block_timestamp" => self.core.earliest_block_timestamp(), + "genesis_time" => state.genesis_time, + "outcome" => "casting random eth1 vote" + ); + + return Ok(random_eth1_data()); + }; + + trace!( + self.log, + "Found eth1 data sets"; + "all_eth1_data" => all_eth1_data.len(), + "new_eth1_data" => new_eth1_data.len(), + ); + + let valid_votes = collect_valid_votes(state, new_eth1_data, all_eth1_data); + + let eth1_data = if let Some(eth1_data) = find_winning_vote(valid_votes) { + eth1_data + } else { + // In this case, there are no other viable votes (perhaps there are no votes yet or all + // the existing votes are junk). + // + // Here we choose the latest block in our voting window. + blocks + .iter() + .rev() + .skip_while(|eth1_block| eth1_block.timestamp > voting_period_start_seconds) + .skip(eth1_follow_distance as usize) + .next() + .map(|block| { + trace!( + self.log, + "Choosing default eth1_data"; + "eth1_block_number" => block.number, + "eth1_block_hash" => format!("{:?}", block.hash), + ); + + block + }) + .and_then(|block| block.clone().eth1_data()) + .unwrap_or_else(|| { + crit!( + self.log, + "Unable to find a winning eth1 vote"; + "outcome" => "casting random eth1 vote" + ); + + random_eth1_data() + }) + }; + + debug!( + self.log, + "Produced vote for eth1 chain"; + "is_period_tail" => is_period_tail(state), + "deposit_root" => format!("{:?}", eth1_data.deposit_root), + "deposit_count" => eth1_data.deposit_count, + "block_hash" => format!("{:?}", eth1_data.block_hash), + ); Ok(eth1_data) } @@ -226,10 +318,15 @@ impl Eth1ChainBackend for CachingEth1Backend { fn queued_deposits( &self, state: &BeaconState, + eth1_data_vote: &Eth1Data, _spec: &ChainSpec, ) -> Result, Error> { - let deposit_count = state.eth1_data.deposit_count; let deposit_index = state.eth1_deposit_index; + let deposit_count = if let Some(new_eth1_data) = 
get_new_eth1_data(state, eth1_data_vote) { + new_eth1_data.deposit_count + } else { + state.eth1_data.deposit_count + }; if deposit_index > deposit_count { Err(Error::DepositIndexTooHigh) @@ -281,12 +378,14 @@ fn eth1_block_hash_at_start_of_voting_period( ) -> Result { let period = T::SlotsPerEth1VotingPeriod::to_u64(); - // Find `state.eth1_data.block_hash` for the state at the start of the voting period. - if state.slot % period < period / 2 { - // When the state is less than half way through the period we can safely assume that - // the eth1_data has not changed since the start of the period. + if !eth1_data_change_is_possible(state) { + // If there are less than 50% of the votes in the current state, it's impossible that the + // `eth1_data.block_hash` has changed from the value at `state.eth1_data.block_hash`. Ok(state.eth1_data.block_hash) } else { + // If there have been more than 50% of votes in this period it's possible (but not + // necessary) that the `state.eth1_data.block_hash` has been changed since the start of the + // voting period. let slot = (state.slot / period) * period; let prev_state_root = state .get_state_root(slot) @@ -296,33 +395,32 @@ fn eth1_block_hash_at_start_of_voting_period( .get_state::(&prev_state_root, Some(slot)) .map_err(Error::StoreError)? .map(|state| state.eth1_data.block_hash) - .ok_or_else(|| Error::PreviousStateNotInDB) + .ok_or_else(|| Error::PreviousStateNotInDB(*prev_state_root)) } } +/// Returns true if there are enough eth1 votes in the given `state` to have updated +/// `state.eth1_data`. +fn eth1_data_change_is_possible(state: &BeaconState) -> bool { + 2 * state.eth1_data_votes.len() > E::SlotsPerEth1VotingPeriod::to_usize() +} + /// Calculates and returns `(new_eth1_data, all_eth1_data)` for the given `state`, based upon the /// blocks in the `block` iterator. /// /// `prev_eth1_hash` is the `eth1_data.block_hash` at the start of the voting period defined by /// `state.slot`. 
-fn eth1_data_sets<'a, T: EthSpec, I>( +fn eth1_data_sets<'a, I>( blocks: I, - state: &BeaconState, prev_eth1_hash: Hash256, + voting_period_start_seconds: u64, spec: &ChainSpec, + log: &Logger, ) -> Option<(Eth1DataBlockNumber, Eth1DataBlockNumber)> where - T: EthSpec, I: DoubleEndedIterator + Clone, { - let period = T::SlotsPerEth1VotingPeriod::to_u64(); let eth1_follow_distance = spec.eth1_follow_distance; - let voting_period_start_slot = (state.slot / period) * period; - let voting_period_start_seconds = slot_start_seconds::( - state.genesis_time, - spec.milliseconds_per_slot, - voting_period_start_slot, - ); let in_scope_eth1_data = blocks .rev() @@ -345,6 +443,12 @@ where HashMap::from_iter(all_eth1_data), )) } else { + error!( + log, + "The previous eth1 hash is not in cache"; + "previous_hash" => format!("{:?}", prev_eth1_hash) + ); + None } } @@ -356,8 +460,6 @@ fn collect_valid_votes( new_eth1_data: Eth1DataBlockNumber, all_eth1_data: Eth1DataBlockNumber, ) -> Eth1DataVoteCount { - let slots_per_eth1_voting_period = T::SlotsPerEth1VotingPeriod::to_u64(); - let mut valid_votes = HashMap::new(); state @@ -368,10 +470,7 @@ fn collect_valid_votes( .get(vote) .map(|block_number| (vote.clone(), *block_number)) .or_else(|| { - let slot = state.slot % slots_per_eth1_voting_period; - let period_tail = slot >= slots_per_eth1_voting_period.integer_sqrt(); - - if period_tail { + if is_period_tail(state) { all_eth1_data .get(vote) .map(|block_number| (vote.clone(), *block_number)) @@ -390,6 +489,15 @@ fn collect_valid_votes( valid_votes } +/// Indicates if the given `state` is in the tail of it's eth1 voting period (i.e., in the later +/// slots). +fn is_period_tail(state: &BeaconState) -> bool { + let slots_per_eth1_voting_period = E::SlotsPerEth1VotingPeriod::to_u64(); + let slot = state.slot % slots_per_eth1_voting_period; + + slot >= slots_per_eth1_voting_period.integer_sqrt() +} + /// Selects the winning vote from `valid_votes`. 
fn find_winning_vote(valid_votes: Eth1DataVoteCount) -> Option { valid_votes @@ -417,6 +525,7 @@ fn slot_start_seconds( #[cfg(test)] mod test { use super::*; + use environment::null_logger; use types::{test_utils::DepositTestTask, MinimalEthSpec}; type E = MinimalEthSpec; @@ -468,7 +577,6 @@ mod test { mod eth1_chain_json_backend { use super::*; - use environment::null_logger; use eth1::DepositLog; use store::MemoryStore; use types::test_utils::{generate_deterministic_keypair, TestingDepositBuilder}; @@ -514,7 +622,7 @@ mod test { assert!( eth1_chain - .deposits_for_block_inclusion(&state, spec) + .deposits_for_block_inclusion(&state, &random_eth1_data(), spec) .is_ok(), "should succeed if cache is empty but no deposits are required" ); @@ -523,7 +631,7 @@ mod test { assert!( eth1_chain - .deposits_for_block_inclusion(&state, spec) + .deposits_for_block_inclusion(&state, &random_eth1_data(), spec) .is_err(), "should fail to get deposits if required, but cache is empty" ); @@ -567,7 +675,7 @@ mod test { assert!( eth1_chain - .deposits_for_block_inclusion(&state, spec) + .deposits_for_block_inclusion(&state, &random_eth1_data(), spec) .is_ok(), "should succeed if no deposits are required" ); @@ -579,7 +687,7 @@ mod test { state.eth1_data.deposit_count = i as u64; let deposits_for_inclusion = eth1_chain - .deposits_for_block_inclusion(&state, spec) + .deposits_for_block_inclusion(&state, &random_eth1_data(), spec) .expect(&format!("should find deposit for {}", i)); let expected_len = @@ -657,6 +765,14 @@ mod test { prev_state.slot = Slot::new(period * 1_000); state.slot = Slot::new(period * 1_000 + period / 2); + // Add 50% of the votes so a lookup is required. + for _ in 0..period / 2 + 1 { + state + .eth1_data_votes + .push(random_eth1_data()) + .expect("should push eth1 vote"); + } + (0..2048).for_each(|i| { eth1_chain .backend @@ -728,6 +844,14 @@ mod test { state.slot = Slot::new(period / 2); + // Add 50% of the votes so a lookup is required. 
+ for _ in 0..period / 2 + 1 { + state + .eth1_data_votes + .push(random_eth1_data()) + .expect("should push eth1 vote"); + } + let expected_root = Hash256::from_low_u64_be(42); prev_state.eth1_data.block_hash = expected_root; @@ -757,8 +881,20 @@ mod test { mod eth1_data_sets { use super::*; + fn get_voting_period_start_seconds(state: &BeaconState, spec: &ChainSpec) -> u64 { + let period = ::SlotsPerEth1VotingPeriod::to_u64(); + let voting_period_start_slot = (state.slot / period) * period; + slot_start_seconds::( + state.genesis_time, + spec.milliseconds_per_slot, + voting_period_start_slot, + ) + } + #[test] fn empty_cache() { + let log = null_logger().unwrap(); + let spec = &E::default_spec(); let state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); let prev_eth1_hash = Hash256::zero(); @@ -766,13 +902,21 @@ mod test { let blocks = vec![]; assert_eq!( - eth1_data_sets(blocks.iter(), &state, prev_eth1_hash, &spec), + eth1_data_sets( + blocks.iter(), + prev_eth1_hash, + get_voting_period_start_seconds(&state, spec), + &spec, + &log + ), None ); } #[test] fn no_known_block_hash() { + let log = null_logger().unwrap(); + let mut spec = E::default_spec(); spec.milliseconds_per_slot = 1_000; @@ -782,13 +926,21 @@ mod test { let blocks = vec![get_eth1_block(0, 0)]; assert_eq!( - eth1_data_sets(blocks.iter(), &state, prev_eth1_hash, &spec), + eth1_data_sets( + blocks.iter(), + prev_eth1_hash, + get_voting_period_start_seconds(&state, &spec), + &spec, + &log + ), None ); } #[test] fn ideal_scenario() { + let log = null_logger().unwrap(); + let mut spec = E::default_spec(); spec.milliseconds_per_slot = 1_000; @@ -805,9 +957,14 @@ mod test { .map(|i| get_eth1_block(i, i)) .collect::>(); - let (new_eth1_data, all_eth1_data) = - eth1_data_sets(blocks.iter(), &state, prev_eth1_hash, &spec) - .expect("should find data"); + let (new_eth1_data, all_eth1_data) = eth1_data_sets( + blocks.iter(), + prev_eth1_hash, + get_voting_period_start_seconds(&state, &spec), + 
&spec, + &log, + ) + .expect("should find data"); assert_eq!( all_eth1_data.len(), diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 0b20fdb8d6..beed9de031 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -1,6 +1,7 @@ use crate::{errors::BeaconChainError, metrics, BeaconChain, BeaconChainTypes}; use lmd_ghost::LmdGhost; use parking_lot::RwLock; +use ssz_derive::{Decode, Encode}; use state_processing::{common::get_attesting_indices, per_slot_processing}; use std::sync::Arc; use store::{Error as StoreError, Store}; @@ -34,6 +35,16 @@ pub struct ForkChoice { best_justified_checkpoint: RwLock, } +impl PartialEq for ForkChoice { + /// This implementation ignores the `store`. + fn eq(&self, other: &Self) -> bool { + self.backend == other.backend + && self.genesis_block_root == other.genesis_block_root + && *self.justified_checkpoint.read() == *other.justified_checkpoint.read() + && *self.best_justified_checkpoint.read() == *other.best_justified_checkpoint.read() + } +} + impl ForkChoice { /// Instantiate a new fork chooser. /// @@ -291,6 +302,42 @@ impl ForkChoice { .update_finalized_root(finalized_block, finalized_block_root) .map_err(Into::into) } + + /// Returns a `SszForkChoice` which contains the current state of `Self`. + pub fn as_ssz_container(&self) -> SszForkChoice { + SszForkChoice { + genesis_block_root: self.genesis_block_root.clone(), + justified_checkpoint: self.justified_checkpoint.read().clone(), + best_justified_checkpoint: self.best_justified_checkpoint.read().clone(), + backend_bytes: self.backend.as_bytes(), + } + } + + /// Instantiates `Self` from a prior `SszForkChoice`. + /// + /// The created `Self` will have the same state as the `Self` that created the `SszForkChoice`. 
+ pub fn from_ssz_container(ssz_container: SszForkChoice, store: Arc) -> Result { + let backend = LmdGhost::from_bytes(&ssz_container.backend_bytes, store.clone())?; + + Ok(Self { + store, + backend, + genesis_block_root: ssz_container.genesis_block_root, + justified_checkpoint: RwLock::new(ssz_container.justified_checkpoint), + best_justified_checkpoint: RwLock::new(ssz_container.best_justified_checkpoint), + }) + } +} + +/// Helper struct that is used to encode/decode the state of the `ForkChoice` as SSZ bytes. +/// +/// This is used when persisting the state of the `BeaconChain` to disk. +#[derive(Encode, Decode, Clone)] +pub struct SszForkChoice { + genesis_block_root: Hash256, + justified_checkpoint: Checkpoint, + best_justified_checkpoint: Checkpoint, + backend_bytes: Vec, } impl From for Error { diff --git a/beacon_node/beacon_chain/src/head_tracker.rs b/beacon_node/beacon_chain/src/head_tracker.rs new file mode 100644 index 0000000000..850f9629c5 --- /dev/null +++ b/beacon_node/beacon_chain/src/head_tracker.rs @@ -0,0 +1,205 @@ +use parking_lot::RwLock; +use ssz_derive::{Decode, Encode}; +use std::collections::HashMap; +use std::iter::FromIterator; +use types::{BeaconBlock, EthSpec, Hash256, Slot}; + +#[derive(Debug, PartialEq)] +pub enum Error { + MismatchingLengths { roots_len: usize, slots_len: usize }, +} + +/// Maintains a list of `BeaconChain` head block roots and slots. +/// +/// Each time a new block is imported, it should be applied to the `Self::register_block` function. +/// In order for this struct to be effective, every single block that is imported must be +/// registered here. +#[derive(Default, Debug)] +pub struct HeadTracker(RwLock>); + +impl HeadTracker { + /// Register a block with `Self`, so it may or may not be included in a `Self::heads` call. + /// + /// This function assumes that no block is imported without its parent having already been + /// imported. 
It cannot detect an error if this is not the case, it is the responsibility of + /// the upstream user. + pub fn register_block(&self, block_root: Hash256, block: &BeaconBlock) { + let mut map = self.0.write(); + + map.remove(&block.parent_root); + map.insert(block_root, block.slot); + } + + /// Returns the list of heads in the chain. + pub fn heads(&self) -> Vec<(Hash256, Slot)> { + self.0 + .read() + .iter() + .map(|(root, slot)| (*root, *slot)) + .collect() + } + + /// Returns a `SszHeadTracker`, which contains all necessary information to restore the state + /// of `Self` at some later point. + pub fn to_ssz_container(&self) -> SszHeadTracker { + let (roots, slots) = self + .0 + .read() + .iter() + .map(|(hash, slot)| (*hash, *slot)) + .unzip(); + + SszHeadTracker { roots, slots } + } + + /// Creates a new `Self` from the given `SszHeadTracker`, restoring `Self` to the same state of + /// the `Self` that created the `SszHeadTracker`. + pub fn from_ssz_container(ssz_container: &SszHeadTracker) -> Result { + let roots_len = ssz_container.roots.len(); + let slots_len = ssz_container.slots.len(); + + if roots_len != slots_len { + return Err(Error::MismatchingLengths { + roots_len, + slots_len, + }); + } else { + let map = HashMap::from_iter( + ssz_container + .roots + .iter() + .zip(ssz_container.slots.iter()) + .map(|(root, slot)| (*root, *slot)), + ); + + Ok(Self(RwLock::new(map))) + } + } +} + +impl PartialEq for HeadTracker { + fn eq(&self, other: &HeadTracker) -> bool { + *self.0.read() == *other.0.read() + } +} + +/// Helper struct that is used to encode/decode the state of the `HeadTracker` as SSZ bytes. +/// +/// This is used when persisting the state of the `BeaconChain` to disk. 
+#[derive(Encode, Decode, Clone)] +pub struct SszHeadTracker { + roots: Vec, + slots: Vec, +} + +#[cfg(test)] +mod test { + use super::*; + use ssz::{Decode, Encode}; + use types::MainnetEthSpec; + + type E = MainnetEthSpec; + + #[test] + fn block_add() { + let spec = &E::default_spec(); + + let head_tracker = HeadTracker::default(); + + for i in 0..16 { + let mut block = BeaconBlock::empty(spec); + let block_root = Hash256::from_low_u64_be(i); + + block.slot = Slot::new(i); + block.parent_root = if i == 0 { + Hash256::random() + } else { + Hash256::from_low_u64_be(i - 1) + }; + + head_tracker.register_block::(block_root, &block); + } + + assert_eq!( + head_tracker.heads(), + vec![(Hash256::from_low_u64_be(15), Slot::new(15))], + "should only have one head" + ); + + let mut block = BeaconBlock::empty(spec); + let block_root = Hash256::from_low_u64_be(42); + block.slot = Slot::new(15); + block.parent_root = Hash256::from_low_u64_be(14); + head_tracker.register_block::(block_root, &block); + + let heads = head_tracker.heads(); + + assert_eq!(heads.len(), 2, "should only have two heads"); + assert!( + heads + .iter() + .any(|(root, slot)| *root == Hash256::from_low_u64_be(15) && *slot == Slot::new(15)), + "should contain first head" + ); + assert!( + heads + .iter() + .any(|(root, slot)| *root == Hash256::from_low_u64_be(42) && *slot == Slot::new(15)), + "should contain second head" + ); + } + + #[test] + fn empty_round_trip() { + let non_empty = HeadTracker::default(); + for i in 0..16 { + non_empty.0.write().insert(Hash256::random(), Slot::new(i)); + } + let bytes = non_empty.to_ssz_container().as_ssz_bytes(); + + assert_eq!( + HeadTracker::from_ssz_container( + &SszHeadTracker::from_ssz_bytes(&bytes).expect("should decode") + ), + Ok(non_empty), + "non_empty should pass round trip" + ); + } + + #[test] + fn non_empty_round_trip() { + let non_empty = HeadTracker::default(); + for i in 0..16 { + non_empty.0.write().insert(Hash256::random(), Slot::new(i)); + } + let 
bytes = non_empty.to_ssz_container().as_ssz_bytes(); + + assert_eq!( + HeadTracker::from_ssz_container( + &SszHeadTracker::from_ssz_bytes(&bytes).expect("should decode") + ), + Ok(non_empty), + "non_empty should pass round trip" + ); + } + + #[test] + fn bad_length() { + let container = SszHeadTracker { + roots: vec![Hash256::random()], + slots: vec![], + }; + let bytes = container.as_ssz_bytes(); + + assert_eq!( + HeadTracker::from_ssz_container( + &SszHeadTracker::from_ssz_bytes(&bytes).expect("should decode") + ), + Err(Error::MismatchingLengths { + roots_len: 1, + slots_len: 0 + }), + "should fail decoding with bad lengths" + ); + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 3e412fe67a..1112a6dd9a 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -9,6 +9,7 @@ mod errors; pub mod eth1_chain; pub mod events; mod fork_choice; +mod head_tracker; mod metrics; mod persisted_beacon_chain; pub mod test_utils; diff --git a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs index 52a2028fca..08b75420d2 100644 --- a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs +++ b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs @@ -1,3 +1,5 @@ +use crate::fork_choice::SszForkChoice; +use crate::head_tracker::SszHeadTracker; use crate::{BeaconChainTypes, CheckPoint}; use operation_pool::PersistedOperationPool; use ssz::{Decode, Encode}; @@ -8,11 +10,14 @@ use types::Hash256; /// 32-byte key for accessing the `PersistedBeaconChain`. 
pub const BEACON_CHAIN_DB_KEY: &str = "PERSISTEDBEACONCHAINPERSISTEDBEA"; -#[derive(Encode, Decode)] +#[derive(Clone, Encode, Decode)] pub struct PersistedBeaconChain { pub canonical_head: CheckPoint, + pub finalized_checkpoint: CheckPoint, pub op_pool: PersistedOperationPool, pub genesis_block_root: Hash256, + pub ssz_head_tracker: SszHeadTracker, + pub fork_choice: SszForkChoice, } impl SimpleStoreItem for PersistedBeaconChain { diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 6916982e0a..a43cc1d0fd 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -25,7 +25,10 @@ use types::{ pub use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; pub use types::test_utils::generate_deterministic_keypairs; -pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690; // 4th September 2019 +// 4th September 2019 +pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690; +// This parameter is required by a builder but not used because we use the `TestingSlotClock`. +pub const HARNESS_SLOT_TIME: Duration = Duration::from_secs(1); pub type BaseHarnessType = Witness< TStore, @@ -98,9 +101,9 @@ impl BeaconChainHarness> { .dummy_eth1_backend() .expect("should build dummy backend") .null_event_handler() - .testing_slot_clock(Duration::from_secs(1)) + .testing_slot_clock(HARNESS_SLOT_TIME) .expect("should configure testing slot clock") - .empty_reduced_tree_fork_choice() + .reduced_tree_fork_choice() .expect("should add fork choice to builder") .build() .expect("should build"); @@ -115,7 +118,7 @@ impl BeaconChainHarness> { impl BeaconChainHarness> { /// Instantiate a new harness with `validator_count` initial validators. 
- pub fn with_disk_store( + pub fn new_with_disk_store( eth_spec_instance: E, store: Arc, keypairs: Vec, @@ -140,9 +143,46 @@ impl BeaconChainHarness> { .dummy_eth1_backend() .expect("should build dummy backend") .null_event_handler() + .testing_slot_clock(HARNESS_SLOT_TIME) + .expect("should configure testing slot clock") + .reduced_tree_fork_choice() + .expect("should add fork choice to builder") + .build() + .expect("should build"); + + Self { + spec: chain.spec.clone(), + chain, + keypairs, + } + } + + /// Instantiate a new harness with `validator_count` initial validators. + pub fn resume_from_disk_store( + eth_spec_instance: E, + store: Arc, + keypairs: Vec, + ) -> Self { + let spec = E::default_spec(); + + let log = TerminalLoggerBuilder::new() + .level(Severity::Warning) + .build() + .expect("logger should build"); + + let chain = BeaconChainBuilder::new(eth_spec_instance) + .logger(log.clone()) + .custom_spec(spec.clone()) + .store(store.clone()) + .store_migrator( as Migrate<_, E>>::new(store)) + .resume_from_db() + .expect("should resume beacon chain from db") + .dummy_eth1_backend() + .expect("should build dummy backend") + .null_event_handler() .testing_slot_clock(Duration::from_secs(1)) .expect("should configure testing slot clock") - .empty_reduced_tree_fork_choice() + .reduced_tree_fork_choice() .expect("should add fork choice to builder") .build() .expect("should build"); diff --git a/beacon_node/beacon_chain/tests/persistence_tests.rs b/beacon_node/beacon_chain/tests/persistence_tests.rs new file mode 100644 index 0000000000..5e9977a23e --- /dev/null +++ b/beacon_node/beacon_chain/tests/persistence_tests.rs @@ -0,0 +1,134 @@ +#![cfg(not(debug_assertions))] + +#[macro_use] +extern crate lazy_static; + +use beacon_chain::{ + test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}, + BeaconChain, BeaconChainTypes, +}; +use sloggers::{null::NullLoggerBuilder, Build}; +use std::sync::Arc; +use store::DiskStore; +use tempfile::{tempdir, 
TempDir}; +use types::{EthSpec, Keypair, MinimalEthSpec}; + +type E = MinimalEthSpec; + +// Should ideally be divisible by 3. +pub const VALIDATOR_COUNT: usize = 24; + +lazy_static! { + /// A cached set of keys. + static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); +} + +fn get_store(db_path: &TempDir) -> Arc { + let spec = E::default_spec(); + let hot_path = db_path.path().join("hot_db"); + let cold_path = db_path.path().join("cold_db"); + let log = NullLoggerBuilder.build().expect("logger should build"); + Arc::new( + DiskStore::open(&hot_path, &cold_path, spec, log).expect("disk store should initialize"), + ) +} + +#[test] +fn finalizes_after_resuming_from_db() { + let validator_count = 16; + let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 8; + let first_half = num_blocks_produced / 2; + + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + + let harness = BeaconChainHarness::new_with_disk_store( + MinimalEthSpec, + store.clone(), + KEYPAIRS[0..validator_count].to_vec(), + ); + + harness.advance_slot(); + + harness.extend_chain( + first_half as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + assert!( + harness.chain.head().beacon_state.finalized_checkpoint.epoch > 0, + "the chain should have already finalized" + ); + + let latest_slot = harness.chain.slot().expect("should have a slot"); + + harness.chain.persist().expect("should persist the chain"); + + let resumed_harness = BeaconChainHarness::resume_from_disk_store( + MinimalEthSpec, + store, + KEYPAIRS[0..validator_count].to_vec(), + ); + + assert_chains_pretty_much_the_same(&harness.chain, &resumed_harness.chain); + + // Ensures we don't accidentally use it again. + // + // Note: this will persist the chain again, but that shouldn't matter since nothing has + // changed. + drop(harness); + + // Set the slot clock of the resumed harness to be in the slot following the previous harness. 
+ // + // This allows us to produce the block at the next slot. + resumed_harness + .chain + .slot_clock + .set_slot(latest_slot.as_u64() + 1); + + resumed_harness.extend_chain( + (num_blocks_produced - first_half) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + let state = &resumed_harness.chain.head().beacon_state; + assert_eq!( + state.slot, num_blocks_produced, + "head should be at the current slot" + ); + assert_eq!( + state.current_epoch(), + num_blocks_produced / MinimalEthSpec::slots_per_epoch(), + "head should be at the expected epoch" + ); + assert_eq!( + state.current_justified_checkpoint.epoch, + state.current_epoch() - 1, + "the head should be justified one behind the current epoch" + ); + assert_eq!( + state.finalized_checkpoint.epoch, + state.current_epoch() - 2, + "the head should be finalized two behind the current epoch" + ); +} + +/// Checks that two chains are the same, for the purpose of this tests. +/// +/// Several fields that are hard/impossible to check are ignored (e.g., the store). 
+fn assert_chains_pretty_much_the_same(a: &BeaconChain, b: &BeaconChain) { + assert_eq!(a.spec, b.spec, "spec should be equal"); + assert_eq!(a.op_pool, b.op_pool, "op_pool should be equal"); + assert_eq!(a.head(), b.head(), "head() should be equal"); + assert_eq!(a.heads(), b.heads(), "heads() should be equal"); + assert_eq!( + a.genesis_block_root, b.genesis_block_root, + "genesis_block_root should be equal" + ); + assert!( + a.fork_choice == b.fork_choice, + "fork_choice should be equal" + ); +} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 3b548fce46..1dd05da399 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -37,7 +37,7 @@ fn get_store(db_path: &TempDir) -> Arc { } fn get_harness(store: Arc, validator_count: usize) -> TestHarness { - let harness = BeaconChainHarness::with_disk_store( + let harness = BeaconChainHarness::new_with_disk_store( MinimalEthSpec, store, KEYPAIRS[0..validator_count].to_vec(), diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 92d22529e5..e5293cd196 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -28,7 +28,6 @@ serde_yaml = "0.8.11" slog = { version = "2.5.2", features = ["max_level_trace"] } slog-async = "2.3.0" tokio = "0.1.22" -clap = "2.33.0" dirs = "2.0.2" exit-future = "0.1.4" futures = "0.1.29" @@ -39,3 +38,4 @@ eth1 = { path = "../eth1" } genesis = { path = "../genesis" } environment = { path = "../../lighthouse/environment" } lighthouse_bootstrap = { path = "../../eth2/utils/lighthouse_bootstrap" } +eth2_ssz = { path = "../../eth2/utils/ssz" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 84e9feff00..e4da78da65 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -23,13 +23,14 @@ use lighthouse_bootstrap::Bootstrapper; use lmd_ghost::LmdGhost; use 
network::{NetworkConfig, NetworkMessage, Service as NetworkService}; use slog::{debug, error, info, warn}; +use ssz::Decode; use std::net::SocketAddr; use std::path::Path; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::mpsc::UnboundedSender; use tokio::timer::Interval; -use types::{ChainSpec, EthSpec}; +use types::{BeaconState, ChainSpec, EthSpec}; use websocket_server::{Config as WebSocketConfig, WebSocketSender}; /// The interval between notifier events. @@ -37,7 +38,7 @@ pub const NOTIFIER_INTERVAL_SECONDS: u64 = 15; /// Create a warning log whenever the peer count is at or below this value. pub const WARN_PEER_COUNT: usize = 1; /// Interval between polling the eth1 node for genesis information. -pub const ETH1_GENESIS_UPDATE_INTERVAL_MILLIS: u64 = 500; +pub const ETH1_GENESIS_UPDATE_INTERVAL_MILLIS: u64 = 7_000; /// Builds a `Client` instance. /// @@ -148,7 +149,7 @@ where })?; let context = runtime_context .ok_or_else(|| "beacon_chain_start_method requires a log".to_string())? - .service_context("beacon"); + .service_context("beacon".into()); let spec = chain_spec .ok_or_else(|| "beacon_chain_start_method requires a chain spec".to_string())?; @@ -187,40 +188,34 @@ where Box::new(future) } - ClientGenesis::DepositContract => { - let genesis_service = Eth1GenesisService::new( - // Some of the configuration options for `Eth1Config` are - // hard-coded when listening for genesis from the deposit contract. - // - // The idea is that the `Eth1Config` supplied to this function - // (`config`) is intended for block production duties (i.e., - // listening for deposit events and voting on eth1 data) and that - // we can make listening for genesis more efficient if we modify - // some params. - Eth1Config { - // Truncating the block cache makes searching for genesis more - // complicated. - block_cache_truncation: None, - // Scan large ranges of blocks when awaiting genesis. 
- blocks_per_log_query: 1_000, - // Only perform a single log request each time the eth1 node is - // polled. - // - // For small testnets this makes finding genesis much faster, - // as it usually happens within 1,000 blocks. - max_log_requests_per_update: Some(1), - // Only perform a single block request each time the eth1 node - // is polled. - // - // For small testnets, this is much faster as they do not have - // a `MIN_GENESIS_SECONDS`, so after `MIN_GENESIS_VALIDATOR_COUNT` - // has been reached only a single block needs to be read. - max_blocks_per_update: Some(1), - ..config - }, - context.log.clone(), + ClientGenesis::SszBytes { + genesis_state_bytes, + } => { + info!( + context.log, + "Starting from known genesis state"; ); + let result = BeaconState::from_ssz_bytes(&genesis_state_bytes) + .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e)); + + let future = result + .and_then(move |genesis_state| builder.genesis_state(genesis_state)) + .into_future() + .map(|v| (v, None)); + + Box::new(future) + } + ClientGenesis::DepositContract => { + info!( + context.log, + "Waiting for eth2 genesis from eth1"; + "eth1_node" => &config.endpoint + ); + + let genesis_service = + Eth1GenesisService::new(config, context.log.clone()); + let future = genesis_service .wait_for_genesis_state( Duration::from_millis(ETH1_GENESIS_UPDATE_INTERVAL_MILLIS), @@ -275,7 +270,7 @@ where .runtime_context .as_ref() .ok_or_else(|| "libp2p_network requires a runtime_context")? - .service_context("network"); + .service_context("network".into()); let (network, network_send) = NetworkService::new(beacon_chain, config, &context.executor, context.log) @@ -301,7 +296,7 @@ where .runtime_context .as_ref() .ok_or_else(|| "http_server requires a runtime_context")? 
- .service_context("http"); + .service_context("http".into()); let network = self .libp2p_network .clone() @@ -341,7 +336,7 @@ where .runtime_context .as_ref() .ok_or_else(|| "peer_count_notifier requires a runtime_context")? - .service_context("peer_notifier"); + .service_context("peer_notifier".into()); let log = context.log.clone(); let log_2 = context.log.clone(); let network = self @@ -384,7 +379,7 @@ where .runtime_context .as_ref() .ok_or_else(|| "slot_notifier requires a runtime_context")? - .service_context("slot_notifier"); + .service_context("slot_notifier".into()); let log = context.log.clone(); let log_2 = log.clone(); let beacon_chain = self @@ -498,7 +493,7 @@ where .clone() .ok_or_else(|| "beacon_chain requires a slot clock")?, ) - .empty_reduced_tree_fork_choice() + .reduced_tree_fork_choice() .map_err(|e| format!("Failed to init fork choice: {}", e))? .build() .map_err(|e| format!("Failed to build beacon chain: {}", e))?; @@ -537,7 +532,7 @@ where .runtime_context .as_ref() .ok_or_else(|| "websocket_event_handler requires a runtime_context")? - .service_context("ws"); + .service_context("ws".into()); let (sender, exit_signal, listening_addr): ( WebSocketSender, @@ -587,7 +582,7 @@ where .runtime_context .as_ref() .ok_or_else(|| "disk_store requires a log".to_string())? - .service_context("freezer_db"); + .service_context("freezer_db".into()); let spec = self .chain_spec .clone() @@ -715,7 +710,7 @@ where .runtime_context .as_ref() .ok_or_else(|| "caching_eth1_backend requires a runtime_context")? 
- .service_context("eth1_rpc"); + .service_context("eth1_rpc".into()); let beacon_chain_builder = self .beacon_chain_builder .ok_or_else(|| "caching_eth1_backend requires a beacon_chain_builder")?; @@ -726,6 +721,17 @@ where let backend = if let Some(eth1_service_from_genesis) = self.eth1_service { eth1_service_from_genesis.update_config(config.clone())?; + + // This cache is not useful because it's first (earliest) block likely the block that + // triggered genesis. + // + // In order to vote we need to be able to go back at least 2 * `ETH1_FOLLOW_DISTANCE` + // from the genesis-triggering block. Presently the block cache does not support + // importing blocks with decreasing block numbers, it only accepts them in increasing + // order. If this turns out to be a bottleneck we can update the block cache to allow + // adding earlier blocks too. + eth1_service_from_genesis.drop_block_cache(); + CachingEth1Backend::from_service(eth1_service_from_genesis, store) } else { CachingEth1Backend::new(config, context.log, store) diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index f94ea5ed55..330aadbc37 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,4 +1,3 @@ -use clap::ArgMatches; use network::NetworkConfig; use serde_derive::{Deserialize, Serialize}; use std::fs; @@ -25,6 +24,11 @@ pub enum ClientGenesis { DepositContract, /// Loads the genesis state from a SSZ-encoded `BeaconState` file. SszFile { path: PathBuf }, + /// Loads the genesis state from SSZ-encoded `BeaconState` bytes. + /// + /// We include the bytes instead of the `BeaconState` because the `EthSpec` type + /// parameter would be very annoying. + SszBytes { genesis_state_bytes: Vec }, /// Connects to another Lighthouse instance and reads the genesis state and other data via the /// HTTP API. 
RemoteNode { server: String, port: Option }, @@ -40,9 +44,10 @@ impl Default for ClientGenesis { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { pub data_dir: PathBuf, + pub testnet_dir: Option, pub db_type: String, - db_name: String, - freezer_db_path: Option, + pub db_name: String, + pub freezer_db_path: Option, pub log_file: PathBuf, pub spec_constants: String, /// If true, the node will use co-ordinated junk for eth1 values. @@ -64,12 +69,13 @@ impl Default for Config { fn default() -> Self { Self { data_dir: PathBuf::from(".lighthouse"), + testnet_dir: None, log_file: PathBuf::from(""), db_type: "disk".to_string(), db_name: "chain_db".to_string(), freezer_db_path: None, genesis: <_>::default(), - network: NetworkConfig::new(), + network: NetworkConfig::default(), rest_api: <_>::default(), websocket_server: <_>::default(), spec_constants: TESTNET_SPEC_CONSTANTS.into(), @@ -135,26 +141,6 @@ impl Config { .ok_or_else(|| "Unable to locate user home directory".to_string())?; ensure_dir_exists(path) } - - /// Apply the following arguments to `self`, replacing values if they are specified in `args`. - /// - /// Returns an error if arguments are obviously invalid. May succeed even if some values are - /// invalid. - pub fn apply_cli_args(&mut self, args: &ArgMatches, _log: &slog::Logger) -> Result<(), String> { - if let Some(dir) = args.value_of("datadir") { - self.data_dir = PathBuf::from(dir); - }; - - if let Some(freezer_dir) = args.value_of("freezer-dir") { - self.freezer_db_path = Some(PathBuf::from(freezer_dir)); - } - - self.network.apply_cli_args(args)?; - self.rest_api.apply_cli_args(args)?; - self.websocket_server.apply_cli_args(args)?; - - Ok(()) - } } /// Ensure that the directory at `path` exists, by creating it and all parents if necessary. 
diff --git a/beacon_node/eth1/src/block_cache.rs b/beacon_node/eth1/src/block_cache.rs index 1a6464ca76..a15ef86a6b 100644 --- a/beacon_node/eth1/src/block_cache.rs +++ b/beacon_node/eth1/src/block_cache.rs @@ -54,6 +54,21 @@ impl BlockCache { self.blocks.is_empty() } + /// Returns the timestamp of the earliest block in the cache (if any). + pub fn earliest_block_timestamp(&self) -> Option { + self.blocks.first().map(|block| block.timestamp) + } + + /// Returns the timestamp of the latest block in the cache (if any). + pub fn latest_block_timestamp(&self) -> Option { + self.blocks.last().map(|block| block.timestamp) + } + + /// Returns the lowest block number stored. + pub fn lowest_block_number(&self) -> Option { + self.blocks.first().map(|block| block.number) + } + /// Returns the highest block number stored. pub fn highest_block_number(&self) -> Option { self.blocks.last().map(|block| block.number) diff --git a/beacon_node/eth1/src/http.rs b/beacon_node/eth1/src/http.rs index 404e357d19..f28ca74a3c 100644 --- a/beacon_node/eth1/src/http.rs +++ b/beacon_node/eth1/src/http.rs @@ -23,7 +23,7 @@ use types::Hash256; pub const DEPOSIT_EVENT_TOPIC: &str = "0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"; /// `keccak("get_deposit_root()")[0..4]` -pub const DEPOSIT_ROOT_FN_SIGNATURE: &str = "0x863a311b"; +pub const DEPOSIT_ROOT_FN_SIGNATURE: &str = "0xc5f2892f"; /// `keccak("get_deposit_count()")[0..4]` pub const DEPOSIT_COUNT_FN_SIGNATURE: &str = "0x621fd130"; @@ -137,19 +137,21 @@ pub fn get_deposit_count( block_number, timeout, ) - .and_then(|result| result.ok_or_else(|| "No response to deposit count".to_string())) - .and_then(|bytes| { - if bytes.is_empty() { - Ok(None) - } else if bytes.len() == DEPOSIT_COUNT_RESPONSE_BYTES { - let mut array = [0; 8]; - array.copy_from_slice(&bytes[32 + 32..32 + 32 + 8]); - Ok(Some(u64::from_le_bytes(array))) - } else { - Err(format!( - "Deposit count response was not {} bytes: {:?}", - 
DEPOSIT_COUNT_RESPONSE_BYTES, bytes - )) + .and_then(|result| match result { + None => Err(format!("Deposit count response was none")), + Some(bytes) => { + if bytes.is_empty() { + Ok(None) + } else if bytes.len() == DEPOSIT_COUNT_RESPONSE_BYTES { + let mut array = [0; 8]; + array.copy_from_slice(&bytes[32 + 32..32 + 32 + 8]); + Ok(Some(u64::from_le_bytes(array))) + } else { + Err(format!( + "Deposit count response was not {} bytes: {:?}", + DEPOSIT_COUNT_RESPONSE_BYTES, bytes + )) + } } }) } @@ -172,17 +174,19 @@ pub fn get_deposit_root( block_number, timeout, ) - .and_then(|result| result.ok_or_else(|| "No response to deposit root".to_string())) - .and_then(|bytes| { - if bytes.is_empty() { - Ok(None) - } else if bytes.len() == DEPOSIT_ROOT_BYTES { - Ok(Some(Hash256::from_slice(&bytes))) - } else { - Err(format!( - "Deposit root response was not {} bytes: {:?}", - DEPOSIT_ROOT_BYTES, bytes - )) + .and_then(|result| match result { + None => Err(format!("Deposit root response was none")), + Some(bytes) => { + if bytes.is_empty() { + Ok(None) + } else if bytes.len() == DEPOSIT_ROOT_BYTES { + Ok(Some(Hash256::from_slice(&bytes))) + } else { + Err(format!( + "Deposit root response was not {} bytes: {:?}", + DEPOSIT_ROOT_BYTES, bytes + )) + } } }) } @@ -369,12 +373,18 @@ pub fn send_rpc_request( /// Accepts an entire HTTP body (as a string) and returns the `result` field, as a serde `Value`. fn response_result(response: &str) -> Result, String> { - Ok(serde_json::from_str::(&response) - .map_err(|e| format!("Failed to parse response: {:?}", e))?
- .get("result") - .cloned() - .map(Some) - .unwrap_or_else(|| None)) + let json = serde_json::from_str::(&response) + .map_err(|e| format!("Failed to parse response: {:?}", e))?; + + if let Some(error) = json.get("error") { + Err(format!("Eth1 node returned error: {}", error)) + } else { + Ok(json + .get("result") + .cloned() + .map(Some) + .unwrap_or_else(|| None)) + } } /// Parses a `0x`-prefixed, **big-endian** hex string as a u64. diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 5ec89d3bf2..efa402884b 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -121,7 +121,7 @@ impl Default for Config { lowest_cached_block_number: 0, follow_distance: 128, block_cache_truncation: Some(4_096), - auto_update_interval_millis: 500, + auto_update_interval_millis: 7_000, blocks_per_log_query: 1_000, max_log_requests_per_update: None, max_blocks_per_update: None, @@ -163,6 +163,26 @@ impl Service { &self.inner.deposit_cache } + /// Drop the block cache, replacing it with an empty one. + pub fn drop_block_cache(&self) { + *(self.inner.block_cache.write()) = BlockCache::default(); + } + + /// Returns the timestamp of the earliest block in the cache (if any). + pub fn earliest_block_timestamp(&self) -> Option { + self.inner.block_cache.read().earliest_block_timestamp() + } + + /// Returns the timestamp of the latest block in the cache (if any). + pub fn latest_block_timestamp(&self) -> Option { + self.inner.block_cache.read().latest_block_timestamp() + } + + /// Returns the lowest block number stored. + pub fn lowest_block_number(&self) -> Option { + self.inner.block_cache.read().lowest_block_number() + } + /// Returns the number of currently cached blocks. 
pub fn block_cache_len(&self) -> usize { self.blocks().read().len() @@ -220,6 +240,8 @@ impl Service { { let log_a = self.log.clone(); let log_b = self.log.clone(); + let inner_1 = self.inner.clone(); + let inner_2 = self.inner.clone(); let deposit_future = self .update_deposit_cache() @@ -229,6 +251,7 @@ impl Service { Ok(DepositCacheUpdateOutcome::Success { logs_imported }) => trace!( log_a, "Updated eth1 deposit cache"; + "cached_deposits" => inner_1.deposit_cache.read().cache.len(), "logs_imported" => logs_imported, ), Err(e) => error!( @@ -252,6 +275,7 @@ impl Service { }) => trace!( log_b, "Updated eth1 block cache"; + "cached_blocks" => inner_2.block_cache.read().len(), "blocks_imported" => blocks_imported, "head_block" => head_block_number, ), diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 177be2fb8b..11ece8cd00 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -5,7 +5,6 @@ authors = ["Age Manning "] edition = "2018" [dependencies] -clap = "2.33.0" hex = "0.3" # rust-libp2p is presently being sourced from a Sigma Prime fork of the # `libp2p/rust-libp2p` repository. diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index 39391e5fd6..2eb7b78da8 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -1,4 +1,3 @@ -use clap::ArgMatches; use enr::Enr; use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder}; use libp2p::Multiaddr; @@ -97,84 +96,3 @@ impl Default for Config { } } } - -/// Generates a default Config. -impl Config { - pub fn new() -> Self { - Config::default() - } - - pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), String> { - // If a `datadir` has been specified, set the network dir to be inside it. 
- if let Some(dir) = args.value_of("datadir") { - self.network_dir = PathBuf::from(dir).join("network"); - }; - - // If a network dir has been specified, override the `datadir` definition. - if let Some(dir) = args.value_of("network-dir") { - self.network_dir = PathBuf::from(dir); - }; - - if let Some(listen_address_str) = args.value_of("listen-address") { - let listen_address = listen_address_str - .parse() - .map_err(|_| format!("Invalid listen address: {:?}", listen_address_str))?; - self.listen_address = listen_address; - self.discovery_address = listen_address; - } - - if let Some(max_peers_str) = args.value_of("maxpeers") { - self.max_peers = max_peers_str - .parse::() - .map_err(|_| format!("Invalid number of max peers: {}", max_peers_str))?; - } - - if let Some(port_str) = args.value_of("port") { - let port = port_str - .parse::() - .map_err(|_| format!("Invalid port: {}", port_str))?; - self.libp2p_port = port; - self.discovery_port = port; - } - - if let Some(boot_enr_str) = args.value_of("boot-nodes") { - self.boot_nodes = boot_enr_str - .split(',') - .map(|enr| enr.parse().map_err(|_| format!("Invalid ENR: {}", enr))) - .collect::, _>>()?; - } - - if let Some(libp2p_addresses_str) = args.value_of("libp2p-addresses") { - self.libp2p_nodes = libp2p_addresses_str - .split(',') - .map(|multiaddr| { - multiaddr - .parse() - .map_err(|_| format!("Invalid Multiaddr: {}", multiaddr)) - }) - .collect::, _>>()?; - } - - if let Some(topics_str) = args.value_of("topics") { - self.topics = topics_str.split(',').map(|s| s.into()).collect(); - } - - if let Some(discovery_address_str) = args.value_of("discovery-address") { - self.discovery_address = discovery_address_str - .parse() - .map_err(|_| format!("Invalid discovery address: {:?}", discovery_address_str))? 
- } - - if let Some(disc_port_str) = args.value_of("disc-port") { - self.discovery_port = disc_port_str - .parse::() - .map_err(|_| format!("Invalid discovery port: {}", disc_port_str))?; - } - - if let Some(p2p_priv_key) = args.value_of("p2p-priv-key") { - self.secret_key_hex = Some(p2p_priv_key.to_string()); - } - - Ok(()) - } -} diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index 71632b2613..85f7de8d4e 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -8,7 +8,7 @@ use futures::{ Future, }; use parking_lot::Mutex; -use slog::{debug, error, info, Logger}; +use slog::{debug, error, info, trace, Logger}; use state_processing::{ initialize_beacon_state_from_eth1, is_valid_genesis_state, per_block_processing::process_deposit, process_activations, @@ -37,7 +37,29 @@ pub struct Eth1GenesisService { impl Eth1GenesisService { /// Creates a new service. Does not attempt to connect to the Eth1 node. + /// + /// Modifies the given `config` to make it more suitable to the task of listening to genesis. pub fn new(config: Eth1Config, log: Logger) -> Self { + let config = Eth1Config { + // Truncating the block cache makes searching for genesis more + // complicated. + block_cache_truncation: None, + // Scan large ranges of blocks when awaiting genesis. + blocks_per_log_query: 1_000, + // Only perform a few log requests each time the eth1 node is polled. + // + // For small testnets this makes finding genesis much faster, + // as it usually happens within 1,000 blocks. + max_log_requests_per_update: Some(5), + // Only process a few blocks each time the eth1 node is polled. + // + // For small testnets, this is much faster as they do not have + // a `MIN_GENESIS_SECONDS`, so after `MIN_GENESIS_VALIDATOR_COUNT` + // has been reached only a single block needs to be read.
+ max_blocks_per_update: Some(5), + ..config + }; + Self { core: Service::new(config, log), highest_processed_block: Arc::new(Mutex::new(None)), @@ -81,6 +103,7 @@ impl Eth1GenesisService { let service_4 = service.clone(); let log = service.core.log.clone(); let min_genesis_active_validator_count = spec.min_genesis_active_validator_count; + let min_genesis_time = spec.min_genesis_time; Delay::new(Instant::now() + update_interval) .map_err(|e| format!("Delay between genesis deposit checks failed: {:?}", e)) @@ -161,6 +184,9 @@ impl Eth1GenesisService { debug!( service_4.core.log, "No eth1 genesis block found"; + "latest_block_timestamp" => service_4.core.latest_block_timestamp(), + "min_genesis_time" => min_genesis_time, + "min_validator_count" => min_genesis_active_validator_count, "cached_blocks" => service_4.core.block_cache_len(), "cached_deposits" => service_4.core.deposit_cache_len(), "cache_head" => service_4.highest_known_block(), @@ -205,23 +231,35 @@ impl Eth1GenesisService { .filter(|block| { self.highest_known_block() .map(|n| block.number <= n) - .unwrap_or_else(|| false) + .unwrap_or_else(|| true) }) .find(|block| { let mut highest_processed_block = self.highest_processed_block.lock(); + let block_number = block.number; let next_new_block_number = highest_processed_block.map(|n| n + 1).unwrap_or_else(|| 0); - if block.number < next_new_block_number { + if block_number < next_new_block_number { return false; } - self.is_valid_genesis_eth1_block::(block, &spec) + self.is_valid_genesis_eth1_block::(block, &spec, &self.core.log) .and_then(|val| { *highest_processed_block = Some(block.number); Ok(val) }) + .map(|is_valid| { + if !is_valid { + info!( + self.core.log, + "Inspected new eth1 block"; + "msg" => "did not trigger genesis", + "block_number" => block_number + ); + }; + is_valid + }) .unwrap_or_else(|_| { error!( self.core.log, @@ -301,6 +339,7 @@ impl Eth1GenesisService { &self, target_block: &Eth1Block, spec: &ChainSpec, + log: &Logger, ) -> Result 
{ if target_block.timestamp < spec.min_genesis_time { Ok(false) @@ -345,8 +384,16 @@ impl Eth1GenesisService { })?; process_activations(&mut local_state, spec); + let is_valid = is_valid_genesis_state(&local_state, spec); - Ok(is_valid_genesis_state(&local_state, spec)) + trace!( + log, + "Eth1 block inspected for genesis"; + "active_validators" => local_state.get_active_validator_indices(local_state.current_epoch()).len(), + "validators" => local_state.validators.len() + ); + + Ok(is_valid) } } diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index f9370e335d..7baa4dcddf 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -22,7 +22,6 @@ eth2_ssz = { path = "../../eth2/utils/ssz" } eth2_ssz_derive = { path = "../../eth2/utils/ssz_derive" } state_processing = { path = "../../eth2/state_processing" } types = { path = "../../eth2/types" } -clap = "2.33" http = "0.1" hyper = "0.12" exit-future = "0.1.4" diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 9621dce459..a73ade89b6 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -56,6 +56,29 @@ pub fn get_head( ResponseBuilder::new(&req)?.body(&head) } +#[derive(Serialize, Deserialize, Encode)] +pub struct HeadBeaconBlock { + beacon_block_root: Hash256, + beacon_block_slot: Slot, +} + +/// HTTP handler to return a list of head BeaconBlocks. 
+pub fn get_heads( + req: Request, + beacon_chain: Arc>, +) -> ApiResult { + let heads = beacon_chain + .heads() + .into_iter() + .map(|(beacon_block_root, beacon_block_slot)| HeadBeaconBlock { + beacon_block_root, + beacon_block_slot, + }) + .collect::>(); + + ResponseBuilder::new(&req)?.body(&heads) +} + #[derive(Serialize, Encode)] #[serde(bound = "T: EthSpec")] pub struct BlockResponse { diff --git a/beacon_node/rest_api/src/config.rs b/beacon_node/rest_api/src/config.rs index 1d7f50b6ee..9b99500e3d 100644 --- a/beacon_node/rest_api/src/config.rs +++ b/beacon_node/rest_api/src/config.rs @@ -1,4 +1,3 @@ -use clap::ArgMatches; use serde::{Deserialize, Serialize}; use std::net::Ipv4Addr; @@ -50,25 +49,3 @@ impl Default for Config { } } } - -impl Config { - pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { - if args.is_present("no-api") { - self.enabled = false; - } - - if let Some(rpc_address) = args.value_of("api-address") { - self.listen_address = rpc_address - .parse::() - .map_err(|_| "api-address is not a valid IPv4 address.")?; - } - - if let Some(rpc_port) = args.value_of("api-port") { - self.port = rpc_port - .parse::() - .map_err(|_| "api-port is not a valid u16.")?; - } - - Ok(()) - } -} diff --git a/beacon_node/rest_api/src/router.rs b/beacon_node/rest_api/src/router.rs index 35bde8eed9..03cc35f959 100644 --- a/beacon_node/rest_api/src/router.rs +++ b/beacon_node/rest_api/src/router.rs @@ -64,6 +64,9 @@ pub fn route( // Methods for Beacon Node (&Method::GET, "/beacon/head") => into_boxfut(beacon::get_head::(req, beacon_chain)), + (&Method::GET, "/beacon/heads") => { + into_boxfut(beacon::get_heads::(req, beacon_chain)) + } (&Method::GET, "/beacon/block") => { into_boxfut(beacon::get_block::(req, beacon_chain)) } @@ -154,7 +157,7 @@ pub fn route( // (e.g., a response with a 404 or 500 status). 
request_result.then(move |result| match result { Ok(response) => { - debug!(local_log, "Request successful: {:?}", path); + debug!(local_log, "HTTP API request successful"; "path" => path); metrics::inc_counter(&metrics::SUCCESS_COUNT); metrics::stop_timer(timer); @@ -163,7 +166,7 @@ pub fn route( Err(e) => { let error_response = e.into(); - debug!(local_log, "Request failure: {:?}", path); + debug!(local_log, "HTTP API request failure"; "path" => path); metrics::stop_timer(timer); Ok(error_response) diff --git a/beacon_node/rest_api/tests/test.rs b/beacon_node/rest_api/tests/test.rs index 0fabfd08ff..7937c9bc34 100644 --- a/beacon_node/rest_api/tests/test.rs +++ b/beacon_node/rest_api/tests/test.rs @@ -3,7 +3,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use node_test_rig::{ environment::{Environment, EnvironmentBuilder}, - testing_client_config, ClientGenesis, LocalBeaconNode, + testing_client_config, ClientConfig, ClientGenesis, LocalBeaconNode, }; use remote_beacon_node::{PublishStatus, ValidatorDuty}; use std::sync::Arc; @@ -26,6 +26,13 @@ fn build_env() -> Environment { .expect("environment should build") } +fn build_node(env: &mut Environment, config: ClientConfig) -> LocalBeaconNode { + let context = env.core_context(); + env.runtime() + .block_on(LocalBeaconNode::production(context, config)) + .expect("should block until node created") +} + /// Returns the randao reveal for the given slot (assuming the given `beacon_chain` uses /// deterministic keypairs). 
fn get_randao_reveal( @@ -64,7 +71,7 @@ fn validator_produce_attestation() { let spec = &E::default_spec(); - let node = LocalBeaconNode::production(env.core_context(), testing_client_config()); + let node = build_node(&mut env, testing_client_config()); let remote_node = node.remote_node().expect("should produce remote node"); let beacon_chain = node @@ -160,7 +167,7 @@ fn validator_duties_bulk() { let spec = &E::default_spec(); - let node = LocalBeaconNode::production(env.core_context(), testing_client_config()); + let node = build_node(&mut env, testing_client_config()); let remote_node = node.remote_node().expect("should produce remote node"); let beacon_chain = node @@ -197,7 +204,7 @@ fn validator_duties() { let spec = &E::default_spec(); - let node = LocalBeaconNode::production(env.core_context(), testing_client_config()); + let node = build_node(&mut env, testing_client_config()); let remote_node = node.remote_node().expect("should produce remote node"); let beacon_chain = node @@ -321,7 +328,7 @@ fn validator_block_post() { genesis_time: 13_371_337, }; - let node = LocalBeaconNode::production(env.core_context(), config); + let node = build_node(&mut env, config); let remote_node = node.remote_node().expect("should produce remote node"); let beacon_chain = node @@ -387,7 +394,7 @@ fn validator_block_get() { let spec = &E::default_spec(); - let node = LocalBeaconNode::production(env.core_context(), testing_client_config()); + let node = build_node(&mut env, testing_client_config()); let remote_node = node.remote_node().expect("should produce remote node"); let beacon_chain = node @@ -425,7 +432,7 @@ fn validator_block_get() { fn beacon_state() { let mut env = build_env(); - let node = LocalBeaconNode::production(env.core_context(), testing_client_config()); + let node = build_node(&mut env, testing_client_config()); let remote_node = node.remote_node().expect("should produce remote node"); let (state_by_slot, root) = env @@ -469,7 +476,7 @@ fn beacon_state() 
{ fn beacon_block() { let mut env = build_env(); - let node = LocalBeaconNode::production(env.core_context(), testing_client_config()); + let node = build_node(&mut env, testing_client_config()); let remote_node = node.remote_node().expect("should produce remote node"); let (block_by_slot, root) = env @@ -513,7 +520,7 @@ fn beacon_block() { fn genesis_time() { let mut env = build_env(); - let node = LocalBeaconNode::production(env.core_context(), testing_client_config()); + let node = build_node(&mut env, testing_client_config()); let remote_node = node.remote_node().expect("should produce remote node"); let genesis_time = env @@ -537,7 +544,7 @@ fn genesis_time() { fn fork() { let mut env = build_env(); - let node = LocalBeaconNode::production(env.core_context(), testing_client_config()); + let node = build_node(&mut env, testing_client_config()); let remote_node = node.remote_node().expect("should produce remote node"); let fork = env @@ -561,7 +568,7 @@ fn fork() { fn eth2_config() { let mut env = build_env(); - let node = LocalBeaconNode::production(env.core_context(), testing_client_config()); + let node = build_node(&mut env, testing_client_config()); let remote_node = node.remote_node().expect("should produce remote node"); let eth2_config = env @@ -585,7 +592,7 @@ fn eth2_config() { fn get_version() { let mut env = build_env(); - let node = LocalBeaconNode::production(env.core_context(), testing_client_config()); + let node = build_node(&mut env, testing_client_config()); let remote_node = node.remote_node().expect("should produce remote node"); let version = env diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 181080b17a..649520c191 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -13,9 +13,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("network-dir") .long("network-dir") .value_name("DIR") - .help("Data directory for network keys.") + .help("Data directory for network keys. 
Defaults to network/ inside the beacon node \ + dir.") .takes_value(true) - .global(true) ) .arg( Arg::with_name("freezer-dir") @@ -23,26 +23,33 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("DIR") .help("Data directory for the freezer database.") .takes_value(true) - .global(true) + ) + .arg( + Arg::with_name("testnet-dir") + .long("testnet-dir") + .value_name("DIR") + .help("Path to directory containing eth2_testnet specs. Defaults to \ + a hard-coded Lighthouse testnet. Only effective if there is no \ + existing database.") + .takes_value(true) ) /* * Network parameters. */ .arg( - Arg::with_name("port-bump") - .long("port-bump") - .short("b") - .value_name("INCREMENT") - .help("Sets all listening TCP/UDP ports to default values, but with each port increased by \ - INCREMENT. Useful when starting multiple nodes on a single machine. Using increments \ - in multiples of 10 is recommended.") - .takes_value(true), + Arg::with_name("zero-ports") + .long("zero-ports") + .short("z") + .help("Sets all listening TCP/UDP ports to 0, allowing the OS to choose some \ + arbitrary free ports.") + .takes_value(false), ) .arg( Arg::with_name("listen-address") .long("listen-address") .value_name("ADDRESS") - .help("The address lighthouse will listen for UDP and TCP connections. (default 127.0.0.1).") + .help("The address lighthouse will listen for UDP and TCP connections.") + .default_value("0.0.0.0") .takes_value(true) ) .arg( @@ -50,13 +57,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long("port") .value_name("PORT") .help("The TCP/UDP port to listen on. 
The UDP port can be modified by the --discovery-port flag.") - .conflicts_with("port-bump") + .default_value("9000") .takes_value(true), ) .arg( Arg::with_name("maxpeers") .long("maxpeers") - .help("The maximum number of peers (default 10).") + .help("The maximum number of peers.") + .default_value("10") .takes_value(true), ) .arg( @@ -72,64 +80,70 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long("disc-port") .value_name("PORT") .help("The discovery UDP port.") - .conflicts_with("port-bump") + .default_value("9000") .takes_value(true), ) .arg( Arg::with_name("discovery-address") .long("discovery-address") .value_name("ADDRESS") - .help("The IP address to broadcast to other peers on how to reach this node.") + .help("The IP address to broadcast to other peers on how to reach this node. \ + Default is determined automatically.") .takes_value(true), ) .arg( Arg::with_name("topics") .long("topics") .value_name("STRING") - .help("One or more comma-delimited gossipsub topic strings to subscribe to.") + .help("One or more comma-delimited gossipsub topic strings to subscribe to. Default \ + is determined automatically.") .takes_value(true), ) .arg( Arg::with_name("libp2p-addresses") .long("libp2p-addresses") .value_name("MULTIADDR") - .help("One or more comma-delimited multiaddrs to manually connect to a libp2p peer without an ENR.") + .help("One or more comma-delimited multiaddrs to manually connect to a libp2p peer \ + without an ENR.") .takes_value(true), ) .arg( Arg::with_name("p2p-priv-key") .long("p2p-priv-key") .value_name("HEX") - .help("A secp256k1 secret key, represented as ASCII-encoded hex bytes (with or without 0x prefix).") + .help("A secp256k1 secret key, represented as ASCII-encoded hex bytes (with or \ + without 0x prefix). 
Default is either loaded from disk or generated \ + automatically.") .takes_value(true), ) /* REST API related arguments */ .arg( - Arg::with_name("no-api") - .long("no-api") - .help("Disable RESTful HTTP API server.") + Arg::with_name("http") + .long("http") + .help("Enable RESTful HTTP API server. Disabled by default.") .takes_value(false), ) .arg( - Arg::with_name("api-address") - .long("api-address") + Arg::with_name("http-address") + .long("http-address") .value_name("ADDRESS") .help("Set the listen address for the RESTful HTTP API server.") + .default_value("127.0.0.1") .takes_value(true), ) .arg( - Arg::with_name("api-port") - .long("api-port") + Arg::with_name("http-port") + .long("http-port") .value_name("PORT") .help("Set the listen TCP port for the RESTful HTTP API server.") - .conflicts_with("port-bump") + .default_value("5052") .takes_value(true), ) /* Websocket related arguments */ .arg( - Arg::with_name("no-ws") - .long("no-ws") - .help("Disable websocket server.") + Arg::with_name("ws") + .long("ws") + .help("Enable the websocket server. Disabled by default.") .takes_value(false), ) .arg( @@ -137,7 +151,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long("ws-address") .value_name("ADDRESS") .help("Set the listen address for the websocket server.") - .conflicts_with_all(&["no-ws"]) + .default_value("127.0.0.1") .takes_value(true), ) .arg( @@ -145,16 +159,24 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long("ws-port") .value_name("PORT") .help("Set the listen TCP port for the websocket server.") - .conflicts_with_all(&["no-ws", "port-bump"]) + .default_value("5053") .takes_value(true), ) /* * Eth1 Integration */ + .arg( + Arg::with_name("eth1") + .long("eth1") + .help("If present the node will connect to an eth1 node. 
This is required for \ + block production, you must use this flag if you wish to serve a validator.") + .takes_value(false), + ) .arg( Arg::with_name("dummy-eth1") .long("dummy-eth1") + .conflicts_with("eth1") .help("If present, uses an eth1 backend that generates static dummy data.\ Identical to the method used at the 2019 Canada interop.") ) @@ -166,32 +188,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("http://localhost:8545") ) - .arg( - Arg::with_name("eth1-follow") - .long("eth1-follow") - .value_name("BLOCK_COUNT") - .help("Specifies how many blocks we should cache behind the eth1 head. A larger number means a smaller cache.") - .takes_value(true) - // TODO: set this higher once we're not using testnets all the time. - .default_value("0") - ) - .arg( - Arg::with_name("deposit-contract") - .long("deposit-contract") - .short("e") - .value_name("DEPOSIT-CONTRACT") - .help("Specifies the deposit contract address on the Eth1 chain.") - .takes_value(true) - ) - .arg( - Arg::with_name("deposit-contract-deploy") - .long("deposit-contract-deploy") - .value_name("BLOCK_NUMBER") - .help("Specifies the block number that the deposit contract was deployed at.") - .takes_value(true) - // TODO: set this higher once we're not using testnets all the time. - .default_value("0") - ) /* * The "testnet" sub-command. 
* @@ -199,21 +195,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { */ .subcommand(SubCommand::with_name("testnet") .about("Create a new Lighthouse datadir using a testnet strategy.") - .arg( - Arg::with_name("eth2-config") - .long("eth2-config") - .value_name("TOML_FILE") - .help("A existing eth2_spec TOML file (e.g., eth2_spec.toml).") - .takes_value(true) - .conflicts_with("spec") - ) - .arg( - Arg::with_name("client-config") - .long("client-config") - .value_name("TOML_FILE") - .help("An existing beacon_node TOML file (e.g., beacon_node.toml).") - .takes_value(true) - ) .arg( Arg::with_name("random-datadir") .long("random-datadir") @@ -221,13 +202,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("If present, append a random string to the datadir path. Useful for fast development \ iteration.") ) - .arg( - Arg::with_name("random-propagation") - .long("random-propagation") - .value_name("INTEGER") - .takes_value(true) - .help("Specifies (as a percentage) the likelihood of propagating blocks and attestations. This should only be used for testing networking elements. The value must like in the range 1-100.") - ) .arg( Arg::with_name("force") .long("force") @@ -236,33 +210,22 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { backup directory.") .conflicts_with("random-datadir") ) + .arg( + Arg::with_name("random-propagation") + .long("random-propagation") + .value_name("INTEGER") + .takes_value(true) + .help("Specifies (as a percentage) the likelihood of propagating blocks and \ + attestations. This should only be used for testing networking elements. The \ + value must lie in the range 1-100. Default is 100.") + ) .arg( Arg::with_name("slot-time") .long("slot-time") .short("t") .value_name("MILLISECONDS") - .help("Defines the slot time when creating a new testnet.") - ) - /* - * `boostrap` - * - * Start a new node by downloading genesis and network info from another node via the - * HTTP API.
- */ - .subcommand(SubCommand::with_name("bootstrap") - .about("Connects to the given HTTP server, downloads a genesis state and attempts to peer with it.") - .arg(Arg::with_name("server") - .value_name("HTTP_SERVER") - .required(true) - .default_value("http://localhost:5052") - .help("A HTTP server, with a http:// prefix")) - .arg(Arg::with_name("libp2p-port") - .short("p") - .long("port") - .value_name("TCP_PORT") - .help("A libp2p listen port used to peer with the bootstrap server. This flag is useful \ - when port-fowarding is used: you may connect using a different port than \ - the one the server is immediately listening on.")) + .help("Defines the slot time when creating a new testnet. The default is \ + specified by the spec.") ) /* * `recent` @@ -326,7 +289,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { * Connect to the Prysmatic Labs testnet. */ .subcommand(SubCommand::with_name("prysm") - .about("Connect to the Prysmatic Labs testnet on Goerli.") + .about("Connect to the Prysmatic Labs testnet on Goerli. 
Not guaranteed to be \ + up-to-date or functioning.") ) ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 7ddd9cd26a..75171c759e 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,18 +1,21 @@ use clap::ArgMatches; use client::{ClientConfig, ClientGenesis, Eth2Config}; use eth2_config::{read_from_file, write_to_file}; +use eth2_libp2p::{Enr, Multiaddr}; +use eth2_testnet_config::Eth2TestnetConfig; use genesis::recent_genesis_time; -use lighthouse_bootstrap::Bootstrapper; use rand::{distributions::Alphanumeric, Rng}; -use slog::{crit, info, warn, Logger}; +use slog::{crit, info, Logger}; +use ssz::Encode; use std::fs; use std::net::Ipv4Addr; -use std::path::{Path, PathBuf}; -use types::{Address, Epoch, Fork}; +use std::path::PathBuf; +use types::{Epoch, EthSpec, Fork}; -pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; +pub const BEACON_NODE_DIR: &str = "beacon"; +pub const NETWORK_DIR: &str = "network"; type Result = std::result::Result; type Config = (ClientConfig, Eth2Config, Logger); @@ -24,89 +27,391 @@ type Config = (ClientConfig, Eth2Config, Logger); /// The output of this function depends primarily upon the given `cli_args`, however it's behaviour /// may be influenced by other external services like the contents of the file system or the /// response of some remote server. -pub fn get_configs(cli_args: &ArgMatches, core_log: Logger) -> Result { +pub fn get_configs( + cli_args: &ArgMatches, + mut eth2_config: Eth2Config, + core_log: Logger, +) -> Result { let log = core_log.clone(); - let mut builder = ConfigBuilder::new(cli_args, core_log)?; + let mut client_config = ClientConfig::default(); + // Read the `--datadir` flag. + // + // If it's not present, try and find the home directory (`~`) and push the default data + // directory onto it. 
+ client_config.data_dir = cli_args + .value_of("datadir") + .map(PathBuf::from) + .or_else(|| dirs::home_dir().map(|home| home.join(".lighthouse").join(BEACON_NODE_DIR))) + .unwrap_or_else(|| PathBuf::from(".")); + + // Load the client config, if it exists . + let path = client_config.data_dir.join(CLIENT_CONFIG_FILENAME); + if path.exists() { + client_config = read_from_file(path.clone()) + .map_err(|e| format!("Unable to parse {:?} file: {:?}", path, e))? + .ok_or_else(|| format!("{:?} file does not exist", path))?; + } + + // Load the eth2 config, if it exists . + let path = client_config.data_dir.join(ETH2_CONFIG_FILENAME); + if path.exists() { + eth2_config = read_from_file(path.clone()) + .map_err(|e| format!("Unable to parse {:?} file: {:?}", path, e))? + .ok_or_else(|| format!("{:?} file does not exist", path))?; + } + + // Read the `--testnet-dir` flag. + if let Some(val) = cli_args.value_of("testnet-dir") { + client_config.testnet_dir = Some(PathBuf::from(val)); + } + + /* + * Networking + */ + // If a network dir has been specified, override the `datadir` definition. 
+ if let Some(dir) = cli_args.value_of("network-dir") { + client_config.network.network_dir = PathBuf::from(dir); + } else { + client_config.network.network_dir = client_config.data_dir.join(NETWORK_DIR); + }; + + if let Some(listen_address_str) = cli_args.value_of("listen-address") { + let listen_address = listen_address_str + .parse() + .map_err(|_| format!("Invalid listen address: {:?}", listen_address_str))?; + client_config.network.listen_address = listen_address; + client_config.network.discovery_address = listen_address; + } + + if let Some(max_peers_str) = cli_args.value_of("maxpeers") { + client_config.network.max_peers = max_peers_str + .parse::() + .map_err(|_| format!("Invalid number of max peers: {}", max_peers_str))?; + } + + if let Some(port_str) = cli_args.value_of("port") { + let port = port_str + .parse::() + .map_err(|_| format!("Invalid port: {}", port_str))?; + client_config.network.libp2p_port = port; + client_config.network.discovery_port = port; + } + + if let Some(boot_enr_str) = cli_args.value_of("boot-nodes") { + client_config.network.boot_nodes = boot_enr_str + .split(',') + .map(|enr| enr.parse().map_err(|_| format!("Invalid ENR: {}", enr))) + .collect::>>()?; + } + + if let Some(libp2p_addresses_str) = cli_args.value_of("libp2p-addresses") { + client_config.network.libp2p_nodes = libp2p_addresses_str + .split(',') + .map(|multiaddr| { + multiaddr + .parse() + .map_err(|_| format!("Invalid Multiaddr: {}", multiaddr)) + }) + .collect::>>()?; + } + + if let Some(topics_str) = cli_args.value_of("topics") { + client_config.network.topics = topics_str.split(',').map(|s| s.into()).collect(); + } + + if let Some(discovery_address_str) = cli_args.value_of("discovery-address") { + client_config.network.discovery_address = discovery_address_str + .parse() + .map_err(|_| format!("Invalid discovery address: {:?}", discovery_address_str))? 
+ } + + if let Some(disc_port_str) = cli_args.value_of("disc-port") { + client_config.network.discovery_port = disc_port_str + .parse::() + .map_err(|_| format!("Invalid discovery port: {}", disc_port_str))?; + } + + if let Some(p2p_priv_key) = cli_args.value_of("p2p-priv-key") { + client_config.network.secret_key_hex = Some(p2p_priv_key.to_string()); + } + + /* + * Http server + */ + + if cli_args.is_present("http") { + client_config.rest_api.enabled = true; + } + + if let Some(address) = cli_args.value_of("http-address") { + client_config.rest_api.listen_address = address + .parse::() + .map_err(|_| "http-address is not a valid IPv4 address.")?; + } + + if let Some(port) = cli_args.value_of("http-port") { + client_config.rest_api.port = port + .parse::() + .map_err(|_| "http-port is not a valid u16.")?; + } + + /* + * Websocket server + */ + + if cli_args.is_present("ws") { + client_config.websocket_server.enabled = true; + } + + if let Some(address) = cli_args.value_of("ws-address") { + client_config.websocket_server.listen_address = address + .parse::() + .map_err(|_| "ws-address is not a valid IPv4 address.")?; + } + + if let Some(port) = cli_args.value_of("ws-port") { + client_config.websocket_server.port = port + .parse::() + .map_err(|_| "ws-port is not a valid u16.")?; + } + + /* + * Eth1 + */ + + // When present, use an eth1 backend that generates deterministic junk. + // + // Useful for running testnets without the overhead of a deposit contract. if cli_args.is_present("dummy-eth1") { - builder.client_config.dummy_eth1_backend = true; + client_config.dummy_eth1_backend = true; } + // When present, attempt to sync to an eth1 node. + // + // Required for block production. + if cli_args.is_present("eth1") { + client_config.sync_eth1_chain = true; + } + + // Defines the URL to reach the eth1 node. 
if let Some(val) = cli_args.value_of("eth1-endpoint") { - builder.set_eth1_endpoint(val) - } - - if let Some(val) = cli_args.value_of("deposit-contract") { - builder.set_deposit_contract( - val.parse::
() - .map_err(|e| format!("Unable to parse deposit-contract address: {:?}", e))?, - ) - } - - if let Some(val) = cli_args.value_of("deposit-contract-deploy") { - builder.set_deposit_contract_deploy_block( - val.parse::() - .map_err(|e| format!("Unable to parse deposit-contract-deploy: {:?}", e))?, - ) - } - - if let Some(val) = cli_args.value_of("eth1-follow") { - builder.set_eth1_follow( - val.parse::() - .map_err(|e| format!("Unable to parse follow distance: {:?}", e))?, - ) + client_config.eth1.endpoint = val.to_string(); } match cli_args.subcommand() { ("testnet", Some(sub_cmd_args)) => { - process_testnet_subcommand(&mut builder, sub_cmd_args, &log)? + process_testnet_subcommand(&mut client_config, &mut eth2_config, sub_cmd_args)? } // No sub-command assumes a resume operation. _ => { - info!( - log, - "Resuming from existing datadir"; - "path" => format!("{:?}", builder.client_config.data_dir) - ); - // If no primary subcommand was given, start the beacon chain from an existing // database. - builder.set_genesis(ClientGenesis::Resume); + client_config.genesis = ClientGenesis::Resume; // Whilst there is no large testnet or mainnet force the user to specify how they want // to start a new chain (e.g., from a genesis YAML file, another node, etc). - if !builder.client_config.data_dir.exists() { - return Err( - "No datadir found. To start a new beacon chain, see `testnet --help`. \ - Use `--datadir` to specify a different directory" - .into(), + if !client_config.data_dir.exists() { + info!( + log, + "Starting from an empty database"; + "data_dir" => format!("{:?}", client_config.data_dir) ); + init_new_client::(&mut client_config, &mut eth2_config)? + } else { + info!( + log, + "Resuming from existing datadir"; + "data_dir" => format!("{:?}", client_config.data_dir) + ); + // If the `testnet` command was not provided, attempt to load an existing datadir and + // continue with an existing chain. + load_from_datadir(&mut client_config)? 
} - - // If the `testnet` command was not provided, attempt to load an existing datadir and - // continue with an existing chain. - builder.load_from_datadir()?; } }; - builder.build(cli_args) + if let Some(freezer_dir) = cli_args.value_of("freezer-dir") { + client_config.freezer_db_path = Some(PathBuf::from(freezer_dir)); + } + + if eth2_config.spec_constants != client_config.spec_constants { + crit!(log, "Specification constants do not match."; + "client_config" => client_config.spec_constants.to_string(), + "eth2_config" => eth2_config.spec_constants.to_string() + ); + return Err("Specification constant mismatch".into()); + } + + /* + * Zero-ports + * + * Replaces previously set flags. + */ + if cli_args.is_present("zero-ports") { + client_config.network.libp2p_port = 0; + client_config.network.discovery_port = 0; + client_config.rest_api.port = 0; + client_config.websocket_server.port = 0; + } + + Ok((client_config, eth2_config, log)) +} + +/// Load from an existing database. +fn load_from_datadir(client_config: &mut ClientConfig) -> Result<()> { + // Check to ensure the datadir exists. + // + // For now we return an error. In the future we may decide to boot a default (e.g., + // public testnet or mainnet). + if !client_config.get_data_dir().map_or(false, |d| d.exists()) { + return Err( + "No datadir found. Either create a new testnet or specify a different `--datadir`." + .into(), + ); + } + + // If there is a path to a database in the config, ensure it exists. + if !client_config + .get_db_path() + .map_or(false, |path| path.exists()) + { + return Err( + "No database found in datadir. Use 'testnet -f' to overwrite the existing \ + datadir, or specify a different `--datadir`." + .into(), + ); + } + + client_config.genesis = ClientGenesis::Resume; + + Ok(()) +} + +/// Create a new client with the default configuration. 
+fn init_new_client( + client_config: &mut ClientConfig, + eth2_config: &mut Eth2Config, +) -> Result<()> { + let eth2_testnet_config: Eth2TestnetConfig = + if let Some(testnet_dir) = &client_config.testnet_dir { + Eth2TestnetConfig::load(testnet_dir.clone()) + .map_err(|e| format!("Unable to open testnet dir at {:?}: {}", testnet_dir, e))? + } else { + Eth2TestnetConfig::hard_coded() + .map_err(|e| format!("Unable to load hard-coded testnet dir: {}", e))? + }; + + eth2_config.spec = eth2_testnet_config + .yaml_config + .as_ref() + .ok_or_else(|| "The testnet directory must contain a spec config".to_string())? + .apply_to_chain_spec::(ð2_config.spec) + .ok_or_else(|| { + format!( + "The loaded config is not compatible with the {} spec", + ð2_config.spec_constants + ) + })?; + + let spec = &mut eth2_config.spec; + + client_config.eth1.deposit_contract_address = + format!("{:?}", eth2_testnet_config.deposit_contract_address()?); + client_config.eth1.deposit_contract_deploy_block = + eth2_testnet_config.deposit_contract_deploy_block; + + client_config.eth1.follow_distance = spec.eth1_follow_distance / 2; + client_config.dummy_eth1_backend = false; + client_config.eth1.lowest_cached_block_number = client_config + .eth1 + .deposit_contract_deploy_block + .saturating_sub(client_config.eth1.follow_distance * 2); + + if let Some(boot_nodes) = eth2_testnet_config.boot_enr { + client_config + .network + .boot_nodes + .append(&mut boot_nodes.clone()) + } + + if let Some(genesis_state) = eth2_testnet_config.genesis_state { + // Note: re-serializing the genesis state is not so efficient, however it avoids adding + // trait bounds to the `ClientGenesis` enum. This would have significant flow-on + // effects. 
+ client_config.genesis = ClientGenesis::SszBytes { + genesis_state_bytes: genesis_state.as_ssz_bytes(), + }; + } else { + client_config.genesis = ClientGenesis::DepositContract; + } + + create_new_datadir(&client_config, &eth2_config)?; + + Ok(()) +} + +/// Writes the configs to `client_config.data_dir`. +/// +/// Returns an error if `client_config.data_dir` already exists. +pub fn create_new_datadir(client_config: &ClientConfig, eth2_config: &Eth2Config) -> Result<()> { + if client_config.data_dir.exists() { + return Err(format!( + "Data dir already exists at {:?}", + client_config.data_dir + ))?; + } + + // Create `datadir` and any non-existing parent directories. + fs::create_dir_all(&client_config.data_dir) + .map_err(|e| format!("Failed to create data dir: {}", e))?; + + macro_rules! write_to_file { + ($file: ident, $variable: ident) => { + let file = client_config.data_dir.join($file); + if file.exists() { + return Err(format!("Datadir is not clean, {} exists.", $file)); + } else { + // Write the config to a TOML file in the datadir. + write_to_file(client_config.data_dir.join($file), $variable) + .map_err(|e| format!("Unable to write {} file: {:?}", $file, e))?; + } + }; + } + + write_to_file!(CLIENT_CONFIG_FILENAME, client_config); + write_to_file!(ETH2_CONFIG_FILENAME, eth2_config); + + Ok(()) } /// Process the `testnet` CLI subcommand arguments, updating the `builder`. fn process_testnet_subcommand( - builder: &mut ConfigBuilder, + client_config: &mut ClientConfig, + eth2_config: &mut Eth2Config, cli_args: &ArgMatches, - log: &Logger, ) -> Result<()> { + // Specifies that a random datadir should be used. if cli_args.is_present("random-datadir") { - builder.set_random_datadir()?; + client_config + .data_dir + .push(format!("random_{}", random_string(6))); + client_config.network.network_dir = client_config.data_dir.join("network"); } + // Deletes the existing datadir. 
if cli_args.is_present("force") { - builder.clean_datadir()?; + if client_config.data_dir.exists() { + fs::remove_dir_all(&client_config.data_dir) + .map_err(|e| format!("Unable to delete existing datadir: {:?}", e))?; + } } + // Define a percentage of messages that should be propagated, useful for simulating bad network + // conditions. + // + // WARNING: setting this to anything less than 100 will cause bad behaviour. if let Some(propagation_percentage_string) = cli_args.value_of("random-propagation") { let percentage = propagation_percentage_string .parse::() @@ -114,72 +419,20 @@ fn process_testnet_subcommand( if percentage > 100 { return Err("Propagation percentage greater than 100".to_string()); } - builder.client_config.network.propagation_percentage = Some(percentage); - } - - let is_bootstrap = cli_args.subcommand_name() == Some("bootstrap"); - - if let Some(path_string) = cli_args.value_of("eth2-config") { - if is_bootstrap { - return Err("Cannot supply --eth2-config when using bootstrap".to_string()); - } - - let path = path_string - .parse::() - .map_err(|e| format!("Unable to parse eth2-config path: {:?}", e))?; - builder.load_eth2_config(path)?; - } else { - builder.update_spec_from_subcommand(&cli_args)?; + client_config.network.propagation_percentage = Some(percentage); } + // Modify the `SECONDS_PER_SLOT` "constant". 
if let Some(slot_time) = cli_args.value_of("slot-time") { - if is_bootstrap { - return Err("Cannot supply --slot-time flag whilst using bootstrap.".into()); - } - let slot_time = slot_time .parse::() .map_err(|e| format!("Unable to parse slot-time: {:?}", e))?; - builder.set_slot_time(slot_time); + eth2_config.spec.milliseconds_per_slot = slot_time; } - if let Some(path_string) = cli_args.value_of("client-config") { - let path = path_string - .parse::() - .map_err(|e| format!("Unable to parse client config path: {:?}", e))?; - builder.load_client_config(path)?; - } - - info!( - log, - "Creating new datadir"; - "path" => format!("{:?}", builder.client_config.data_dir) - ); - - // When using the testnet command we listen on all addresses. - builder.set_listen_addresses("0.0.0.0".into())?; - warn!(log, "All services listening on 0.0.0.0"); - // Start matching on the second subcommand (e.g., `testnet bootstrap ...`). match cli_args.subcommand() { - ("bootstrap", Some(cli_args)) => { - let server = cli_args - .value_of("server") - .ok_or_else(|| "No bootstrap server specified")?; - let port: Option = cli_args - .value_of("libp2p-port") - .and_then(|s| s.parse::().ok()); - - builder.import_bootstrap_libp2p_address(server, port)?; - builder.import_bootstrap_enr_address(server)?; - builder.import_bootstrap_eth2_config(server)?; - - builder.set_genesis(ClientGenesis::RemoteNode { - server: server.to_string(), - port, - }) - } ("recent", Some(cli_args)) => { let validator_count = cli_args .value_of("validator_count") @@ -193,12 +446,12 @@ fn process_testnet_subcommand( .parse::() .map_err(|e| format!("Unable to parse minutes: {:?}", e))?; - builder.client_config.dummy_eth1_backend = true; + client_config.dummy_eth1_backend = true; - builder.set_genesis(ClientGenesis::Interop { + client_config.genesis = ClientGenesis::Interop { validator_count, genesis_time: recent_genesis_time(minutes), - }) + }; } ("quick", Some(cli_args)) => { let validator_count = cli_args @@ -213,12 
+466,12 @@ fn process_testnet_subcommand( .parse::() .map_err(|e| format!("Unable to parse genesis time: {:?}", e))?; - builder.client_config.dummy_eth1_backend = true; + client_config.dummy_eth1_backend = true; - builder.set_genesis(ClientGenesis::Interop { + client_config.genesis = ClientGenesis::Interop { validator_count, genesis_time, - }) + }; } ("file", Some(cli_args)) => { let path = cli_args @@ -236,11 +489,10 @@ fn process_testnet_subcommand( other => return Err(format!("Unknown genesis file format: {}", other)), }; - builder.set_genesis(start_method) + client_config.genesis = start_method; } ("prysm", Some(_)) => { - let mut spec = &mut builder.eth2_config.spec; - let mut client_config = &mut builder.client_config; + let mut spec = &mut eth2_config.spec; spec.min_deposit_amount = 100; spec.max_effective_balance = 3_200_000_000; @@ -259,7 +511,7 @@ fn process_testnet_subcommand( client_config.eth1.follow_distance = 16; client_config.dummy_eth1_backend = false; - builder.set_genesis(ClientGenesis::DepositContract) + client_config.genesis = ClientGenesis::DepositContract; } (cmd, Some(_)) => { return Err(format!( @@ -270,369 +522,11 @@ fn process_testnet_subcommand( _ => return Err("No testnet method specified. See 'testnet --help'.".into()), }; - builder.write_configs_to_new_datadir()?; + create_new_datadir(&client_config, ð2_config)?; Ok(()) } -/// Allows for building a set of configurations based upon `clap` arguments. -struct ConfigBuilder { - log: Logger, - pub eth2_config: Eth2Config, - pub client_config: ClientConfig, -} - -impl ConfigBuilder { - /// Create a new builder with default settings. - pub fn new(cli_args: &ArgMatches, log: Logger) -> Result { - // Read the `--datadir` flag. - // - // If it's not present, try and find the home directory (`~`) and push the default data - // directory onto it. 
- let data_dir: PathBuf = cli_args - .value_of("datadir") - .map(PathBuf::from) - .or_else(|| { - dirs::home_dir().map(|mut home| { - home.push(DEFAULT_DATA_DIR); - home - }) - }) - .ok_or_else(|| "Unable to find a home directory for the datadir".to_string())?; - - let mut client_config = ClientConfig::default(); - client_config.data_dir = data_dir; - - Ok(Self { - log, - eth2_config: Eth2Config::minimal(), - client_config, - }) - } - - /// Clears any configuration files that would interfere with writing new configs. - /// - /// Moves the following files in `data_dir` into a backup directory: - /// - /// - Client config - /// - Eth2 config - /// - All database directories - pub fn clean_datadir(&mut self) -> Result<()> { - let backup_dir = { - let mut s = String::from("backup_"); - s.push_str(&random_string(6)); - self.client_config.data_dir.join(s) - }; - - fs::create_dir_all(&backup_dir) - .map_err(|e| format!("Unable to create config backup dir: {:?}", e))?; - - let move_to_backup_dir = |path: &Path| -> Result<()> { - let file_name = path - .file_name() - .ok_or_else(|| "Invalid path found during datadir clean (no filename).")?; - - let mut new = path.to_path_buf(); - new.pop(); - new.push(backup_dir.clone()); - new.push(file_name); - - let _ = fs::rename(path, new); - - Ok(()) - }; - - move_to_backup_dir(&self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME))?; - move_to_backup_dir(&self.client_config.data_dir.join(ETH2_CONFIG_FILENAME))?; - move_to_backup_dir(&self.client_config.create_db_path()?)?; - move_to_backup_dir(&self.client_config.create_freezer_db_path()?)?; - - Ok(()) - } - - pub fn set_eth1_endpoint(&mut self, endpoint: &str) { - self.client_config.eth1.endpoint = endpoint.to_string(); - } - - pub fn set_deposit_contract(&mut self, deposit_contract: Address) { - self.client_config.eth1.deposit_contract_address = format!("{:?}", deposit_contract); - } - - pub fn set_deposit_contract_deploy_block(&mut self, eth1_block_number: u64) { - 
self.client_config.eth1.deposit_contract_deploy_block = eth1_block_number; - } - - pub fn set_eth1_follow(&mut self, distance: u64) { - self.client_config.eth1.follow_distance = distance; - } - - pub fn set_genesis(&mut self, method: ClientGenesis) { - self.client_config.genesis = method; - } - - /// Import the libp2p address for `server` into the list of libp2p nodes to connect with. - /// - /// If `port` is `Some`, it is used as the port for the `Multiaddr`. If `port` is `None`, - /// attempts to connect to the `server` via HTTP and retrieve it's libp2p listen port. - pub fn import_bootstrap_libp2p_address( - &mut self, - server: &str, - port: Option, - ) -> Result<()> { - let bootstrapper = Bootstrapper::connect(server.to_string(), &self.log)?; - - if let Some(server_multiaddr) = bootstrapper.best_effort_multiaddr(port) { - info!( - self.log, - "Estimated bootstrapper libp2p address"; - "multiaddr" => format!("{:?}", server_multiaddr) - ); - - self.client_config - .network - .libp2p_nodes - .push(server_multiaddr); - } else { - warn!( - self.log, - "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." - ); - }; - - Ok(()) - } - - /// Import the enr address for `server` into the list of initial enrs (boot nodes). - pub fn import_bootstrap_enr_address(&mut self, server: &str) -> Result<()> { - let bootstrapper = Bootstrapper::connect(server.to_string(), &self.log)?; - - if let Ok(enr) = bootstrapper.enr() { - info!( - self.log, - "Loaded bootstrapper libp2p address"; - "enr" => format!("{:?}", enr) - ); - - self.client_config.network.boot_nodes.push(enr); - } else { - warn!( - self.log, - "Unable to estimate a bootstrapper enr address, this node may not find any peers." - ); - }; - - Ok(()) - } - - /// Set the config data_dir to be an random directory. - /// - /// Useful for easily spinning up ephemeral testnets. 
- pub fn set_random_datadir(&mut self) -> Result<()> { - self.client_config - .data_dir - .push(format!("random_{}", random_string(6))); - self.client_config.network.network_dir = self.client_config.data_dir.join("network"); - - Ok(()) - } - - /// Imports an `Eth2Config` from `server`, returning an error if this fails. - pub fn import_bootstrap_eth2_config(&mut self, server: &str) -> Result<()> { - let bootstrapper = Bootstrapper::connect(server.to_string(), &self.log)?; - - self.update_eth2_config(bootstrapper.eth2_config()?); - - Ok(()) - } - - fn update_eth2_config(&mut self, eth2_config: Eth2Config) { - self.eth2_config = eth2_config; - } - - fn set_slot_time(&mut self, milliseconds_per_slot: u64) { - self.eth2_config.spec.milliseconds_per_slot = milliseconds_per_slot; - } - - /// Reads the subcommand and tries to update `self.eth2_config` based up on the `--spec` flag. - /// - /// Returns an error if the `--spec` flag is not present in the given `cli_args`. - pub fn update_spec_from_subcommand(&mut self, cli_args: &ArgMatches) -> Result<()> { - // Re-initialise the `Eth2Config`. - // - // If a CLI parameter is set, overwrite any config file present. - // If a parameter is not set, use either the config file present or default to minimal. - let eth2_config = match cli_args.value_of("spec") { - Some("mainnet") => Eth2Config::mainnet(), - Some("minimal") => Eth2Config::minimal(), - Some("interop") => Eth2Config::interop(), - _ => return Err("Unable to determine specification type.".into()), - }; - - self.client_config.spec_constants = cli_args - .value_of("spec") - .expect("Guarded by prior match statement") - .to_string(); - self.eth2_config = eth2_config; - - Ok(()) - } - - /// Writes the configs in `self` to `self.data_dir`. - /// - /// Returns an error if `self.data_dir` already exists. 
- pub fn write_configs_to_new_datadir(&mut self) -> Result<()> { - let db_exists = self - .client_config - .get_db_path() - .map(|d| d.exists()) - .unwrap_or_else(|| false); - - // Do not permit creating a new config when the datadir exists. - if db_exists { - return Err("Database already exists. See `-f` or `-r` in `testnet --help`".into()); - } - - // Create `datadir` and any non-existing parent directories. - fs::create_dir_all(&self.client_config.data_dir).map_err(|e| { - crit!(self.log, "Failed to initialize data dir"; "error" => format!("{}", e)); - format!("{}", e) - })?; - - let client_config_file = self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME); - if client_config_file.exists() { - return Err(format!( - "Datadir is not clean, {} exists. See `-f` in `testnet --help`.", - CLIENT_CONFIG_FILENAME - )); - } else { - // Write the onfig to a TOML file in the datadir. - write_to_file( - self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME), - &self.client_config, - ) - .map_err(|e| format!("Unable to write {} file: {:?}", CLIENT_CONFIG_FILENAME, e))?; - } - - let eth2_config_file = self.client_config.data_dir.join(ETH2_CONFIG_FILENAME); - if eth2_config_file.exists() { - return Err(format!( - "Datadir is not clean, {} exists. See `-f` in `testnet --help`.", - ETH2_CONFIG_FILENAME - )); - } else { - // Write the config to a TOML file in the datadir. - write_to_file( - self.client_config.data_dir.join(ETH2_CONFIG_FILENAME), - &self.eth2_config, - ) - .map_err(|e| format!("Unable to write {} file: {:?}", ETH2_CONFIG_FILENAME, e))?; - } - - Ok(()) - } - - /// Attempts to load the client and eth2 configs from `self.data_dir`. - /// - /// Returns an error if any files are not found or are invalid. - pub fn load_from_datadir(&mut self) -> Result<()> { - // Check to ensure the datadir exists. - // - // For now we return an error. In the future we may decide to boot a default (e.g., - // public testnet or mainnet). 
- if !self - .client_config - .get_data_dir() - .map_or(false, |d| d.exists()) - { - return Err( - "No datadir found. Either create a new testnet or specify a different `--datadir`." - .into(), - ); - } - - // If there is a path to a database in the config, ensure it exists. - if !self - .client_config - .get_db_path() - .map_or(false, |path| path.exists()) - { - return Err( - "No database found in datadir. Use 'testnet -f' to overwrite the existing \ - datadir, or specify a different `--datadir`." - .into(), - ); - } - - self.load_eth2_config(self.client_config.data_dir.join(ETH2_CONFIG_FILENAME))?; - self.load_client_config(self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME))?; - - Ok(()) - } - - /// Attempts to load the client config from `path`. - /// - /// Returns an error if any files are not found or are invalid. - pub fn load_client_config(&mut self, path: PathBuf) -> Result<()> { - self.client_config = read_from_file::(path.clone()) - .map_err(|e| format!("Unable to parse {:?} file: {:?}", path, e))? - .ok_or_else(|| format!("{:?} file does not exist", path))?; - - Ok(()) - } - - /// Attempts to load the eth2 config from `path`. - /// - /// Returns an error if any files are not found or are invalid. - pub fn load_eth2_config(&mut self, path: PathBuf) -> Result<()> { - self.eth2_config = read_from_file::(path.clone()) - .map_err(|e| format!("Unable to parse {:?} file: {:?}", path, e))? - .ok_or_else(|| format!("{:?} file does not exist", path))?; - - Ok(()) - } - - /// Sets all listening addresses to the given `addr`. - pub fn set_listen_addresses(&mut self, addr: String) -> Result<()> { - let addr = addr - .parse::() - .map_err(|e| format!("Unable to parse default listen address: {:?}", e))?; - - self.client_config.network.listen_address = addr.into(); - self.client_config.rest_api.listen_address = addr; - - Ok(()) - } - - /// Consumes self, returning the configs. 
- /// - /// The supplied `cli_args` should be the base-level `clap` cli_args (i.e., not a subcommand - /// cli_args). - pub fn build(mut self, cli_args: &ArgMatches) -> Result { - self.client_config.apply_cli_args(cli_args, &self.log)?; - - if let Some(bump) = cli_args.value_of("port-bump") { - let bump = bump - .parse::() - .map_err(|e| format!("Unable to parse port bump: {}", e))?; - - self.client_config.network.libp2p_port += bump; - self.client_config.network.discovery_port += bump; - self.client_config.rest_api.port += bump; - self.client_config.websocket_server.port += bump; - } - - if self.eth2_config.spec_constants != self.client_config.spec_constants { - crit!(self.log, "Specification constants do not match."; - "client_config" => self.client_config.spec_constants.to_string(), - "eth2_config" => self.eth2_config.spec_constants.to_string() - ); - return Err("Specification constant mismatch".into()); - } - - Ok((self.client_config, self.eth2_config, self.log)) - } -} - fn random_string(len: usize) -> String { rand::thread_rng() .sample_iter(&Alphanumeric) diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 6797095b62..36f69ab765 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -57,15 +57,15 @@ impl ProductionBeaconNode { ) -> impl Future + 'a { let log = context.log.clone(); - // TODO: the eth2 config in the env is being completely ignored. + // TODO: the eth2 config in the env is being modified. // // See https://github.com/sigp/lighthouse/issues/602 - get_configs(&matches, log).into_future().and_then( - move |(client_config, eth2_config, _log)| { + get_configs::(&matches, context.eth2_config.clone(), log) + .into_future() + .and_then(move |(client_config, eth2_config, _log)| { context.eth2_config = eth2_config; Self::new(context, client_config) - }, - ) + }) } /// Starts a new beacon node `Client` in the given `environment`. @@ -126,10 +126,15 @@ impl ProductionBeaconNode { .system_time_slot_clock()? 
.websocket_event_handler(client_config.websocket_server.clone())? .build_beacon_chain()? - .libp2p_network(&client_config.network)? - .http_server(&client_config, &http_eth2_config)? - .peer_count_notifier()? - .slot_notifier()?; + .libp2p_network(&client_config.network)?; + + let builder = if client_config.rest_api.enabled { + builder.http_server(&client_config, &http_eth2_config)? + } else { + builder + }; + + let builder = builder.peer_count_notifier()?.slot_notifier()?; Ok(Self(builder.build())) }) diff --git a/beacon_node/tests/test.rs b/beacon_node/tests/test.rs index d1e66674cc..71edef8469 100644 --- a/beacon_node/tests/test.rs +++ b/beacon_node/tests/test.rs @@ -1,12 +1,25 @@ #![cfg(test)] -use node_test_rig::{environment::EnvironmentBuilder, testing_client_config, LocalBeaconNode}; -use types::{MinimalEthSpec, Slot}; +use node_test_rig::{ + environment::{Environment, EnvironmentBuilder}, + testing_client_config, LocalBeaconNode, +}; +use types::{EthSpec, MinimalEthSpec, Slot}; fn env_builder() -> EnvironmentBuilder { EnvironmentBuilder::minimal() } +fn build_node(env: &mut Environment) -> LocalBeaconNode { + let context = env.core_context(); + env.runtime() + .block_on(LocalBeaconNode::production( + context, + testing_client_config(), + )) + .expect("should block until node created") +} + #[test] fn http_server_genesis_state() { let mut env = env_builder() @@ -17,7 +30,7 @@ fn http_server_genesis_state() { .build() .expect("environment should build"); - let node = LocalBeaconNode::production(env.core_context(), testing_client_config()); + let node = build_node(&mut env); let remote_node = node.remote_node().expect("should produce remote node"); let (api_state, _root) = env diff --git a/beacon_node/websocket_server/Cargo.toml b/beacon_node/websocket_server/Cargo.toml index 9ea8d9b650..f45c718e1c 100644 --- a/beacon_node/websocket_server/Cargo.toml +++ b/beacon_node/websocket_server/Cargo.toml @@ -7,7 +7,6 @@ edition = "2018" # See more keys and their 
definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -clap = "2.33.0" exit-future = "0.1.4" futures = "0.1.29" serde = "1.0.102" diff --git a/beacon_node/websocket_server/src/config.rs b/beacon_node/websocket_server/src/config.rs index c07f0da838..f521ca33d4 100644 --- a/beacon_node/websocket_server/src/config.rs +++ b/beacon_node/websocket_server/src/config.rs @@ -1,5 +1,4 @@ -use clap::ArgMatches; -use serde::{Deserialize, Serialize}; +use serde_derive::{Deserialize, Serialize}; use std::net::Ipv4Addr; /// The core configuration of a Lighthouse beacon node. @@ -21,25 +20,3 @@ impl Default for Config { } } } - -impl Config { - pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { - if args.is_present("no-ws") { - self.enabled = false; - } - - if let Some(rpc_address) = args.value_of("ws-address") { - self.listen_address = rpc_address - .parse::() - .map_err(|_| "ws-address is not a valid IPv4 address.")?; - } - - if let Some(rpc_port) = args.value_of("ws-port") { - self.port = rpc_port - .parse::() - .map_err(|_| "ws-port is not a valid u16.")?; - } - - Ok(()) - } -} diff --git a/eth2/lmd_ghost/src/lib.rs b/eth2/lmd_ghost/src/lib.rs index 0cec0a3616..ac42120882 100644 --- a/eth2/lmd_ghost/src/lib.rs +++ b/eth2/lmd_ghost/src/lib.rs @@ -8,7 +8,9 @@ pub use reduced_tree::ThreadSafeReducedTree; pub type Result = std::result::Result; -pub trait LmdGhost: Send + Sync + Sized { +// Note: the `PartialEq` bound is only required for testing. If it becomes a serious annoyance we +// can remove it. +pub trait LmdGhost: PartialEq + Send + Sync + Sized { /// Create a new instance, with the given `store` and `finalized_root`. fn new(store: Arc, finalized_block: &BeaconBlock, finalized_root: Hash256) -> Self; @@ -54,7 +56,7 @@ pub trait LmdGhost: Send + Sync + Sized { fn verify_integrity(&self) -> Result<()>; /// Encode the `LmdGhost` instance to bytes. 
- fn as_bytes(self) -> Vec; + fn as_bytes(&self) -> Vec; /// Create a new `LmdGhost` instance given a `store` and encoded bytes. fn from_bytes(bytes: &[u8], store: Arc) -> Result; diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index a841378363..65aa22e584 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -55,6 +55,13 @@ impl fmt::Debug for ThreadSafeReducedTree { } } +impl PartialEq for ThreadSafeReducedTree { + /// This implementation ignores the `store`. + fn eq(&self, other: &Self) -> bool { + *self.core.read() == *other.core.read() + } +} + impl LmdGhost for ThreadSafeReducedTree where T: Store, @@ -121,8 +128,8 @@ where } /// Consume the `ReducedTree` object and return its ssz encoded bytes representation. - fn as_bytes(self) -> Vec { - self.core.into_inner().as_bytes() + fn as_bytes(&self) -> Vec { + self.core.read().as_bytes() } /// Create a new `ThreadSafeReducedTree` instance from a `store` and the @@ -200,6 +207,15 @@ impl fmt::Debug for ReducedTree { } } +impl PartialEq for ReducedTree { + /// This implementation ignores the `store` field. + fn eq(&self, other: &Self) -> bool { + self.nodes == other.nodes + && self.latest_votes == other.latest_votes + && self.root == other.root + } +} + impl ReducedTree where T: Store, @@ -918,7 +934,7 @@ pub struct Vote { /// /// E.g., a `get` or `insert` to an out-of-bounds element will cause the Vec to grow (using /// Default) to the smallest size required to fulfill the request. 
-#[derive(Default, Clone, Debug)] +#[derive(Default, Clone, Debug, PartialEq)] pub struct ElasticList(Vec); impl ElasticList diff --git a/eth2/operation_pool/src/persistence.rs b/eth2/operation_pool/src/persistence.rs index bb423891a9..230e54ae75 100644 --- a/eth2/operation_pool/src/persistence.rs +++ b/eth2/operation_pool/src/persistence.rs @@ -8,7 +8,7 @@ use types::*; /// /// Operations are stored in arbitrary order, so it's not a good idea to compare instances /// of this type (or its encoded form) for equality. Convert back to an `OperationPool` first. -#[derive(Encode, Decode)] +#[derive(Clone, Encode, Decode)] pub struct PersistedOperationPool { /// Mapping from attestation ID to attestation mappings. // We could save space by not storing the attestation ID, but it might diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index c60b89a0c3..e94ef413da 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -213,19 +213,35 @@ pub fn process_eth1_data( state: &mut BeaconState, eth1_data: &Eth1Data, ) -> Result<(), Error> { + if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data) { + state.eth1_data = new_eth1_data; + } + state.eth1_data_votes.push(eth1_data.clone())?; + Ok(()) +} + +/// Returns `Some(eth1_data)` if adding the given `eth1_data` to `state.eth1_data_votes` would +/// result in a change to `state.eth1_data`. +/// +/// Spec v0.9.1 +pub fn get_new_eth1_data( + state: &BeaconState, + eth1_data: &Eth1Data, +) -> Option { let num_votes = state .eth1_data_votes .iter() .filter(|vote| *vote == eth1_data) .count(); - if num_votes * 2 > T::SlotsPerEth1VotingPeriod::to_usize() { - state.eth1_data = eth1_data.clone(); + // The +1 is to account for the `eth1_data` supplied to the function. 
+ if 2 * (num_votes + 1) > T::SlotsPerEth1VotingPeriod::to_usize() { + Some(eth1_data.clone()) + } else { + None } - - Ok(()) } /// Validates each `ProposerSlashing` and updates the state, short-circuiting on an invalid object. diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index e3aa7b475c..4a36d631a7 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -319,7 +319,7 @@ mod tests { } // Yaml Config is declared here in order to access domain fields of ChainSpec which are private fields. -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] #[serde(rename_all = "UPPERCASE")] #[serde(default)] #[serde(deny_unknown_fields)] diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index d6d7dae7a8..a82d398f91 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -48,7 +48,7 @@ pub use crate::beacon_block_body::BeaconBlockBody; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; pub use crate::beacon_state::{Error as BeaconStateError, *}; -pub use crate::chain_spec::{ChainSpec, Domain}; +pub use crate::chain_spec::{ChainSpec, Domain, YamlConfig}; pub use crate::checkpoint::Checkpoint; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; pub use crate::deposit_data::DepositData; diff --git a/eth2/utils/bls/src/fake_public_key.rs b/eth2/utils/bls/src/fake_public_key.rs index f9440d86de..489c003dd8 100644 --- a/eth2/utils/bls/src/fake_public_key.rs +++ b/eth2/utils/bls/src/fake_public_key.rs @@ -13,7 +13,7 @@ use std::hash::{Hash, Hasher}; /// /// This struct is a wrapper upon a base type and provides helper functions (e.g., SSZ /// serialization). -#[derive(Debug, Clone, Eq)] +#[derive(Clone, Eq)] pub struct FakePublicKey { bytes: Vec, /// Never used, only use for compatibility with "real" `PublicKey`. 
@@ -93,6 +93,12 @@ impl fmt::Display for FakePublicKey { } } +impl fmt::Debug for FakePublicKey { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "0x{}", self.as_hex_string()) + } +} + impl default::Default for FakePublicKey { fn default() -> Self { let secret_key = SecretKey::random(); diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index 87204fae19..861e4d99e5 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -81,7 +81,7 @@ impl fmt::Display for PublicKey { impl fmt::Debug for PublicKey { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.as_hex_string()) + write!(f, "0x{}", self.as_hex_string()) } } diff --git a/eth2/utils/deposit_contract/.gitignore b/eth2/utils/deposit_contract/.gitignore index 81b46ff033..2af4fb80c7 100644 --- a/eth2/utils/deposit_contract/.gitignore +++ b/eth2/utils/deposit_contract/.gitignore @@ -1 +1 @@ -contract/ +contracts/ diff --git a/eth2/utils/deposit_contract/build.rs b/eth2/utils/deposit_contract/build.rs index 5923788793..ea59acc82c 100644 --- a/eth2/utils/deposit_contract/build.rs +++ b/eth2/utils/deposit_contract/build.rs @@ -3,21 +3,24 @@ //! //! These files are required for some `include_bytes` calls used in this crate. 
-use reqwest::Response; use serde_json::Value; use std::env; use std::fs::File; use std::io::Write; use std::path::PathBuf; -const GITHUB_RAW: &str = "https://raw.githubusercontent.com"; -const SPEC_REPO: &str = "ethereum/eth2.0-specs"; -const SPEC_TAG: &str = "v0.8.3"; -const ABI_FILE: &str = "validator_registration.json"; -const BYTECODE_FILE: &str = "validator_registration.bytecode"; +const TAG: &str = "v0.9.2"; +const UNSAFE_TAG: &str = "v0.9.2.1"; + +fn spec_url() -> String { + format!("https://raw.githubusercontent.com/ethereum/eth2.0-specs/{}/deposit_contract/contracts/validator_registration.json", TAG) +} +fn testnet_url() -> String { + format!("https://raw.githubusercontent.com/sigp/unsafe-eth2-deposit-contract/{}/unsafe_validator_registration.json", UNSAFE_TAG) +} fn main() { - match init_deposit_contract_abi() { + match get_all_contracts() { Ok(()) => (), Err(e) => panic!(e), } @@ -25,14 +28,33 @@ fn main() { /// Attempts to download the deposit contract ABI from github if a local copy is not already /// present. -pub fn init_deposit_contract_abi() -> Result<(), String> { - let abi_file = abi_dir().join(format!("{}_{}", SPEC_TAG, ABI_FILE)); - let bytecode_file = abi_dir().join(format!("{}_{}", SPEC_TAG, BYTECODE_FILE)); +pub fn get_all_contracts() -> Result<(), String> { + download_deposit_contract( + &spec_url(), + "validator_registration.json", + "validator_registration.bytecode", + )?; + download_deposit_contract( + &testnet_url(), + "testnet_validator_registration.json", + "testnet_validator_registration.bytecode", + ) +} + +/// Attempts to download the deposit contract ABI from github if a local copy is not already +/// present. +pub fn download_deposit_contract( + url: &str, + abi_file: &str, + bytecode_file: &str, +) -> Result<(), String> { + let abi_file = abi_dir().join(format!("{}_{}", TAG, abi_file)); + let bytecode_file = abi_dir().join(format!("{}_{}", TAG, bytecode_file)); if abi_file.exists() { // Nothing to do. 
} else { - match download_abi() { + match reqwest::get(url) { Ok(mut response) => { let mut abi_file = File::create(abi_file) .map_err(|e| format!("Failed to create local abi file: {:?}", e))?; @@ -71,22 +93,13 @@ pub fn init_deposit_contract_abi() -> Result<(), String> { Ok(()) } -/// Attempts to download the deposit contract file from the Ethereum github. -fn download_abi() -> Result { - reqwest::get(&format!( - "{}/{}/{}/deposit_contract/contracts/{}", - GITHUB_RAW, SPEC_REPO, SPEC_TAG, ABI_FILE - )) - .map_err(|e| format!("Failed to download deposit ABI from github: {:?}", e)) -} - /// Returns the directory that will be used to store the deposit contract ABI. fn abi_dir() -> PathBuf { let base = env::var("CARGO_MANIFEST_DIR") .expect("should know manifest dir") .parse::() .expect("should parse manifest dir as path") - .join("contract"); + .join("contracts"); std::fs::create_dir_all(base.clone()) .expect("should be able to create abi directory in manifest"); diff --git a/eth2/utils/deposit_contract/src/lib.rs b/eth2/utils/deposit_contract/src/lib.rs index c4f2f1ef68..a235d0ff71 100644 --- a/eth2/utils/deposit_contract/src/lib.rs +++ b/eth2/utils/deposit_contract/src/lib.rs @@ -1,21 +1,32 @@ use ethabi::{Contract, Token}; use ssz::Encode; +use tree_hash::TreeHash; use types::DepositData; pub use ethabi::Error; pub const CONTRACT_DEPLOY_GAS: usize = 4_000_000; pub const DEPOSIT_GAS: usize = 4_000_000; -pub const ABI: &[u8] = include_bytes!("../contract/v0.8.3_validator_registration.json"); -pub const BYTECODE: &[u8] = include_bytes!("../contract/v0.8.3_validator_registration.bytecode"); +pub const ABI: &[u8] = include_bytes!("../contracts/v0.9.2_validator_registration.json"); +pub const BYTECODE: &[u8] = include_bytes!("../contracts/v0.9.2_validator_registration.bytecode"); + +pub mod testnet { + pub const ABI: &[u8] = + include_bytes!("../contracts/v0.9.2_testnet_validator_registration.json"); + pub const BYTECODE: &[u8] = + 
include_bytes!("../contracts/v0.9.2_testnet_validator_registration.bytecode"); +} pub fn eth1_tx_data(deposit_data: &DepositData) -> Result, Error> { let params = vec![ Token::Bytes(deposit_data.pubkey.as_ssz_bytes()), Token::Bytes(deposit_data.withdrawal_credentials.as_ssz_bytes()), Token::Bytes(deposit_data.signature.as_ssz_bytes()), + Token::FixedBytes(deposit_data.tree_hash_root().as_ssz_bytes()), ]; + // Here we make an assumption that the `crate::testnet::ABI` has a superset of the features of + // the crate::ABI`. let abi = Contract::load(ABI)?; let function = abi.function("deposit")?; function.encode_input(¶ms) @@ -51,6 +62,6 @@ mod tests { let data = eth1_tx_data(&deposit).expect("should produce tx data"); - assert_eq!(data.len(), 388, "bytes should be correct length"); + assert_eq!(data.len(), 420, "bytes should be correct length"); } } diff --git a/eth2/utils/eth2_testnet_config/Cargo.toml b/eth2/utils/eth2_testnet_config/Cargo.toml new file mode 100644 index 0000000000..399219207b --- /dev/null +++ b/eth2/utils/eth2_testnet_config/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "eth2_testnet_config" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dev-dependencies] +tempdir = "0.3" + +[dependencies] +serde = "1.0" +serde_yaml = "0.8" +types = { path = "../../types"} +eth2-libp2p = { path = "../../../beacon_node/eth2-libp2p"} +eth2_ssz = { path = "../ssz"} diff --git a/eth2/utils/eth2_testnet_config/src/lib.rs b/eth2/utils/eth2_testnet_config/src/lib.rs new file mode 100644 index 0000000000..4b0d304089 --- /dev/null +++ b/eth2/utils/eth2_testnet_config/src/lib.rs @@ -0,0 +1,260 @@ +//! This crate should eventually represent the structure at this repo: +//! +//! https://github.com/eth2-clients/eth2-testnets/tree/master/nimbus/testnet1 +//! +//! It is not accurate at the moment, we include extra files and we also don't support a few +//! 
others. We are unable to confirm to the repo until we have the following PR merged: +//! +//! https://github.com/sigp/lighthouse/pull/605 + +use eth2_libp2p::Enr; +use ssz::{Decode, Encode}; +use std::fs::{create_dir_all, File}; +use std::io::{Read, Write}; +use std::path::PathBuf; +use types::{Address, BeaconState, EthSpec, YamlConfig}; + +pub const ADDRESS_FILE: &str = "deposit_contract.txt"; +pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt"; +pub const BOOT_ENR_FILE: &str = "boot_enr.yaml"; +pub const GENESIS_STATE_FILE: &str = "genesis.ssz"; +pub const YAML_CONFIG_FILE: &str = "config.yaml"; + +pub const HARDCODED_YAML_CONFIG: &[u8] = include_bytes!("../testnet/config.yaml"); +pub const HARDCODED_DEPLOY_BLOCK: &[u8] = include_bytes!("../testnet/deploy_block.txt"); +pub const HARDCODED_DEPOSIT_CONTRACT: &[u8] = include_bytes!("../testnet/deposit_contract.txt"); +pub const HARDCODED_GENESIS_STATE: &[u8] = include_bytes!("../testnet/genesis.ssz"); +pub const HARDCODED_BOOT_ENR: &[u8] = include_bytes!("../testnet/boot_enr.yaml"); + +/// Specifies an Eth2 testnet. +/// +/// See the crate-level documentation for more details. +#[derive(Clone, PartialEq, Debug)] +pub struct Eth2TestnetConfig { + pub deposit_contract_address: String, + pub deposit_contract_deploy_block: u64, + pub boot_enr: Option>, + pub genesis_state: Option>, + pub yaml_config: Option, +} + +impl Eth2TestnetConfig { + // Creates the `Eth2TestnetConfig` that was included in the binary at compile time. This can be + // considered the default Lighthouse testnet. + // + // Returns an error if those included bytes are invalid (this is unlikely). 
+ pub fn hard_coded() -> Result { + Ok(Self { + deposit_contract_address: serde_yaml::from_reader(HARDCODED_DEPOSIT_CONTRACT) + .map_err(|e| format!("Unable to parse contract address: {:?}", e))?, + deposit_contract_deploy_block: serde_yaml::from_reader(HARDCODED_DEPLOY_BLOCK) + .map_err(|e| format!("Unable to parse deploy block: {:?}", e))?, + boot_enr: Some( + serde_yaml::from_reader(HARDCODED_BOOT_ENR) + .map_err(|e| format!("Unable to parse boot enr: {:?}", e))?, + ), + genesis_state: Some( + BeaconState::from_ssz_bytes(HARDCODED_GENESIS_STATE) + .map_err(|e| format!("Unable to parse genesis state: {:?}", e))?, + ), + yaml_config: Some( + serde_yaml::from_reader(HARDCODED_YAML_CONFIG) + .map_err(|e| format!("Unable to parse genesis state: {:?}", e))?, + ), + }) + } + + // Write the files to the directory, only if the directory doesn't already exist. + pub fn write_to_file(&self, base_dir: PathBuf) -> Result<(), String> { + if base_dir.exists() { + return Err("Testnet directory already exists".to_string()); + } + + self.force_write_to_file(base_dir) + } + + // Write the files to the directory, even if the directory already exists. + pub fn force_write_to_file(&self, base_dir: PathBuf) -> Result<(), String> { + create_dir_all(&base_dir) + .map_err(|e| format!("Unable to create testnet directory: {:?}", e))?; + + macro_rules! write_to_yaml_file { + ($file: ident, $variable: expr) => { + File::create(base_dir.join($file)) + .map_err(|e| format!("Unable to create {}: {:?}", $file, e)) + .and_then(|mut file| { + let yaml = serde_yaml::to_string(&$variable) + .map_err(|e| format!("Unable to YAML encode {}: {:?}", $file, e))?; + + // Remove the doc header from the YAML file. + // + // This allows us to play nice with other clients that are expecting + // plain-text, not YAML. + let no_doc_header = if yaml.starts_with("---\n") { + &yaml[4..] 
+ } else { + &yaml + }; + + file.write_all(no_doc_header.as_bytes()) + .map_err(|e| format!("Unable to write {}: {:?}", $file, e)) + })?; + }; + } + + write_to_yaml_file!(ADDRESS_FILE, self.deposit_contract_address); + write_to_yaml_file!(DEPLOY_BLOCK_FILE, self.deposit_contract_deploy_block); + + if let Some(boot_enr) = &self.boot_enr { + write_to_yaml_file!(BOOT_ENR_FILE, boot_enr); + } + + if let Some(yaml_config) = &self.yaml_config { + write_to_yaml_file!(YAML_CONFIG_FILE, yaml_config); + } + + // The genesis state is a special case because it uses SSZ, not YAML. + if let Some(genesis_state) = &self.genesis_state { + let file = base_dir.join(GENESIS_STATE_FILE); + + File::create(&file) + .map_err(|e| format!("Unable to create {:?}: {:?}", file, e)) + .and_then(|mut file| { + file.write_all(&genesis_state.as_ssz_bytes()) + .map_err(|e| format!("Unable to write {:?}: {:?}", file, e)) + })?; + } + + Ok(()) + } + + pub fn load(base_dir: PathBuf) -> Result { + macro_rules! load_from_file { + ($file: ident) => { + File::open(base_dir.join($file)) + .map_err(|e| format!("Unable to open {}: {:?}", $file, e)) + .and_then(|file| { + serde_yaml::from_reader(file) + .map_err(|e| format!("Unable to parse {}: {:?}", $file, e)) + })?; + }; + } + + macro_rules! optional_load_from_file { + ($file: ident) => { + if base_dir.join($file).exists() { + Some(load_from_file!($file)) + } else { + None + } + }; + } + + let deposit_contract_address = load_from_file!(ADDRESS_FILE); + let deposit_contract_deploy_block = load_from_file!(DEPLOY_BLOCK_FILE); + let boot_enr = optional_load_from_file!(BOOT_ENR_FILE); + let yaml_config = optional_load_from_file!(YAML_CONFIG_FILE); + + // The genesis state is a special case because it uses SSZ, not YAML. 
+ let file = base_dir.join(GENESIS_STATE_FILE); + let genesis_state = if base_dir.join(&file).exists() { + Some( + File::open(base_dir.join(&file)) + .map_err(|e| format!("Unable to open {:?}: {:?}", file, e)) + .and_then(|mut file| { + let mut bytes = vec![]; + file.read_to_end(&mut bytes) + .map_err(|e| format!("Unable to read {:?}: {:?}", file, e))?; + + BeaconState::from_ssz_bytes(&bytes) + .map_err(|e| format!("Unable to SSZ decode {:?}: {:?}", file, e)) + })?, + ) + } else { + None + }; + + Ok(Self { + deposit_contract_address, + deposit_contract_deploy_block, + boot_enr, + genesis_state, + yaml_config, + }) + } + + pub fn deposit_contract_address(&self) -> Result { + if self.deposit_contract_address.starts_with("0x") { + self.deposit_contract_address[2..] + .parse() + .map_err(|e| format!("Corrupted address, unable to parse: {:?}", e)) + } else { + Err("Corrupted address, must start with 0x".to_string()) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempdir::TempDir; + use types::{Eth1Data, Hash256, MinimalEthSpec, YamlConfig}; + + type E = MinimalEthSpec; + + #[test] + fn hard_coded_works() { + let dir: Eth2TestnetConfig = + Eth2TestnetConfig::hard_coded().expect("should decode hard_coded params"); + + assert!(dir.boot_enr.is_some()); + assert!(dir.genesis_state.is_some()); + assert!(dir.yaml_config.is_some()); + } + + #[test] + fn round_trip() { + let spec = &E::default_spec(); + + let eth1_data = Eth1Data { + deposit_root: Hash256::zero(), + deposit_count: 0, + block_hash: Hash256::zero(), + }; + + // TODO: figure out how to generate ENR and add some here. 
+ let boot_enr = None; + let genesis_state = Some(BeaconState::new(42, eth1_data, spec)); + let yaml_config = Some(YamlConfig::from_spec::(spec)); + + do_test::(boot_enr, genesis_state.clone(), yaml_config.clone()); + do_test::(None, None, None); + } + + fn do_test( + boot_enr: Option>, + genesis_state: Option>, + yaml_config: Option, + ) { + let temp_dir = TempDir::new("eth2_testnet_test").expect("should create temp dir"); + let base_dir = PathBuf::from(temp_dir.path().join("my_testnet")); + let deposit_contract_address = "0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413".to_string(); + let deposit_contract_deploy_block = 42; + + let testnet: Eth2TestnetConfig = Eth2TestnetConfig { + deposit_contract_address: deposit_contract_address.clone(), + deposit_contract_deploy_block: deposit_contract_deploy_block, + boot_enr, + genesis_state, + yaml_config, + }; + + testnet + .write_to_file(base_dir.clone()) + .expect("should write to file"); + + let decoded = Eth2TestnetConfig::load(base_dir).expect("should load struct"); + + assert_eq!(testnet, decoded, "should decode as encoded"); + } +} diff --git a/eth2/utils/eth2_testnet_config/testnet/boot_enr.yaml b/eth2/utils/eth2_testnet_config/testnet/boot_enr.yaml new file mode 100644 index 0000000000..149ce546f5 --- /dev/null +++ b/eth2/utils/eth2_testnet_config/testnet/boot_enr.yaml @@ -0,0 +1,2 @@ +- "enr:-Iu4QPONEndy6aWOJLWBaCLS1KRg7YPeK0qptnxJzuBW8OcFP9tLgA_ewmAvHBzn9zPG6XIgdH83Mq_5cyLF5yWRYmYBgmlkgnY0gmlwhDaZ6cGJc2VjcDI1NmsxoQK-9tWOso2Kco7L5L-zKoj-MwPfeBbEP12bxr9bqzwZV4N0Y3CCIyiDdWRwgiMo" +- "enr:-Iu4QGVXt2bKzkITBsPKqFOhxPMmZhMJvEzPdk_zhfvoWHxBX4oGrtiup1ReLVJijfEazL8Iv-0t7ZQnZy9NvqI4F0YBgmlkgnY0gmlwhDQrTeaJc2VjcDI1NmsxoQOb5IvXo9O253FD1AYoPwQpNM79-mLg8_HV1NevjZnTt4N0Y3CCIyiDdWRwgiMo" \ No newline at end of file diff --git a/eth2/utils/eth2_testnet_config/testnet/config.yaml b/eth2/utils/eth2_testnet_config/testnet/config.yaml new file mode 100644 index 0000000000..1693cdf7a0 --- /dev/null +++ 
b/eth2/utils/eth2_testnet_config/testnet/config.yaml @@ -0,0 +1,49 @@ +FAR_FUTURE_EPOCH: 18446744073709551615 +BASE_REWARDS_PER_EPOCH: 4 +DEPOSIT_CONTRACT_TREE_DEPTH: 32 +SECONDS_PER_DAY: 480 +MAX_COMMITTEES_PER_SLOT: 4 +TARGET_COMMITTEE_SIZE: 4 +MIN_PER_EPOCH_CHURN_LIMIT: 4 +CHURN_LIMIT_QUOTIENT: 65536 +SHUFFLE_ROUND_COUNT: 10 +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 64 +MIN_GENESIS_TIME: 0 +MIN_DEPOSIT_AMOUNT: 100 +MAX_EFFECTIVE_BALANCE: 3200000000 +EJECTION_BALANCE: 1600000000 +EFFECTIVE_BALANCE_INCREMENT: 100000000 +GENESIS_SLOT: 0 +BLS_WITHDRAWAL_PREFIX: 0x00 +SECONDS_PER_SLOT: 12 +MIN_ATTESTATION_INCLUSION_DELAY: 1 +MIN_SEED_LOOKAHEAD: 1 +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +PERSISTENT_COMMITTEE_PERIOD: 2048 +MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 +BASE_REWARD_FACTOR: 64 +WHISTLEBLOWER_REWARD_QUOTIENT: 512 +PROPOSER_REWARD_QUOTIENT: 8 +INACTIVITY_PENALTY_QUOTIENT: 33554432 +MIN_SLASHING_PENALTY_QUOTIENT: 32 +SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 +DOMAIN_BEACON_PROPOSER: 0x00000000 +DOMAIN_BEACON_ATTESTER: 0x01000000 +DOMAIN_RANDAO: 0x02000000 +DOMAIN_DEPOSIT: 0x03000000 +DOMAIN_VOLUNTARY_EXIT: 0x04000000 +JUSTIFICATION_BITS_LENGTH: 0x04000000 +MAX_VALIDATORS_PER_COMMITTEE: 2048 +GENESIS_EPOCH: 0 +SLOTS_PER_EPOCH: 8 +SLOTS_PER_ETH1_VOTING_PERIOD: 16 +SLOTS_PER_HISTORICAL_ROOT: 64 +EPOCHS_PER_HISTORICAL_VECTOR: 64 +EPOCHS_PER_SLASHINGS_VECTOR: 64 +HISTORICAL_ROOTS_LIMIT: 16777216 +VALIDATOR_REGISTRY_LIMIT: 1099511627776 +MAX_PROPOSER_SLASHINGS: 16 +MAX_ATTESTER_SLASHINGS: 1 +MAX_ATTESTATIONS: 128 +MAX_DEPOSITS: 16 +MAX_VOLUNTARY_EXITS: 16 \ No newline at end of file diff --git a/eth2/utils/eth2_testnet_config/testnet/deploy_block.txt b/eth2/utils/eth2_testnet_config/testnet/deploy_block.txt new file mode 100644 index 0000000000..9272e58904 --- /dev/null +++ b/eth2/utils/eth2_testnet_config/testnet/deploy_block.txt @@ -0,0 +1 @@ +1743571 \ No newline at end of file diff --git a/eth2/utils/eth2_testnet_config/testnet/deposit_contract.txt 
b/eth2/utils/eth2_testnet_config/testnet/deposit_contract.txt new file mode 100644 index 0000000000..55ea9dc71e --- /dev/null +++ b/eth2/utils/eth2_testnet_config/testnet/deposit_contract.txt @@ -0,0 +1 @@ +0xf382356688ae7dd3c2d6deb7e79c3ffe68816251 \ No newline at end of file diff --git a/eth2/utils/eth2_testnet_config/testnet/genesis.ssz b/eth2/utils/eth2_testnet_config/testnet/genesis.ssz new file mode 100644 index 0000000000..90edf24e46 Binary files /dev/null and b/eth2/utils/eth2_testnet_config/testnet/genesis.ssz differ diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index b8bd0e16f2..a3ec8909ba 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -22,3 +22,6 @@ eth1_test_rig = { path = "../tests/eth1_test_rig" } futures = "0.1.25" environment = { path = "../lighthouse/environment" } web3 = "0.8.0" +eth2_testnet_config = { path = "../eth2/utils/eth2_testnet_config" } +dirs = "2.0" +genesis = { path = "../beacon_node/genesis" } diff --git a/lcli/src/deploy_deposit_contract.rs b/lcli/src/deploy_deposit_contract.rs new file mode 100644 index 0000000000..41a1928110 --- /dev/null +++ b/lcli/src/deploy_deposit_contract.rs @@ -0,0 +1,148 @@ +use clap::ArgMatches; +use environment::Environment; +use eth1_test_rig::DepositContract; +use eth2_testnet_config::Eth2TestnetConfig; +use std::fs::File; +use std::io::Read; +use std::path::PathBuf; +use types::{ChainSpec, EthSpec, YamlConfig}; +use web3::{transports::Http, Web3}; + +pub const SECONDS_PER_ETH1_BLOCK: u64 = 15; + +pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { + let min_genesis_time = matches + .value_of("min-genesis-time") + .ok_or_else(|| "min_genesis_time not specified")? + .parse::() + .map_err(|e| format!("Failed to parse min_genesis_time: {}", e))?; + + let confirmations = matches + .value_of("confirmations") + .ok_or_else(|| "Confirmations not specified")? 
+ .parse::() + .map_err(|e| format!("Failed to parse confirmations: {}", e))?; + + let output_dir = matches + .value_of("output") + .ok_or_else(|| ()) + .and_then(|output| output.parse::().map_err(|_| ())) + .unwrap_or_else(|_| { + dirs::home_dir() + .map(|home| home.join(".lighthouse").join("testnet")) + .expect("should locate home directory") + }); + + let password = parse_password(matches)?; + + let endpoint = matches + .value_of("eth1-endpoint") + .ok_or_else(|| "eth1-endpoint not specified")?; + + let (_event_loop, transport) = Http::new(&endpoint).map_err(|e| { + format!( + "Failed to start HTTP transport connected to ganache: {:?}", + e + ) + })?; + let web3 = Web3::new(transport); + + if output_dir.exists() { + return Err("Output directory already exists".to_string()); + } + + // It's unlikely that this will be the _actual_ deployment block, however it'll be close + // enough to serve our purposes. + // + // We only need the deposit block to put a lower bound on the block number we need to search + // for deposit logs. + let deploy_block = env + .runtime() + .block_on(web3.eth().block_number()) + .map_err(|e| format!("Failed to get block number: {}", e))?; + + info!("Present eth1 block number is {}", deploy_block); + + info!("Deploying the bytecode at https://github.com/sigp/unsafe-eth2-deposit-contract",); + + info!( + "Submitting deployment transaction, waiting for {} confirmations", + confirmations + ); + + let deposit_contract = env + .runtime() + .block_on(DepositContract::deploy_testnet( + web3, + confirmations, + password, + )) + .map_err(|e| format!("Failed to deploy contract: {}", e))?; + + info!( + "Deposit contract deployed. 
address: {}, min_genesis_time: {}, deploy_block: {}", + deposit_contract.address(), + min_genesis_time, + deploy_block + ); + + info!("Writing config to {:?}", output_dir); + + let mut spec = lighthouse_testnet_spec(env.core_context().eth2_config.spec.clone()); + spec.min_genesis_time = min_genesis_time; + + let testnet_config: Eth2TestnetConfig = Eth2TestnetConfig { + deposit_contract_address: format!("{}", deposit_contract.address()), + deposit_contract_deploy_block: deploy_block.as_u64(), + boot_enr: None, + genesis_state: None, + yaml_config: Some(YamlConfig::from_spec::(&spec)), + }; + + testnet_config.write_to_file(output_dir)?; + + Ok(()) +} + +/// Modfies the specification to better suit present-capacity testnets. +pub fn lighthouse_testnet_spec(mut spec: ChainSpec) -> ChainSpec { + spec.min_deposit_amount = 100; + spec.max_effective_balance = 3_200_000_000; + spec.ejection_balance = 1_600_000_000; + spec.effective_balance_increment = 100_000_000; + + // This value must be at least 2x the `ETH1_FOLLOW_DISTANCE` otherwise `all_eth1_data` can + // become a subset of `new_eth1_data` which may result in an Exception in the spec + // implementation. + // + // This value determines the delay between the eth1 block that triggers genesis and the first + // slot of that new chain. + spec.seconds_per_day = SECONDS_PER_ETH1_BLOCK * spec.eth1_follow_distance * 2; + + spec +} + +pub fn parse_password(matches: &ArgMatches) -> Result, String> { + if let Some(password_path) = matches.value_of("password") { + Ok(Some( + File::open(password_path) + .map_err(|e| format!("Unable to open password file: {:?}", e)) + .and_then(|mut file| { + let mut password = String::new(); + file.read_to_string(&mut password) + .map_err(|e| format!("Unable to read password file to string: {:?}", e)) + .map(|_| password) + }) + .map(|password| { + // Trim the linefeed from the end. 
+ if password.ends_with("\n") { + password[0..password.len() - 1].to_string() + } else { + password + } + })?, + )) + } else { + Ok(None) + } +} diff --git a/lcli/src/deposit_contract.rs b/lcli/src/deposit_contract.rs deleted file mode 100644 index 0c5596a07d..0000000000 --- a/lcli/src/deposit_contract.rs +++ /dev/null @@ -1,78 +0,0 @@ -use clap::ArgMatches; -use environment::Environment; -use eth1_test_rig::{DelayThenDeposit, DepositContract}; -use futures::Future; -use std::time::Duration; -use types::{test_utils::generate_deterministic_keypair, EthSpec, Hash256}; -use web3::{transports::Http, Web3}; - -pub fn run_deposit_contract( - mut env: Environment, - matches: &ArgMatches, -) -> Result<(), String> { - let count = matches - .value_of("count") - .ok_or_else(|| "Deposit count not specified")? - .parse::() - .map_err(|e| format!("Failed to parse deposit count: {}", e))?; - - let delay = matches - .value_of("delay") - .ok_or_else(|| "Deposit count not specified")? - .parse::() - .map(Duration::from_millis) - .map_err(|e| format!("Failed to parse deposit count: {}", e))?; - - let confirmations = matches - .value_of("confirmations") - .ok_or_else(|| "Confirmations not specified")? - .parse::() - .map_err(|e| format!("Failed to parse confirmations: {}", e))?; - - let endpoint = matches - .value_of("endpoint") - .ok_or_else(|| "Endpoint not specified")?; - - let (_event_loop, transport) = Http::new(&endpoint).map_err(|e| { - format!( - "Failed to start HTTP transport connected to ganache: {:?}", - e - ) - })?; - let web3 = Web3::new(transport); - - let deposit_contract = env - .runtime() - .block_on(DepositContract::deploy(web3, confirmations)) - .map_err(|e| format!("Failed to deploy contract: {}", e))?; - - info!( - "Deposit contract deployed. 
Address: {}", - deposit_contract.address() - ); - - env.runtime() - .block_on(do_deposits::(deposit_contract, count, delay)) - .map_err(|e| format!("Failed to submit deposits: {}", e))?; - - Ok(()) -} - -fn do_deposits( - deposit_contract: DepositContract, - count: usize, - delay: Duration, -) -> impl Future { - let deposits = (0..count) - .map(|i| DelayThenDeposit { - deposit: deposit_contract.deposit_helper::( - generate_deterministic_keypair(i), - Hash256::from_low_u64_le(i as u64), - 32_000_000_000, - ), - delay, - }) - .collect(); - - deposit_contract.deposit_multiple(deposits) -} diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs new file mode 100644 index 0000000000..4b06d2c11b --- /dev/null +++ b/lcli/src/eth1_genesis.rs @@ -0,0 +1,67 @@ +use clap::ArgMatches; +use environment::Environment; +use eth2_testnet_config::Eth2TestnetConfig; +use futures::Future; +use genesis::{Eth1Config, Eth1GenesisService}; +use std::path::PathBuf; +use std::time::Duration; +use types::EthSpec; + +/// Interval between polling the eth1 node for genesis information. +pub const ETH1_GENESIS_UPDATE_INTERVAL: Duration = Duration::from_millis(7_000); + +pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { + let endpoint = matches + .value_of("eth1-endpoint") + .ok_or_else(|| "eth1-endpoint not specified")?; + + let testnet_dir = matches + .value_of("testnet-dir") + .ok_or_else(|| ()) + .and_then(|dir| dir.parse::().map_err(|_| ())) + .unwrap_or_else(|_| { + dirs::home_dir() + .map(|home| home.join(".lighthouse").join("testnet")) + .expect("should locate home directory") + }); + + let mut eth2_testnet_config: Eth2TestnetConfig = + Eth2TestnetConfig::load(testnet_dir.clone())?; + + let spec = eth2_testnet_config + .yaml_config + .as_ref() + .ok_or_else(|| "The testnet directory must contain a spec config".to_string())? 
+ .apply_to_chain_spec::(&env.core_context().eth2_config.spec) + .ok_or_else(|| { + format!( + "The loaded config is not compatible with the {} spec", + &env.core_context().eth2_config.spec_constants + ) + })?; + + let mut config = Eth1Config::default(); + config.endpoint = endpoint.to_string(); + config.deposit_contract_address = eth2_testnet_config.deposit_contract_address.clone(); + config.deposit_contract_deploy_block = eth2_testnet_config.deposit_contract_deploy_block; + config.lowest_cached_block_number = eth2_testnet_config.deposit_contract_deploy_block; + config.follow_distance = spec.eth1_follow_distance / 2; + + let genesis_service = Eth1GenesisService::new(config, env.core_context().log.clone()); + + let future = genesis_service + .wait_for_genesis_state(ETH1_GENESIS_UPDATE_INTERVAL, spec) + .map(move |genesis_state| { + eth2_testnet_config.genesis_state = Some(genesis_state); + eth2_testnet_config.force_write_to_file(testnet_dir) + }); + + info!("Starting service to produce genesis BeaconState from eth1"); + info!("Connecting to eth1 http endpoint: {}", endpoint); + + env.runtime() + .block_on(future) + .map_err(|e| format!("Failed to find genesis: {}", e))??; + + Ok(()) +} diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 85af9f21e8..bc16737c2a 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -1,13 +1,14 @@ #[macro_use] extern crate log; -mod deposit_contract; +mod deploy_deposit_contract; +mod eth1_genesis; mod parse_hex; mod pycli; +mod refund_deposit_contract; mod transition_blocks; use clap::{App, Arg, SubCommand}; -use deposit_contract::run_deposit_contract; use environment::EnvironmentBuilder; use log::Level; use parse_hex::run_parse_hex; @@ -24,8 +25,6 @@ fn main() { simple_logger::init_with_level(Level::Info).expect("logger should initialize"); let matches = App::new("Lighthouse CLI Tool") - .version("0.1.0") - .author("Paul Hauner ") .about( "Performs various testing-related tasks, modelled after zcli. 
\ by @protolambda.", @@ -33,8 +32,6 @@ fn main() { .subcommand( SubCommand::with_name("genesis_yaml") .about("Generates a genesis YAML file") - .version("0.1.0") - .author("Paul Hauner ") .arg( Arg::with_name("num_validators") .short("n") @@ -73,8 +70,6 @@ fn main() { .subcommand( SubCommand::with_name("transition-blocks") .about("Performs a state transition given a pre-state and block") - .version("0.1.0") - .author("Paul Hauner ") .arg( Arg::with_name("pre-state") .value_name("BEACON_STATE") @@ -101,8 +96,6 @@ fn main() { .subcommand( SubCommand::with_name("pretty-hex") .about("Parses SSZ encoded as ASCII 0x-prefixed hex") - .version("0.1.0") - .author("Paul Hauner ") .arg( Arg::with_name("type") .value_name("TYPE") @@ -120,31 +113,32 @@ fn main() { ), ) .subcommand( - SubCommand::with_name("deposit-contract") + SubCommand::with_name("deploy-deposit-contract") .about( - "Uses an eth1 test rpc (e.g., ganache-cli) to simulate the deposit contract.", + "Deploy an eth1 deposit contract and create a ~/.lighthouse/testnet directory \ + (unless another directory is specified).", ) - .version("0.1.0") - .author("Paul Hauner ") .arg( - Arg::with_name("count") - .short("c") - .value_name("INTEGER") + Arg::with_name("output") + .short("o") + .long("output") + .value_name("PATH") .takes_value(true) - .required(true) - .help("The number of deposits to be submitted."), + .help("The output directory. 
Defaults to ~/.lighthouse/testnet"), ) .arg( - Arg::with_name("delay") - .short("d") - .value_name("MILLIS") + Arg::with_name("min-genesis-time") + .short("t") + .long("min-genesis-time") + .value_name("UNIX_EPOCH_SECONDS") .takes_value(true) - .required(true) - .help("The delay (in milliseconds) between each deposit"), + .default_value("0") + .help("The MIN_GENESIS_TIME constant."), ) .arg( - Arg::with_name("endpoint") + Arg::with_name("eth1-endpoint") .short("e") + .long("eth1-endpoint") .value_name("HTTP_SERVER") .takes_value(true) .default_value("http://localhost:8545") @@ -153,16 +147,84 @@ fn main() { .arg( Arg::with_name("confirmations") .value_name("INTEGER") + .long("confirmations") .takes_value(true) .default_value("3") .help("The number of block confirmations before declaring the contract deployed."), ) + .arg( + Arg::with_name("password") + .long("password") + .value_name("FILE") + .takes_value(true) + .help("The password file to unlock the eth1 account (see --index)"), + ) + ) + .subcommand( + SubCommand::with_name("refund-deposit-contract") + .about( + "Calls the steal() function on a testnet eth1 contract.", + ) + .arg( + Arg::with_name("testnet-dir") + .short("d") + .long("testnet-dir") + .value_name("PATH") + .takes_value(true) + .help("The testnet dir. 
Defaults to ~/.lighthouse/testnet"), + ) + .arg( + Arg::with_name("eth1-endpoint") + .short("e") + .long("eth1-endpoint") + .value_name("HTTP_SERVER") + .takes_value(true) + .default_value("http://localhost:8545") + .help("The URL to the eth1 JSON-RPC http API."), + ) + .arg( + Arg::with_name("password") + .long("password") + .value_name("FILE") + .takes_value(true) + .help("The password file to unlock the eth1 account (see --index)"), + ) + .arg( + Arg::with_name("account-index") + .short("i") + .long("account-index") + .value_name("INDEX") + .takes_value(true) + .default_value("0") + .help("The eth1 accounts[] index which will send the transaction"), + ) + ) + .subcommand( + SubCommand::with_name("eth1-genesis") + .about( + "Listens to the eth1 chain and finds the genesis beacon state", + ) + .arg( + Arg::with_name("testnet-dir") + .short("d") + .long("testnet-dir") + .value_name("PATH") + .takes_value(true) + .help("The testnet dir. Defaults to ~/.lighthouse/testnet"), + ) + .arg( + Arg::with_name("eth1-endpoint") + .short("e") + .long("eth1-endpoint") + .value_name("HTTP_SERVER") + .takes_value(true) + .default_value("http://localhost:8545") + .help("The URL to the eth1 JSON-RPC http API."), + ) ) .subcommand( SubCommand::with_name("pycli") .about("TODO") - .version("0.1.0") - .author("Paul Hauner ") .arg( Arg::with_name("pycli-path") .long("pycli-path") @@ -178,7 +240,7 @@ fn main() { let env = EnvironmentBuilder::minimal() .multi_threaded_tokio_runtime() .expect("should start tokio runtime") - .null_logger() + .async_logger("trace") .expect("should start null logger") .build() .expect("should build env"); @@ -219,7 +281,6 @@ fn main() { "mainnet" => genesis_yaml::(num_validators, genesis_time, file), _ => unreachable!("guarded by slog possible_values"), }; - info!("Genesis state YAML file created. 
Exiting successfully."); } ("transition-blocks", Some(matches)) => run_transition_blocks(matches) @@ -229,8 +290,16 @@ fn main() { } ("pycli", Some(matches)) => run_pycli::(matches) .unwrap_or_else(|e| error!("Failed to run pycli: {}", e)), - ("deposit-contract", Some(matches)) => run_deposit_contract::(env, matches) - .unwrap_or_else(|e| error!("Failed to run deposit contract sim: {}", e)), + ("deploy-deposit-contract", Some(matches)) => { + deploy_deposit_contract::run::(env, matches) + .unwrap_or_else(|e| error!("Failed to run deploy-deposit-contract command: {}", e)) + } + ("refund-deposit-contract", Some(matches)) => { + refund_deposit_contract::run::(env, matches) + .unwrap_or_else(|e| error!("Failed to run refund-deposit-contract command: {}", e)) + } + ("eth1-genesis", Some(matches)) => eth1_genesis::run::(env, matches) + .unwrap_or_else(|e| error!("Failed to run eth1-genesis command: {}", e)), (other, _) => error!("Unknown subcommand {}. See --help.", other), } } diff --git a/lcli/src/refund_deposit_contract.rs b/lcli/src/refund_deposit_contract.rs new file mode 100644 index 0000000000..1684a5d89b --- /dev/null +++ b/lcli/src/refund_deposit_contract.rs @@ -0,0 +1,116 @@ +use crate::deploy_deposit_contract::parse_password; +use clap::ArgMatches; +use environment::Environment; +use eth2_testnet_config::Eth2TestnetConfig; +use futures::{future, Future}; +use std::path::PathBuf; +use types::EthSpec; +use web3::{ + transports::Http, + types::{Address, TransactionRequest, U256}, + Web3, +}; + +/// `keccak("steal()")[0..4]` +pub const STEAL_FN_SIGNATURE: &[u8] = &[0xcf, 0x7a, 0x89, 0x65]; + +pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { + let endpoint = matches + .value_of("eth1-endpoint") + .ok_or_else(|| "eth1-endpoint not specified")?; + + let account_index = matches + .value_of("account-index") + .ok_or_else(|| "No account-index".to_string())? 
+ .parse::() + .map_err(|e| format!("Unable to parse account-index: {}", e))?; + + let password_opt = parse_password(matches)?; + + let testnet_dir = matches + .value_of("testnet-dir") + .ok_or_else(|| ()) + .and_then(|dir| dir.parse::().map_err(|_| ())) + .unwrap_or_else(|_| { + dirs::home_dir() + .map(|home| home.join(".lighthouse").join("testnet")) + .expect("should locate home directory") + }); + + let eth2_testnet_config: Eth2TestnetConfig = Eth2TestnetConfig::load(testnet_dir)?; + + let (_event_loop, transport) = Http::new(&endpoint).map_err(|e| { + format!( + "Failed to start HTTP transport connected to ganache: {:?}", + e + ) + })?; + + let web3_1 = Web3::new(transport); + let web3_2 = web3_1.clone(); + + // Convert from `types::Address` to `web3::types::Address`. + let deposit_contract = Address::from_slice( + eth2_testnet_config + .deposit_contract_address()? + .as_fixed_bytes(), + ); + + let future = web3_1 + .eth() + .accounts() + .map_err(|e| format!("Failed to get accounts: {:?}", e)) + .and_then(move |accounts| { + accounts + .get(account_index) + .cloned() + .ok_or_else(|| "Insufficient accounts for deposit".to_string()) + }) + .and_then(move |from_address| { + let future: Box + Send> = + if let Some(password) = password_opt { + // Unlock for only a single transaction. 
+ let duration = None; + + let future = web3_1 + .personal() + .unlock_account(from_address, &password, duration) + .then(move |result| match result { + Ok(true) => Ok(from_address), + Ok(false) => Err("Eth1 node refused to unlock account".to_string()), + Err(e) => Err(format!("Eth1 unlock request failed: {:?}", e)), + }); + + Box::new(future) + } else { + Box::new(future::ok(from_address)) + }; + + future + }) + .and_then(move |from| { + let tx_request = TransactionRequest { + from, + to: Some(deposit_contract), + gas: Some(U256::from(400_000)), + gas_price: None, + value: Some(U256::zero()), + data: Some(STEAL_FN_SIGNATURE.into()), + nonce: None, + condition: None, + }; + + web3_2 + .eth() + .send_transaction(tx_request) + .map_err(|e| format!("Failed to call deposit fn: {:?}", e)) + }) + .map(move |tx| info!("Refund transaction submitted: eth1_tx_hash: {:?}", tx)) + .map_err(move |e| error!("Unable to submit refund transaction: error: {}", e)); + + env.runtime() + .block_on(future) + .map_err(|()| format!("Failed to send transaction"))?; + + Ok(()) +} diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 631e799271..8aa130f7bf 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -149,7 +149,7 @@ impl RuntimeContext { /// Returns a sub-context of this context. /// /// The generated service will have the `service_name` in all it's logs. 
- pub fn service_context(&self, service_name: &'static str) -> Self { + pub fn service_context(&self, service_name: String) -> Self { Self { executor: self.executor.clone(), log: self.log.new(o!("service" => service_name)), @@ -170,7 +170,7 @@ pub struct Environment { runtime: Runtime, log: Logger, eth_spec_instance: E, - eth2_config: Eth2Config, + pub eth2_config: Eth2Config, } impl Environment { diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 728948ad78..72c92f88ba 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -49,7 +49,7 @@ fn main() { .help("The title of the spec constants for chain config.") .takes_value(true) .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) - .default_value("trace"), + .default_value("info"), ) .arg( Arg::with_name("datadir") @@ -57,7 +57,7 @@ fn main() { .short("d") .value_name("DIR") .global(true) - .help("Data directory for keys and databases.") + .help("Data directory for lighthouse keys and databases.") .takes_value(true), ) .subcommand(beacon_node::cli_app()) @@ -133,14 +133,12 @@ fn run( // Creating a command which can run both might be useful future works. if let Some(sub_matches) = matches.subcommand_matches("account_manager") { - let runtime_context = environment.core_context(); + // Pass the entire `environment` to the account manager so it can run blocking operations. + account_manager::run(sub_matches, environment); - account_manager::run(sub_matches, runtime_context); - - // Exit early if the account manager was run. It does not use the tokio executor, no need - // to wait for it to shutdown. + // Exit as soon as account manager returns control. 
return Ok(()); - } + }; let beacon_node = if let Some(sub_matches) = matches.subcommand_matches("beacon_node") { let runtime_context = environment.core_context(); diff --git a/tests/beacon_chain_sim/Cargo.toml b/tests/beacon_chain_sim/Cargo.toml index 37734a0305..3b992c07f0 100644 --- a/tests/beacon_chain_sim/Cargo.toml +++ b/tests/beacon_chain_sim/Cargo.toml @@ -10,3 +10,8 @@ edition = "2018" node_test_rig = { path = "../node_test_rig" } types = { path = "../../eth2/types" } validator_client = { path = "../../validator_client" } +parking_lot = "0.9.0" +futures = "0.1.29" +tokio = "0.1.22" +eth1_test_rig = { path = "../eth1_test_rig" } +env_logger = "0.7.1" diff --git a/tests/beacon_chain_sim/src/checks.rs b/tests/beacon_chain_sim/src/checks.rs new file mode 100644 index 0000000000..adb2854476 --- /dev/null +++ b/tests/beacon_chain_sim/src/checks.rs @@ -0,0 +1,136 @@ +use crate::local_network::LocalNetwork; +use futures::{stream, Future, IntoFuture, Stream}; +use std::time::{Duration, Instant}; +use tokio::timer::Delay; +use types::{Epoch, EthSpec, Slot, Unsigned}; + +/// Checks that all of the validators have on-boarded by the start of the second eth1 voting +/// period. +pub fn verify_initial_validator_count( + network: LocalNetwork, + slot_duration: Duration, + initial_validator_count: usize, +) -> impl Future { + slot_delay(Slot::new(1), slot_duration) + .and_then(move |()| verify_validator_count(network, initial_validator_count)) +} + +/// Checks that all of the validators have on-boarded by the start of the second eth1 voting +/// period. +pub fn verify_validator_onboarding( + network: LocalNetwork, + slot_duration: Duration, + expected_validator_count: usize, +) -> impl Future { + slot_delay( + Slot::new(E::SlotsPerEth1VotingPeriod::to_u64()), + slot_duration, + ) + .and_then(move |()| verify_validator_count(network, expected_validator_count)) +} + +/// Checks that the chain has made the first possible finalization. 
+/// +/// Intended to be run as soon as chain starts. +pub fn verify_first_finalization( + network: LocalNetwork, + slot_duration: Duration, +) -> impl Future { + epoch_delay(Epoch::new(4), slot_duration, E::slots_per_epoch()) + .and_then(|()| verify_all_finalized_at(network, Epoch::new(2))) +} + +/// Delays for `epochs`, plus half a slot extra. +fn epoch_delay( + epochs: Epoch, + slot_duration: Duration, + slots_per_epoch: u64, +) -> impl Future { + let duration = slot_duration * (epochs.as_u64() * slots_per_epoch) as u32 + slot_duration / 2; + + Delay::new(Instant::now() + duration).map_err(|e| format!("Epoch delay failed: {:?}", e)) +} + +/// Delays for `slots`, plus half a slot extra. +fn slot_delay(slots: Slot, slot_duration: Duration) -> impl Future { + let duration = slot_duration * slots.as_u64() as u32 + slot_duration / 2; + + Delay::new(Instant::now() + duration).map_err(|e| format!("Epoch delay failed: {:?}", e)) +} + +/// Verifies that all beacon nodes in the given network have a head state that has a finalized +/// epoch of `epoch`. +fn verify_all_finalized_at( + network: LocalNetwork, + epoch: Epoch, +) -> impl Future { + network + .remote_nodes() + .into_future() + .and_then(|remote_nodes| { + stream::unfold(remote_nodes.into_iter(), |mut iter| { + iter.next().map(|remote_node| { + remote_node + .http + .beacon() + .get_head() + .map(|head| head.finalized_slot.epoch(E::slots_per_epoch())) + .map(|epoch| (epoch, iter)) + .map_err(|e| format!("Get head via http failed: {:?}", e)) + }) + }) + .collect() + }) + .and_then(move |epochs| { + if epochs.iter().any(|node_epoch| *node_epoch != epoch) { + Err(format!( + "Nodes are not finalized at epoch {}. Finalized epochs: {:?}", + epoch, epochs + )) + } else { + Ok(()) + } + }) +} + +/// Verifies that all beacon nodes in the given `network` have a head state that contains +/// `expected_count` validators. 
+fn verify_validator_count( + network: LocalNetwork, + expected_count: usize, +) -> impl Future { + network + .remote_nodes() + .into_future() + .and_then(|remote_nodes| { + stream::unfold(remote_nodes.into_iter(), |mut iter| { + iter.next().map(|remote_node| { + let beacon = remote_node.http.beacon(); + beacon + .get_head() + .map_err(|e| format!("Get head via http failed: {:?}", e)) + .and_then(move |head| { + beacon + .get_state_by_root(head.state_root) + .map(|(state, _root)| state) + .map_err(|e| format!("Get state root via http failed: {:?}", e)) + }) + .map(|state| (state.validators.len(), iter)) + }) + }) + .collect() + }) + .and_then(move |validator_counts| { + if validator_counts + .iter() + .any(|count| *count != expected_count) + { + Err(format!( + "Nodes do not all have {} validators in their state. Validator counts: {:?}", + expected_count, validator_counts + )) + } else { + Ok(()) + } + }) +} diff --git a/tests/beacon_chain_sim/src/local_network.rs b/tests/beacon_chain_sim/src/local_network.rs new file mode 100644 index 0000000000..05fbc7fa26 --- /dev/null +++ b/tests/beacon_chain_sim/src/local_network.rs @@ -0,0 +1,152 @@ +use futures::{Future, IntoFuture}; +use node_test_rig::{ + environment::RuntimeContext, ClientConfig, LocalBeaconNode, LocalValidatorClient, + RemoteBeaconNode, ValidatorConfig, +}; +use parking_lot::RwLock; +use std::ops::Deref; +use std::sync::Arc; +use types::EthSpec; + +/// Helper struct to reduce `Arc` usage. +pub struct Inner { + context: RuntimeContext, + beacon_nodes: RwLock>>, + validator_clients: RwLock>>, +} + +/// Represents a set of interconnected `LocalBeaconNode` and `LocalValidatorClient`. +/// +/// Provides functions to allow adding new beacon nodes and validators. 
+pub struct LocalNetwork { + inner: Arc>, +} + +impl Clone for LocalNetwork { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + +impl Deref for LocalNetwork { + type Target = Inner; + + fn deref(&self) -> &Self::Target { + self.inner.deref() + } +} + +impl LocalNetwork { + /// Creates a new network with a single `BeaconNode`. + pub fn new( + context: RuntimeContext, + beacon_config: ClientConfig, + ) -> impl Future { + LocalBeaconNode::production(context.service_context("boot_node".into()), beacon_config).map( + |beacon_node| Self { + inner: Arc::new(Inner { + context, + beacon_nodes: RwLock::new(vec![beacon_node]), + validator_clients: RwLock::new(vec![]), + }), + }, + ) + } + + /// Returns the number of beacon nodes in the network. + /// + /// Note: does not count nodes that are external to this `LocalNetwork` that may have connected + /// (e.g., another Lighthouse process on the same machine.) + pub fn beacon_node_count(&self) -> usize { + self.beacon_nodes.read().len() + } + + /// Returns the number of validator clients in the network. + /// + /// Note: does not count nodes that are external to this `LocalNetwork` that may have connected + /// (e.g., another Lighthouse process on the same machine.) + pub fn validator_client_count(&self) -> usize { + self.validator_clients.read().len() + } + + /// Adds a beacon node to the network, connecting to the 0'th beacon node via ENR. 
+ pub fn add_beacon_node( + &self, + mut beacon_config: ClientConfig, + ) -> impl Future { + let self_1 = self.clone(); + + self.beacon_nodes + .read() + .first() + .map(|boot_node| { + beacon_config.network.boot_nodes.push( + boot_node + .client + .enr() + .expect("bootnode must have a network"), + ); + }) + .expect("should have atleast one node"); + + let index = self.beacon_nodes.read().len(); + + LocalBeaconNode::production( + self.context.service_context(format!("node_{}", index)), + beacon_config, + ) + .map(move |beacon_node| { + self_1.beacon_nodes.write().push(beacon_node); + }) + } + + /// Adds a validator client to the network, connecting it to the beacon node with index + /// `beacon_node`. + pub fn add_validator_client( + &self, + mut validator_config: ValidatorConfig, + beacon_node: usize, + keypair_indices: Vec, + ) -> impl Future { + let index = self.validator_clients.read().len(); + let context = self.context.service_context(format!("validator_{}", index)); + let self_1 = self.clone(); + + self.beacon_nodes + .read() + .get(beacon_node) + .map(move |beacon_node| { + let socket_addr = beacon_node + .client + .http_listen_addr() + .expect("Must have http started"); + + validator_config.http_server = + format!("http://{}:{}", socket_addr.ip(), socket_addr.port()); + + validator_config + }) + .ok_or_else(|| format!("No beacon node for index {}", beacon_node)) + .into_future() + .and_then(move |validator_config| { + LocalValidatorClient::production_with_insecure_keypairs( + context, + validator_config, + &keypair_indices, + ) + }) + .map(move |validator_client| self_1.validator_clients.write().push(validator_client)) + } + + /// For all beacon nodes in `Self`, return a HTTP client to access each nodes HTTP API. 
+ pub fn remote_nodes(&self) -> Result>, String> { + let beacon_nodes = self.beacon_nodes.read(); + + beacon_nodes + .iter() + .map(|beacon_node| beacon_node.remote_node()) + .collect() + } +} diff --git a/tests/beacon_chain_sim/src/main.rs b/tests/beacon_chain_sim/src/main.rs index 0853096d0d..99e5414b33 100644 --- a/tests/beacon_chain_sim/src/main.rs +++ b/tests/beacon_chain_sim/src/main.rs @@ -1,131 +1,243 @@ -use node_test_rig::{ - environment::{Environment, EnvironmentBuilder, RuntimeContext}, - testing_client_config, ClientConfig, ClientGenesis, LocalBeaconNode, LocalValidatorClient, - ProductionClient, ValidatorConfig, -}; -use std::time::{SystemTime, UNIX_EPOCH}; -use types::EthSpec; +//! This crate provides a simluation that creates `n` beacon node and validator clients, each with +//! `v` validators. A deposit contract is deployed at the start of the simulation using a local +//! `ganache-cli` instance (you must have `ganache-cli` installed and avaliable on your path). All +//! beacon nodes independently listen for genesis from the deposit contract, then start operating. +//! +//! As the simulation runs, there are checks made to ensure that all components are running +//! correctly. If any of these checks fail, the simulation will exit immediately. +//! +//! By default, the simulation will end as soon as all checks have finished. It may be configured +//! to run indefinitely by setting `end_after_checks = false`. +//! +//! ## Future works +//! +//! Presently all the beacon nodes and validator clients all log to stdout. Additionally, the +//! simulation uses `println` to communicate some info. It might be nice if the nodes logged to +//! easy-to-find files and stdout only contained info from the simulation. +//! +//! It would also be nice to add a CLI using `clap` so that the variables in `main()` can be +//! changed without a recompile. 
-pub type BeaconNode = LocalBeaconNode>; +mod checks; +mod local_network; + +use env_logger::{Builder, Env}; +use eth1_test_rig::GanacheEth1Instance; +use futures::{future, stream, Future, Stream}; +use local_network::LocalNetwork; +use node_test_rig::{ + environment::EnvironmentBuilder, testing_client_config, ClientGenesis, ValidatorConfig, +}; +use std::time::{Duration, Instant}; +use tokio::timer::Interval; +use types::MinimalEthSpec; + +pub type E = MinimalEthSpec; fn main() { - let nodes = 4; - let validators_per_node = 64 / nodes; + // Debugging output for libp2p and external crates. + Builder::from_env(Env::default()).init(); - match simulation(nodes, validators_per_node) { + let nodes = 4; + let validators_per_node = 20; + let log_level = "debug"; + let speed_up_factor = 4; + let end_after_checks = true; + + match async_sim( + nodes, + validators_per_node, + speed_up_factor, + log_level, + end_after_checks, + ) { Ok(()) => println!("Simulation exited successfully"), - Err(e) => println!("Simulation exited with error: {}", e), + Err(e) => { + eprintln!("Simulation exited with error: {}", e); + std::process::exit(1) + } } } -fn simulation(num_nodes: usize, validators_per_node: usize) -> Result<(), String> { - if num_nodes < 1 { - return Err("Must have at least one node".into()); - } - +fn async_sim( + node_count: usize, + validators_per_node: usize, + speed_up_factor: u64, + log_level: &str, + end_after_checks: bool, +) -> Result<(), String> { let mut env = EnvironmentBuilder::minimal() - .async_logger("debug")? + .async_logger(log_level)? .multi_threaded_tokio_runtime()? 
.build()?; - let mut base_config = testing_client_config(); + let eth1_block_time = Duration::from_millis(15_000 / speed_up_factor); - let now = SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("should get system time") - .as_secs(); - base_config.genesis = ClientGenesis::Interop { - genesis_time: now, - validator_count: num_nodes * validators_per_node, - }; + let spec = &mut env.eth2_config.spec; - let boot_node = - BeaconNode::production(env.service_context("boot_node".into()), base_config.clone()); + spec.milliseconds_per_slot = spec.milliseconds_per_slot / speed_up_factor; + spec.eth1_follow_distance = 16; + spec.seconds_per_day = eth1_block_time.as_secs() * spec.eth1_follow_distance * 2; + spec.min_genesis_time = 0; + spec.min_genesis_active_validator_count = 64; - let mut nodes = (1..num_nodes) - .map(|i| { - let context = env.service_context(format!("node_{}", i)); - new_with_bootnode_via_enr(context, &boot_node, base_config.clone()) + let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); + let initial_validator_count = spec.min_genesis_active_validator_count as usize; + let total_validator_count = validators_per_node * node_count; + let deposit_amount = env.eth2_config.spec.max_effective_balance; + + let context = env.core_context(); + let executor = context.executor.clone(); + + let future = GanacheEth1Instance::new() + /* + * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit + * validators. + */ + .map(move |ganache_eth1_instance| { + let deposit_contract = ganache_eth1_instance.deposit_contract; + let ganache = ganache_eth1_instance.ganache; + let eth1_endpoint = ganache.endpoint(); + let deposit_contract_address = deposit_contract.address(); + + // Start a timer that produces eth1 blocks on an interval. 
+ executor.spawn( + Interval::new(Instant::now(), eth1_block_time) + .map_err(|_| eprintln!("Eth1 block timer failed")) + .for_each(move |_| ganache.evm_mine().map_err(|_| ())) + .map_err(|_| eprintln!("Eth1 evm_mine failed")) + .map(|_| ()), + ); + + // Submit deposits to the deposit contract. + executor.spawn( + stream::unfold(0..total_validator_count, move |mut iter| { + iter.next().map(|i| { + println!("Submitting deposit for validator {}...", i); + deposit_contract + .deposit_deterministic_async::(i, deposit_amount) + .map(|_| ((), iter)) + }) + }) + .collect() + .map(|_| ()) + .map_err(|e| eprintln!("Error submitting deposit: {}", e)), + ); + + let mut beacon_config = testing_client_config(); + + beacon_config.genesis = ClientGenesis::DepositContract; + beacon_config.eth1.endpoint = eth1_endpoint; + beacon_config.eth1.deposit_contract_address = deposit_contract_address; + beacon_config.eth1.deposit_contract_deploy_block = 0; + beacon_config.eth1.lowest_cached_block_number = 0; + beacon_config.eth1.follow_distance = 1; + beacon_config.dummy_eth1_backend = false; + beacon_config.sync_eth1_chain = true; + + beacon_config }) - .collect::>(); - - let _validators = nodes - .iter() - .enumerate() - .map(|(i, node)| { - let mut context = env.service_context(format!("validator_{}", i)); - - // Pull the spec from the beacon node's beacon chain, in case there were some changes - // to the spec after the node booted. - context.eth2_config.spec = node - .client - .beacon_chain() - .expect("should have beacon chain") - .spec - .clone(); - - let context = env.service_context(format!("validator_{}", i)); - - let indices = - (i * validators_per_node..(i + 1) * validators_per_node).collect::>(); - new_validator_client( - &mut env, - context, - node, - ValidatorConfig::default(), - &indices, - ) + /* + * Create a new `LocalNetwork` with one beacon node. 
+ */ + .and_then(move |beacon_config| { + LocalNetwork::new(context, beacon_config.clone()) + .map(|network| (network, beacon_config)) }) - .collect::>(); + /* + * One by one, add beacon nodes to the network. + */ + .and_then(move |(network, beacon_config)| { + let network_1 = network.clone(); - nodes.insert(0, boot_node); + stream::unfold(0..node_count - 1, move |mut iter| { + iter.next().map(|_| { + network_1 + .add_beacon_node(beacon_config.clone()) + .map(|()| ((), iter)) + }) + }) + .collect() + .map(|_| network) + }) + /* + * One by one, add validator clients to the network. Each validator client is attached to + * a single corresponding beacon node. + */ + .and_then(move |network| { + let network_1 = network.clone(); - env.block_until_ctrl_c()?; + // Note: presently the validator client future will only resolve once genesis time + // occurs. This is great for this scenario, but likely to change in the future. + // + // If the validator client future behaviour changes, we would need to add a new future + // that delays until genesis. Otherwise, all of the checks that start in the next + // future will start too early. - Ok(()) -} - -// TODO: this function does not result in nodes connecting to each other. This is a bug due to -// using a 0 port for discovery. Age is fixing it. -fn new_with_bootnode_via_enr( - context: RuntimeContext, - boot_node: &BeaconNode, - base_config: ClientConfig, -) -> BeaconNode { - let mut config = base_config; - config.network.boot_nodes.push( - boot_node - .client - .enr() - .expect("bootnode must have a network"), - ); - - BeaconNode::production(context, config) -} - -// Note: this function will block until the validator can connect to the beaco node. It is -// recommended to ensure that the beacon node is running first. 
-fn new_validator_client( - env: &mut Environment, - context: RuntimeContext, - beacon_node: &BeaconNode, - base_config: ValidatorConfig, - keypair_indices: &[usize], -) -> LocalValidatorClient { - let mut config = base_config; - - let socket_addr = beacon_node - .client - .http_listen_addr() - .expect("Must have http started"); - - config.http_server = format!("http://{}:{}", socket_addr.ip(), socket_addr.port()); - - env.runtime() - .block_on(LocalValidatorClient::production_with_insecure_keypairs( - context, - config, - keypair_indices, - )) - .expect("should start validator") + stream::unfold(0..node_count, move |mut iter| { + iter.next().map(|i| { + let indices = (i * validators_per_node..(i + 1) * validators_per_node) + .collect::>(); + + network_1 + .add_validator_client(ValidatorConfig::default(), i, indices) + .map(|()| ((), iter)) + }) + }) + .collect() + .map(|_| network) + }) + /* + * Start the processes that will run checks on the network as it runs. + */ + .and_then(move |network| { + // The `final_future` either completes immediately or never completes, depending on the value + // of `end_after_checks`. + let final_future: Box + Send> = + if end_after_checks { + Box::new(future::ok(()).map_err(|()| "".to_string())) + } else { + Box::new(future::empty().map_err(|()| "".to_string())) + }; + + future::ok(()) + // Check that the chain finalizes at the first given opportunity. + .join(checks::verify_first_finalization( + network.clone(), + slot_duration, + )) + // Check that the chain starts with the expected validator count. + .join(checks::verify_initial_validator_count( + network.clone(), + slot_duration, + initial_validator_count, + )) + // Check that validators greater than `spec.min_genesis_active_validator_count` are + // onboarded at the first possible opportunity. + .join(checks::verify_validator_onboarding( + network.clone(), + slot_duration, + total_validator_count, + )) + // End now or run forever, depending on the `end_after_checks` flag. 
+ .join(final_future) + .map(|_| network) + }) + /* + * End the simulation by dropping the network. This will kill all running beacon nodes and + * validator clients. + */ + .map(|network| { + println!( + "Simulation complete. Finished with {} beacon nodes and {} validator clients", + network.beacon_node_count(), + network.validator_client_count() + ); + + // Be explicit about dropping the network, as this kills all the nodes. This ensures + // all the checks have adequate time to pass. + drop(network) + }); + + env.runtime().block_on(future) } diff --git a/tests/eth1_test_rig/src/lib.rs b/tests/eth1_test_rig/src/lib.rs index 1c404e2d5f..0eaa29c3e9 100644 --- a/tests/eth1_test_rig/src/lib.rs +++ b/tests/eth1_test_rig/src/lib.rs @@ -7,17 +7,17 @@ //! some initial issues. mod ganache; -use deposit_contract::{eth1_tx_data, ABI, BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS}; -use futures::{stream, Future, IntoFuture, Stream}; +use deposit_contract::{eth1_tx_data, testnet, ABI, BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS}; +use futures::{future, stream, Future, IntoFuture, Stream}; use ganache::GanacheInstance; use std::time::{Duration, Instant}; use tokio::{runtime::Runtime, timer::Delay}; use types::DepositData; -use types::{EthSpec, Hash256, Keypair, Signature}; +use types::{test_utils::generate_deterministic_keypair, EthSpec, Hash256, Keypair, Signature}; use web3::contract::{Contract, Options}; use web3::transports::Http; use web3::types::{Address, TransactionRequest, U256}; -use web3::{Transport, Web3}; +use web3::Web3; pub const DEPLOYER_ACCOUNTS_INDEX: usize = 0; pub const DEPOSIT_ACCOUNTS_INDEX: usize = 0; @@ -31,7 +31,7 @@ pub struct GanacheEth1Instance { impl GanacheEth1Instance { pub fn new() -> impl Future { GanacheInstance::new().into_future().and_then(|ganache| { - DepositContract::deploy(ganache.web3.clone(), 0).map(|deposit_contract| Self { + DepositContract::deploy(ganache.web3.clone(), 0, None).map(|deposit_contract| Self { ganache, deposit_contract, }) 
@@ -58,21 +58,52 @@ impl DepositContract {
     pub fn deploy(
         web3: Web3<Http>,
         confirmations: usize,
+        password: Option<String>,
+    ) -> impl Future<Item = Self, Error = String> {
+        Self::deploy_bytecode(web3, confirmations, BYTECODE, ABI, password)
+    }
+
+    pub fn deploy_testnet(
+        web3: Web3<Http>,
+        confirmations: usize,
+        password: Option<String>,
+    ) -> impl Future<Item = Self, Error = String> {
+        Self::deploy_bytecode(
+            web3,
+            confirmations,
+            testnet::BYTECODE,
+            testnet::ABI,
+            password,
+        )
+    }
+
+    fn deploy_bytecode(
+        web3: Web3<Http>,
+        confirmations: usize,
+        bytecode: &[u8],
+        abi: &[u8],
+        password: Option<String>,
     ) -> impl Future<Item = Self, Error = String> {
         let web3_1 = web3.clone();
-        deploy_deposit_contract(web3.clone(), confirmations)
-            .map_err(|e| {
-                format!(
-                    "Failed to deploy contract: {}. Is scripts/ganache_tests_node.sh running?.",
-                    e
-                )
-            })
-            .and_then(move |address| {
-                Contract::from_json(web3_1.eth(), address, ABI)
-                    .map_err(|e| format!("Failed to init contract: {:?}", e))
-            })
-            .map(|contract| Self { contract, web3 })
+        deploy_deposit_contract(
+            web3.clone(),
+            confirmations,
+            bytecode.to_vec(),
+            abi.to_vec(),
+            password,
+        )
+        .map_err(|e| {
+            format!(
+                "Failed to deploy contract: {}. Is scripts/ganache_tests_node.sh running?.",
+                e
+            )
+        })
+        .and_then(move |address| {
+            Contract::from_json(web3_1.eth(), address, ABI)
+                .map_err(|e| format!("Failed to init contract: {:?}", e))
+        })
+        .map(|contract| Self { contract, web3 })
     }
 
     /// The deposit contract's address in `0x00ab...` format.
@@ -125,6 +156,25 @@ impl DepositContract {
             .map_err(|e| format!("Deposit failed: {:?}", e))
     }
 
+    pub fn deposit_deterministic_async<E: EthSpec>(
+        &self,
+        keypair_index: usize,
+        amount: u64,
+    ) -> impl Future<Item = (), Error = String> {
+        let keypair = generate_deterministic_keypair(keypair_index);
+
+        let mut deposit = DepositData {
+            pubkey: keypair.pk.into(),
+            withdrawal_credentials: Hash256::zero(),
+            amount,
+            signature: Signature::empty_signature().into(),
+        };
+
+        deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec());
+
+        self.deposit_async(deposit)
+    }
+
     /// Performs a non-blocking deposit.
     pub fn deposit_async(
         &self,
@@ -208,11 +258,15 @@ fn from_gwei(gwei: u64) -> U256 {
 
 /// Deploys the deposit contract to the given web3 instance using the account with index
 /// `DEPLOYER_ACCOUNTS_INDEX`.
-fn deploy_deposit_contract<T: Transport>(
-    web3: Web3<T>,
+fn deploy_deposit_contract(
+    web3: Web3<Http>,
     confirmations: usize,
+    bytecode: Vec<u8>,
+    abi: Vec<u8>,
+    password_opt: Option<String>,
 ) -> impl Future<Item = Address, Error = String> {
-    let bytecode = String::from_utf8_lossy(&BYTECODE);
+    let bytecode = String::from_utf8(bytecode).expect("bytecode must be valid utf8");
+
     let web3_1 = web3.clone();
     web3.eth()
         .accounts()
@@ -223,8 +277,30 @@ fn deploy_deposit_contract(
                 .cloned()
                 .ok_or_else(|| "Insufficient accounts for deployer".to_string())
         })
+        .and_then(move |from_address| {
+            let future: Box<dyn Future<Item = Address, Error = String> + Send> =
+                if let Some(password) = password_opt {
+                    // Unlock for only a single transaction.
+                    let duration = None;
+
+                    let future = web3_1
+                        .personal()
+                        .unlock_account(from_address, &password, duration)
+                        .then(move |result| match result {
+                            Ok(true) => Ok(from_address),
+                            Ok(false) => Err("Eth1 node refused to unlock account".to_string()),
+                            Err(e) => Err(format!("Eth1 unlock request failed: {:?}", e)),
+                        });
+
+                    Box::new(future)
+                } else {
+                    Box::new(future::ok(from_address))
+                };
+
+            future
+        })
         .and_then(move |deploy_address| {
-            Contract::deploy(web3.eth(), &ABI)
+            Contract::deploy(web3.eth(), &abi)
                 .map_err(|e| format!("Unable to build contract deployer: {:?}", e))?
                 .confirmations(confirmations)
                 .options(Options {
diff --git a/tests/node_test_rig/src/lib.rs b/tests/node_test_rig/src/lib.rs
index d194801eed..b1d2e82c4b 100644
--- a/tests/node_test_rig/src/lib.rs
+++ b/tests/node_test_rig/src/lib.rs
@@ -2,10 +2,9 @@
 //!
 //! Intended to be used for testing and simulation purposes. Not for production.
 
-use beacon_node::{beacon_chain::BeaconChainTypes, Client, ProductionBeaconNode};
+use beacon_node::ProductionBeaconNode;
 use environment::RuntimeContext;
 use futures::Future;
-use remote_beacon_node::RemoteBeaconNode;
 use std::path::PathBuf;
 use std::time::{SystemTime, UNIX_EPOCH};
 use tempdir::TempDir;
@@ -14,22 +13,26 @@ use validator_client::{KeySource, ProductionValidatorClient};
 
 pub use beacon_node::{ClientConfig, ClientGenesis, ProductionClient};
 pub use environment;
+pub use remote_beacon_node::RemoteBeaconNode;
 pub use validator_client::Config as ValidatorConfig;
 
 /// Provids a beacon node that is running in the current process on a given tokio executor (it
 /// is _local_ to this process).
 ///
 /// Intended for use in testing and simulation. Not for production.
-pub struct LocalBeaconNode<T> {
-    pub client: T,
+pub struct LocalBeaconNode<E: EthSpec> {
+    pub client: ProductionClient<E>,
     pub datadir: TempDir,
 }
 
-impl<E: EthSpec> LocalBeaconNode<ProductionClient<E>> {
+impl<E: EthSpec> LocalBeaconNode<E> {
     /// Starts a new, production beacon node on the tokio runtime in the given `context`.
     ///
     /// The node created is using the same types as the node we use in production.
-    pub fn production(context: RuntimeContext<E>, mut client_config: ClientConfig) -> Self {
+    pub fn production(
+        context: RuntimeContext<E>,
+        mut client_config: ClientConfig,
+    ) -> impl Future<Item = Self, Error = String> {
         // Creates a temporary directory that will be deleted once this `TempDir` is dropped.
         let datadir = TempDir::new("lighthouse_node_test_rig")
             .expect("should create temp directory for client datadir");
@@ -37,19 +40,17 @@ impl<E: EthSpec> LocalBeaconNode<ProductionClient<E>> {
         client_config.data_dir = datadir.path().into();
         client_config.network.network_dir = PathBuf::from(datadir.path()).join("network");
 
-        let client = ProductionBeaconNode::new(context, client_config)
-            .wait()
-            .expect("should build production client")
-            .into_inner();
-
-        LocalBeaconNode { client, datadir }
+        ProductionBeaconNode::new(context, client_config).map(move |client| Self {
+            client: client.into_inner(),
+            datadir,
+        })
     }
 }
 
-impl<T: BeaconChainTypes> LocalBeaconNode<Client<T>> {
+impl<E: EthSpec> LocalBeaconNode<E> {
     /// Returns a `RemoteBeaconNode` that can connect to `self`. Useful for testing the node as if
     /// it were external this process.
-    pub fn remote_node(&self) -> Result<RemoteBeaconNode<T::EthSpec>, String> {
+    pub fn remote_node(&self) -> Result<RemoteBeaconNode<E>, String> {
         let socket_addr = self
             .client
             .http_listen_addr()
- .then(|_| Ok(())), + }), ) .map(move |_| info!(log_3, "Shutdown complete")), ); diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 4fc5370741..78731d18c9 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -146,9 +146,7 @@ impl BlockService { "error" => format!("{}", e) } }) - .for_each(move |_| service.clone().do_update()) - // Prevent any errors from escaping and stopping the interval. - .then(|_| Ok(())), + .for_each(move |_| service.clone().do_update().then(|_| Ok(()))), ) .map(move |_| info!(log_2, "Shutdown complete")), ); diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 1416e228c2..3e6f4ee492 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -3,6 +3,7 @@ use serde_derive::{Deserialize, Serialize}; use std::path::PathBuf; pub const DEFAULT_HTTP_SERVER: &str = "http://localhost:5052/"; +pub const DEFAULT_DATA_DIR: &str = ".lighthouse/validators"; /// Specifies a method for obtaining validator keypairs. #[derive(Clone)] @@ -53,6 +54,19 @@ impl Config { pub fn from_cli(cli_args: &ArgMatches) -> Result { let mut config = Config::default(); + // Read the `--datadir` flag. + // + // If it's not present, try and find the home directory (`~`) and push the default data + // directory onto it. If the home directory is not available, use the present directory. 
+ config.data_dir = cli_args + .value_of("datadir") + .map(PathBuf::from) + .unwrap_or_else(|| { + dirs::home_dir() + .map(|home| home.join(DEFAULT_DATA_DIR)) + .unwrap_or_else(|| PathBuf::from(".")) + }); + if let Some(server) = cli_args.value_of("server") { config.http_server = server.to_string(); } diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index de29180cd0..c83ebef8b0 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -268,9 +268,7 @@ impl DutiesService { "error" => format!("{}", e) } }) - .for_each(move |_| service.clone().do_update()) - // Prevent any errors from escaping and stopping the interval. - .then(|_| Ok(())), + .for_each(move |_| service.clone().do_update().then(|_| Ok(()))), ) .map(move |_| info!(log_2, "Shutdown complete")), ); diff --git a/validator_client/src/fork_service.rs b/validator_client/src/fork_service.rs index 4bc8fb355c..9ff3a0bf56 100644 --- a/validator_client/src/fork_service.rs +++ b/validator_client/src/fork_service.rs @@ -136,9 +136,7 @@ impl ForkService { "error" => format!("{}", e) } }) - .for_each(move |_| service.do_update()) - // Prevent any errors from escaping and stopping the interval. 
- .then(|_| Ok(())), + .for_each(move |_| service.do_update().then(|_| Ok(()))), ) .map(move |_| info!(log_2, "Shutdown complete")), ); diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 8ac42cf650..b727dcb16b 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -27,6 +27,7 @@ use slog::{error, info, Logger}; use slot_clock::SlotClock; use slot_clock::SystemTimeSlotClock; use std::time::{Duration, Instant}; +use std::time::{SystemTime, UNIX_EPOCH}; use tokio::timer::Delay; use types::EthSpec; use validator_store::ValidatorStore; @@ -68,6 +69,7 @@ impl ProductionValidatorClient { let log_1 = context.log.clone(); let log_2 = context.log.clone(); let log_3 = context.log.clone(); + let log_4 = context.log.clone(); info!( log_1, @@ -97,6 +99,51 @@ impl ProductionValidatorClient { .map_err(|e| format!("Unable to read genesis time from beacon node: {:?}", e)) }) .and_then(move |(beacon_node, remote_eth2_config, genesis_time)| { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .into_future() + .map_err(|e| format!("Unable to read system time: {:?}", e)) + .and_then(move |now| { + let log = log_3.clone(); + let genesis = Duration::from_secs(genesis_time); + + // If the time now is less than (prior to) genesis, then delay until the + // genesis instant. + // + // If the validator client starts before genesis, it will get errors from + // the slot clock. 
+ let box_future: Box + Send> = if now + < genesis + { + info!( + log, + "Starting node prior to genesis"; + "seconds_to_wait" => (genesis - now).as_secs() + ); + + Box::new( + Delay::new(Instant::now() + (genesis - now)) + .map_err(|e| { + format!("Unable to create genesis wait delay: {:?}", e) + }) + .map(move |_| (beacon_node, remote_eth2_config, genesis_time)), + ) + } else { + info!( + log, + "Genesis has already occurred"; + "seconds_ago" => (now - genesis).as_secs() + ); + + Box::new(future::ok((beacon_node, remote_eth2_config, genesis_time))) + }; + + box_future + }) + }) + .and_then(move |(beacon_node, remote_eth2_config, genesis_time)| { + let log = log_4.clone(); + // Do not permit a connection to a beacon node using different spec constants. if context.eth2_config.spec_constants != remote_eth2_config.spec_constants { return Err(format!( @@ -123,7 +170,7 @@ impl ProductionValidatorClient { let fork_service = ForkServiceBuilder::new() .slot_clock(slot_clock.clone()) .beacon_node(beacon_node.clone()) - .runtime_context(context.service_context("fork")) + .runtime_context(context.service_context("fork".into())) .build()?; let validator_store: ValidatorStore = @@ -135,7 +182,7 @@ impl ProductionValidatorClient { config.data_dir.clone(), context.eth2_config.spec.clone(), fork_service.clone(), - log_3.clone(), + log.clone(), )?, // Generate ephemeral insecure keypairs for testing purposes. // @@ -145,13 +192,13 @@ impl ProductionValidatorClient { &indices, context.eth2_config.spec.clone(), fork_service.clone(), - log_3.clone(), + log.clone(), )? 
} }; info!( - log_3, + log, "Loaded validator keypair store"; "voting_validators" => validator_store.num_voting_validators() ); @@ -160,7 +207,7 @@ impl ProductionValidatorClient { .slot_clock(slot_clock.clone()) .validator_store(validator_store.clone()) .beacon_node(beacon_node.clone()) - .runtime_context(context.service_context("duties")) + .runtime_context(context.service_context("duties".into())) .build()?; let block_service = BlockServiceBuilder::new() @@ -168,7 +215,7 @@ impl ProductionValidatorClient { .slot_clock(slot_clock.clone()) .validator_store(validator_store.clone()) .beacon_node(beacon_node.clone()) - .runtime_context(context.service_context("block")) + .runtime_context(context.service_context("block".into())) .build()?; let attestation_service = AttestationServiceBuilder::new() @@ -176,7 +223,7 @@ impl ProductionValidatorClient { .slot_clock(slot_clock) .validator_store(validator_store) .beacon_node(beacon_node) - .runtime_context(context.service_context("attestation")) + .runtime_context(context.service_context("attestation".into())) .build()?; Ok(Self { diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 4256ce4939..96c945729e 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -34,7 +34,7 @@ impl ValidatorStore { log: Logger, ) -> Result { let validator_iter = read_dir(&base_dir) - .map_err(|e| format!("Failed to read base directory: {:?}", e))? + .map_err(|e| format!("Failed to read base directory {:?}: {:?}", base_dir, e))? .filter_map(|validator_dir| { let path = validator_dir.ok()?.path();