From 3c8fe00510317e35e98216870148fa7249b9800f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 18 Mar 2020 09:25:29 +1100 Subject: [PATCH 01/24] Add sync-speed metric (#898) --- Cargo.lock | 2 ++ beacon_node/client/Cargo.toml | 2 ++ beacon_node/client/src/lib.rs | 1 + beacon_node/client/src/metrics.rs | 9 +++++++++ beacon_node/client/src/notifier.rs | 3 +++ 5 files changed, 17 insertions(+) create mode 100644 beacon_node/client/src/metrics.rs diff --git a/Cargo.lock b/Cargo.lock index ce0a7e69ab..895423b9ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -520,7 +520,9 @@ dependencies = [ "exit-future 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "genesis 0.1.0", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "lighthouse_bootstrap 0.1.0", + "lighthouse_metrics 0.1.0", "network 0.1.0", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "prometheus 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 8feed866e4..a53ce5455c 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -38,3 +38,5 @@ genesis = { path = "../genesis" } environment = { path = "../../lighthouse/environment" } lighthouse_bootstrap = { path = "../../eth2/utils/lighthouse_bootstrap" } eth2_ssz = { path = "../../eth2/utils/ssz" } +lazy_static = "1.4.0" +lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 6f1214ce50..0d5155c6f1 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,6 +1,7 @@ extern crate slog; pub mod config; +mod metrics; mod notifier; pub mod builder; diff --git a/beacon_node/client/src/metrics.rs b/beacon_node/client/src/metrics.rs new file mode 100644 index 0000000000..5598fde220 --- 
/dev/null +++ b/beacon_node/client/src/metrics.rs @@ -0,0 +1,9 @@ +use lazy_static::lazy_static; +pub use lighthouse_metrics::*; + +lazy_static! { + pub static ref SYNC_SLOTS_PER_SECOND: Result = try_create_int_gauge( + "sync_slots_per_second", + "The number of blocks being imported per second" + ); +} diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index eddebce1f3..85e15049ec 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,3 +1,4 @@ +use crate::metrics; use beacon_chain::{BeaconChain, BeaconChainTypes}; use environment::RuntimeContext; use exit_future::Signal; @@ -83,6 +84,8 @@ pub fn spawn_notifier( let mut speedo = speedo.lock(); speedo.observe(head_slot, Instant::now()); + metrics::set_gauge(&metrics::SYNC_SLOTS_PER_SECOND, speedo.slots_per_second().unwrap_or_else(|| 0_f64) as i64); + // The next two lines take advantage of saturating subtraction on `Slot`. let head_distance = current_slot - head_slot; From f160f7a21b477790c6d08f8294cfe19e5d1e823b Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 18 Mar 2020 16:42:49 +1100 Subject: [PATCH 02/24] Book corrections to allow http api access for docker use (#911) * Book corrections to allow http api access for docker use * Limit beacon node API to localhost * Add localhost to comment --- book/src/docker.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/book/src/docker.md b/book/src/docker.md index 96be643fa5..b4159c17ee 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -49,7 +49,7 @@ $ docker run lighthouse:local lighthouse --help You can run a Docker beacon node with the following command: ```bash -$ docker run -p 9000:9000 -p 5052:5052 -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse beacon --http +$ docker run -p 9000:9000 -p 127.0.0.1:5052:5052 -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse beacon --http --http-address 0.0.0.0 ``` > The `-p` and `-v` and 
values are described below. @@ -78,8 +78,8 @@ $ docker run -p 9000:9000 sigp/lighthouse lighthouse beacon ``` If you use the `--http` flag you may also want to expose the HTTP port with `-p -5052:5052`. +127.0.0.1:5052:5052`. ```bash -$ docker run -p 9000:9000 -p 5052:5052 sigp/lighthouse lighthouse beacon --http +$ docker run -p 9000:9000 -p 127.0.0.1:5052:5052 sigp/lighthouse lighthouse beacon --http --http-address 0.0.0.0 ``` From 70e39cc6a1bd2c1e07c7ed62fdcd93b92e7dbf8f Mon Sep 17 00:00:00 2001 From: Herman Junge Date: Thu, 19 Mar 2020 00:02:42 +0000 Subject: [PATCH 03/24] Update reward calculations to v0.11.0 - Handle u64 overflow (#920) (#921) * Reference https://github.com/ethereum/eth2.0-specs/pull/1635 * Suffix '_ebi' on each effected variable --- .../src/per_epoch_processing/apply_rewards.rs | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs index 0cb271d60e..83588ad7ae 100644 --- a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs +++ b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs @@ -133,7 +133,7 @@ fn get_attestation_deltas( /// Determine the delta for a single validator, sans proposer rewards. 
/// -/// Spec v0.10.1 +/// Spec v0.11.0 fn get_attestation_delta( validator: &ValidatorStatus, total_balances: &TotalBalances, @@ -152,16 +152,24 @@ fn get_attestation_delta( return delta; } - let total_balance = total_balances.current_epoch; - let total_attesting_balance = total_balances.previous_epoch_attesters; - let matching_target_balance = total_balances.previous_epoch_target_attesters; - let matching_head_balance = total_balances.previous_epoch_head_attesters; + // Handle integer overflow by dividing these quantities by EFFECTIVE_BALANCE_INCREMENT + // Spec: + // - increment = EFFECTIVE_BALANCE_INCREMENT + // - reward_numerator = get_base_reward(state, index) * (attesting_balance // increment) + // - rewards[index] = reward_numerator // (total_balance // increment) + let total_balance_ebi = total_balances.current_epoch / spec.effective_balance_increment; + let total_attesting_balance_ebi = + total_balances.previous_epoch_attesters / spec.effective_balance_increment; + let matching_target_balance_ebi = + total_balances.previous_epoch_target_attesters / spec.effective_balance_increment; + let matching_head_balance_ebi = + total_balances.previous_epoch_head_attesters / spec.effective_balance_increment; // Expected FFG source. 
// Spec: // - validator index in `get_unslashed_attesting_indices(state, matching_source_attestations)` if validator.is_previous_epoch_attester && !validator.is_slashed { - delta.reward(base_reward * total_attesting_balance / total_balance); + delta.reward(base_reward * total_attesting_balance_ebi / total_balance_ebi); // Inclusion speed bonus let proposer_reward = base_reward / spec.proposer_reward_quotient; let max_attester_reward = base_reward - proposer_reward; @@ -177,7 +185,7 @@ fn get_attestation_delta( // Spec: // - validator index in `get_unslashed_attesting_indices(state, matching_target_attestations)` if validator.is_previous_epoch_target_attester && !validator.is_slashed { - delta.reward(base_reward * matching_target_balance / total_balance); + delta.reward(base_reward * matching_target_balance_ebi / total_balance_ebi); } else { delta.penalize(base_reward); } @@ -186,7 +194,7 @@ fn get_attestation_delta( // Spec: // - validator index in `get_unslashed_attesting_indices(state, matching_head_attestations)` if validator.is_previous_epoch_head_attester && !validator.is_slashed { - delta.reward(base_reward * matching_head_balance / total_balance); + delta.reward(base_reward * matching_head_balance_ebi / total_balance_ebi); } else { delta.penalize(base_reward); } From 8c716b2e923c2032107b8a5c6db38c92fdc57bfa Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Thu, 19 Mar 2020 09:22:15 +0900 Subject: [PATCH 04/24] Fix incomplete build in case of the machine is offline (#935) --- eth2/utils/eth2_testnet_config/build.rs | 32 +++++++++++++++---------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/eth2/utils/eth2_testnet_config/build.rs b/eth2/utils/eth2_testnet_config/build.rs index 8f2817d86a..e2f7d71b39 100644 --- a/eth2/utils/eth2_testnet_config/build.rs +++ b/eth2/utils/eth2_testnet_config/build.rs @@ -8,23 +8,29 @@ use std::path::PathBuf; const TESTNET_ID: &str = "testnet5"; fn main() { - match get_all_files() { - Ok(()) => (), - Err(e) => 
panic!(e), + if !base_dir().exists() { + std::fs::create_dir_all(base_dir()).expect(&format!("Unable to create {:?}", base_dir())); + + match get_all_files() { + Ok(()) => (), + Err(e) => { + std::fs::remove_dir_all(base_dir()).expect(&format!( + "{}. Failed to remove {:?}, please remove the directory manually because it may contains incomplete testnet data.", + e, + base_dir(), + )); + panic!(e); + } + } } } pub fn get_all_files() -> Result<(), String> { - if !base_dir().exists() { - std::fs::create_dir_all(base_dir()) - .map_err(|e| format!("Unable to create {:?}: {}", base_dir(), e))?; - - get_file("boot_enr.yaml")?; - get_file("config.yaml")?; - get_file("deploy_block.txt")?; - get_file("deposit_contract.txt")?; - get_file("genesis.ssz")?; - } + get_file("boot_enr.yaml")?; + get_file("config.yaml")?; + get_file("deploy_block.txt")?; + get_file("deposit_contract.txt")?; + get_file("genesis.ssz")?; Ok(()) } From a5fbaef469df221f16b84601e78a83b009e13247 Mon Sep 17 00:00:00 2001 From: /raw PONG _GHMoaCXLT <58883403+q9f@users.noreply.github.com> Date: Fri, 20 Mar 2020 02:18:31 +0100 Subject: [PATCH 05/24] Github: Add version to issue template (#937) --- .github/ISSUE_TEMPLATE.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 7ecb9e5b0f..9c09d86bad 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -2,6 +2,11 @@ Please provide a brief description of the issue. +## Version + +Please provide your Lighthouse and Rust version. Are you building from +`master`, which commit? 
+ ## Present Behaviour Describe the present behaviour of the application, with regards to this From 47aef629d1040f2006dd0732b597b510861cedb3 Mon Sep 17 00:00:00 2001 From: divma Date: Sun, 22 Mar 2020 20:07:41 -0500 Subject: [PATCH 06/24] move the parent lookup process to a dedicated thread (#906) * Upgrade the parent lookup logic * Apply reviewer suggestions * move the parent lookup process to a dedicated thread * move the logic of parent lookup and range syncing to a block processor * review suggestions * more review suggestions * Add small logging changes * Process parent lookups in reverse Co-authored-by: Age Manning --- ...batch_processing.rs => block_processor.rs} | 105 +++++++++++++----- beacon_node/network/src/sync/manager.rs | 95 +++++----------- beacon_node/network/src/sync/mod.rs | 1 + .../network/src/sync/range_sync/chain.rs | 60 ++++++---- .../network/src/sync/range_sync/mod.rs | 3 +- .../network/src/sync/range_sync/range.rs | 26 +++-- 6 files changed, 157 insertions(+), 133 deletions(-) rename beacon_node/network/src/sync/{range_sync/batch_processing.rs => block_processor.rs} (66%) diff --git a/beacon_node/network/src/sync/range_sync/batch_processing.rs b/beacon_node/network/src/sync/block_processor.rs similarity index 66% rename from beacon_node/network/src/sync/range_sync/batch_processing.rs rename to beacon_node/network/src/sync/block_processor.rs index 484bab9bac..cfb82eb8fb 100644 --- a/beacon_node/network/src/sync/range_sync/batch_processing.rs +++ b/beacon_node/network/src/sync/block_processor.rs @@ -1,12 +1,23 @@ -use super::batch::Batch; use crate::message_processor::FUTURE_SLOT_TOLERANCE; use crate::sync::manager::SyncMessage; +use crate::sync::range_sync::BatchId; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; +use eth2_libp2p::PeerId; use slog::{debug, error, trace, warn}; use std::sync::{Arc, Weak}; use tokio::sync::mpsc; +use types::SignedBeaconBlock; -/// The result of attempting to process a batch of blocks. 
+/// Id associated to a block processing request, either a batch or a single block. +#[derive(Clone, Debug, PartialEq)] +pub enum ProcessId { + /// Processing Id of a range syncing batch. + RangeBatchId(BatchId), + /// Processing Id of the parent lookup of a block + ParentLookup(PeerId), +} + +/// The result of a block processing request. // TODO: When correct batch error handling occurs, we will include an error type. #[derive(Debug)] pub enum BatchProcessResult { @@ -16,46 +27,81 @@ pub enum BatchProcessResult { Failed, } -// TODO: Refactor to async fn, with stable futures -pub fn spawn_batch_processor( +/// Spawns a thread handling the block processing of a request: range syncing or parent lookup. +pub fn spawn_block_processor( chain: Weak>, - process_id: u64, - batch: Batch, + process_id: ProcessId, + downloaded_blocks: Vec>, mut sync_send: mpsc::UnboundedSender>, log: slog::Logger, ) { std::thread::spawn(move || { - debug!(log, "Processing batch"; "id" => *batch.id); - let result = match process_batch(chain, &batch, &log) { - Ok(_) => BatchProcessResult::Success, - Err(_) => BatchProcessResult::Failed, - }; + match process_id { + // this a request from the range sync + ProcessId::RangeBatchId(batch_id) => { + debug!(log, "Processing batch"; "id" => *batch_id, "blocks" => downloaded_blocks.len()); + let result = match process_blocks(chain, downloaded_blocks.iter(), &log) { + Ok(_) => { + debug!(log, "Batch processed"; "id" => *batch_id ); + BatchProcessResult::Success + } + Err(e) => { + debug!(log, "Batch processing failed"; "id" => *batch_id, "error" => e); + BatchProcessResult::Failed + } + }; - debug!(log, "Batch processed"; "id" => *batch.id, "result" => format!("{:?}", result)); - - sync_send - .try_send(SyncMessage::BatchProcessed { - process_id, - batch: Box::new(batch), - result, - }) - .unwrap_or_else(|_| { - debug!( - log, - "Batch result could not inform sync. Likely shutting down." 
- ); - }); + let msg = SyncMessage::BatchProcessed { + batch_id: batch_id, + downloaded_blocks: downloaded_blocks, + result, + }; + sync_send.try_send(msg).unwrap_or_else(|_| { + debug!( + log, + "Block processor could not inform range sync result. Likely shutting down." + ); + }); + } + // this a parent lookup request from the sync manager + ProcessId::ParentLookup(peer_id) => { + debug!(log, "Processing parent lookup"; "last_peer_id" => format!("{}", peer_id), "blocks" => downloaded_blocks.len()); + // parent blocks are ordered from highest slot to lowest, so we need to process in + // reverse + match process_blocks(chain, downloaded_blocks.iter().rev(), &log) { + Err(e) => { + warn!(log, "Parent lookup failed"; "last_peer_id" => format!("{}", peer_id), "error" => e); + sync_send + .try_send(SyncMessage::ParentLookupFailed(peer_id)) + .unwrap_or_else(|_| { + // on failure, inform to downvote the peer + debug!( + log, + "Block processor could not inform parent lookup result. Likely shutting down." + ); + }); + } + Ok(_) => { + debug!(log, "Parent lookup processed successfully"); + } + } + } + } }); } -// Helper function to process block batches which only consumes the chain and blocks to process -fn process_batch( +/// Helper function to process blocks batches which only consumes the chain and blocks to process. +fn process_blocks< + 'a, + T: BeaconChainTypes, + I: Iterator>, +>( chain: Weak>, - batch: &Batch, + downloaded_blocks: I, log: &slog::Logger, ) -> Result<(), String> { let mut successful_block_import = false; - for block in &batch.downloaded_blocks { + for block in downloaded_blocks { if let Some(chain) = chain.upgrade() { let processing_result = chain.process_block(block.clone()); @@ -72,6 +118,7 @@ fn process_batch( } BlockProcessingOutcome::ParentUnknown { parent, .. 
} => { // blocks should be sequential and all parents should exist + // this is a failure if blocks do not have parents warn!( log, "Parent block is unknown"; "parent_root" => format!("{}", parent), diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index ae542b324c..9fac59497b 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -33,8 +33,9 @@ //! if an attestation references an unknown block) this manager can search for the block and //! subsequently search for parents if needed. +use super::block_processor::{spawn_block_processor, BatchProcessResult, ProcessId}; use super::network_context::SyncNetworkContext; -use super::range_sync::{Batch, BatchProcessResult, RangeSync}; +use super::range_sync::{BatchId, RangeSync}; use crate::message_processor::PeerSyncInfo; use crate::service::NetworkMessage; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; @@ -99,10 +100,13 @@ pub enum SyncMessage { /// A batch has been processed by the block processor thread. BatchProcessed { - process_id: u64, - batch: Box>, + batch_id: BatchId, + downloaded_blocks: Vec>, result: BatchProcessResult, }, + + /// A parent lookup has failed for a block given by this `peer_id`. + ParentLookupFailed(PeerId), } /// Maintains a sequential list of parents to lookup and the lookup's current state. @@ -172,6 +176,9 @@ pub struct SyncManager { /// The logger for the import manager. log: Logger, + + /// The sending part of input_channel + sync_send: mpsc::UnboundedSender>, } /// Spawns a new `SyncManager` thread which has a weak reference to underlying beacon @@ -202,6 +209,7 @@ pub fn spawn( single_block_lookups: FnvHashMap::default(), full_peers: HashSet::new(), log: log.clone(), + sync_send: sync_send.clone(), }; // spawn the sync manager thread @@ -590,8 +598,6 @@ impl SyncManager { // If the last block in the queue has an unknown parent, we continue the parent // lookup-search. 
- let total_blocks_to_process = parent_request.downloaded_blocks.len(); - if let Some(chain) = self.chain.upgrade() { let newest_block = parent_request .downloaded_blocks @@ -606,7 +612,15 @@ impl SyncManager { return; } Ok(BlockProcessingOutcome::Processed { .. }) - | Ok(BlockProcessingOutcome::BlockIsAlreadyKnown { .. }) => {} + | Ok(BlockProcessingOutcome::BlockIsAlreadyKnown { .. }) => { + spawn_block_processor( + self.chain.clone(), + ProcessId::ParentLookup(parent_request.last_submitted_peer.clone()), + parent_request.downloaded_blocks, + self.sync_send.clone(), + self.log.clone(), + ); + } Ok(outcome) => { // all else we consider the chain a failure and downvote the peer that sent // us the last block @@ -634,64 +648,6 @@ impl SyncManager { // chain doesn't exist, drop the parent queue and return return; } - - //TODO: Shift this to a block processing thread - - // the last received block has been successfully processed, process all other blocks in the - // chain - while let Some(block) = parent_request.downloaded_blocks.pop() { - // check if the chain exists - if let Some(chain) = self.chain.upgrade() { - match chain.process_block(block) { - Ok(BlockProcessingOutcome::Processed { .. }) - | Ok(BlockProcessingOutcome::BlockIsAlreadyKnown { .. }) => {} // continue to the next block - - // all else is considered a failure - Ok(outcome) => { - // the previous blocks have failed, notify the user the chain lookup has - // failed and drop the parent queue - debug!( - self.log, "Invalid parent chain. 
Past blocks failure"; - "outcome" => format!("{:?}", outcome), - "peer" => format!("{:?}", parent_request.last_submitted_peer), - ); - self.network - .downvote_peer(parent_request.last_submitted_peer.clone()); - break; - } - Err(e) => { - warn!( - self.log, "Parent chain processing error."; - "error" => format!("{:?}", e) - ); - self.network - .downvote_peer(parent_request.last_submitted_peer.clone()); - break; - } - } - } else { - // chain doesn't exist, end the processing - break; - } - } - - // at least one block has been processed, run fork-choice - if let Some(chain) = self.chain.upgrade() { - match chain.fork_choice() { - Ok(()) => trace!( - self.log, - "Fork choice success"; - "block_imports" => total_blocks_to_process - parent_request.downloaded_blocks.len(), - "location" => "parent request" - ), - Err(e) => error!( - self.log, - "Fork choice failed"; - "error" => format!("{:?}", e), - "location" => "parent request" - ), - }; - } } } @@ -782,17 +738,20 @@ impl Future for SyncManager { self.inject_error(peer_id, request_id); } SyncMessage::BatchProcessed { - process_id, - batch, + batch_id, + downloaded_blocks, result, } => { self.range_sync.handle_block_process_result( &mut self.network, - process_id, - *batch, + batch_id, + downloaded_blocks, result, ); } + SyncMessage::ParentLookupFailed(peer_id) => { + self.network.downvote_peer(peer_id); + } }, Ok(Async::NotReady) => break, Ok(Async::Ready(None)) => { diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index e9bc70e557..57d9ee393a 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -1,6 +1,7 @@ //! Syncing for lighthouse. //! //! Stores the various syncing methods for the beacon chain. 
+mod block_processor; pub mod manager; mod network_context; mod range_sync; diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index e548134f71..88061378a7 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1,5 +1,5 @@ use super::batch::{Batch, BatchId, PendingBatches}; -use super::batch_processing::{spawn_batch_processor, BatchProcessResult}; +use crate::sync::block_processor::{spawn_block_processor, BatchProcessResult, ProcessId}; use crate::sync::network_context::SyncNetworkContext; use crate::sync::SyncMessage; use beacon_chain::{BeaconChain, BeaconChainTypes}; @@ -76,7 +76,7 @@ pub struct SyncingChain { /// A random id given to a batch process request. This is None if there is no ongoing batch /// process. - current_processing_id: Option, + current_processing_batch: Option>, /// A send channel to the sync manager. This is given to the batch processor thread to report /// back once batch processing has completed. @@ -120,7 +120,7 @@ impl SyncingChain { to_be_downloaded_id: BatchId(1), to_be_processed_id: BatchId(1), state: ChainSyncingState::Stopped, - current_processing_id: None, + current_processing_batch: None, sync_send, chain, log, @@ -167,15 +167,16 @@ impl SyncingChain { // An entire batch of blocks has been received. This functions checks to see if it can be processed, // remove any batches waiting to be verified and if this chain is syncing, request new // blocks for the peer. 
- debug!(self.log, "Completed batch received"; "id"=> *batch.id, "blocks"=>batch.downloaded_blocks.len(), "awaiting_batches" => self.completed_batches.len()); + debug!(self.log, "Completed batch received"; "id"=> *batch.id, "blocks" => &batch.downloaded_blocks.len(), "awaiting_batches" => self.completed_batches.len()); // verify the range of received blocks // Note that the order of blocks is verified in block processing if let Some(last_slot) = batch.downloaded_blocks.last().map(|b| b.slot()) { // the batch is non-empty - if batch.start_slot > batch.downloaded_blocks[0].slot() || batch.end_slot < last_slot { + let first_slot = batch.downloaded_blocks[0].slot(); + if batch.start_slot > first_slot || batch.end_slot < last_slot { warn!(self.log, "BlocksByRange response returned out of range blocks"; - "response_initial_slot" => batch.downloaded_blocks[0].slot(), + "response_initial_slot" => first_slot, "requested_initial_slot" => batch.start_slot); network.downvote_peer(batch.current_peer); self.to_be_processed_id = batch.id; // reset the id back to here, when incrementing, it will check against completed batches @@ -218,7 +219,7 @@ impl SyncingChain { } // Only process one batch at a time - if self.current_processing_id.is_some() { + if self.current_processing_batch.is_some() { return; } @@ -238,14 +239,14 @@ impl SyncingChain { } /// Sends a batch to the batch processor. 
- fn process_batch(&mut self, batch: Batch) { - // only spawn one instance at a time - let processing_id: u64 = rand::random(); - self.current_processing_id = Some(processing_id); - spawn_batch_processor( + fn process_batch(&mut self, mut batch: Batch) { + let downloaded_blocks = std::mem::replace(&mut batch.downloaded_blocks, Vec::new()); + let batch_id = ProcessId::RangeBatchId(batch.id.clone()); + self.current_processing_batch = Some(batch); + spawn_block_processor( self.chain.clone(), - processing_id, - batch, + batch_id, + downloaded_blocks, self.sync_send.clone(), self.log.clone(), ); @@ -256,30 +257,41 @@ impl SyncingChain { pub fn on_batch_process_result( &mut self, network: &mut SyncNetworkContext, - processing_id: u64, - batch: &mut Option>, + batch_id: BatchId, + downloaded_blocks: &mut Option>>, result: &BatchProcessResult, ) -> Option { - if Some(processing_id) != self.current_processing_id { - // batch process doesn't belong to this chain + if let Some(current_batch) = &self.current_processing_batch { + if current_batch.id != batch_id { + // batch process does not belong to this chain + return None; + } + // Continue. This is our processing request + } else { + // not waiting on a processing result return None; } - // Consume the batch option - let batch = batch.take().or_else(|| { + // claim the result by consuming the option + let downloaded_blocks = downloaded_blocks.take().or_else(|| { + // if taken by another chain, we are no longer waiting on a result. 
+ self.current_processing_batch = None; crit!(self.log, "Processed batch taken by another chain"); None })?; + // No longer waiting on a processing result + let mut batch = self.current_processing_batch.take().unwrap(); + // These are the blocks of this batch + batch.downloaded_blocks = downloaded_blocks; + // double check batches are processed in order TODO: Remove for prod if batch.id != self.to_be_processed_id { crit!(self.log, "Batch processed out of order"; - "processed_batch_id" => *batch.id, - "expected_id" => *self.to_be_processed_id); + "processed_batch_id" => *batch.id, + "expected_id" => *self.to_be_processed_id); } - self.current_processing_id = None; - let res = match result { BatchProcessResult::Success => { *self.to_be_processed_id += 1; diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index 28cff24e31..5d7b17c07a 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -2,11 +2,10 @@ //! peers. 
mod batch; -mod batch_processing; mod chain; mod chain_collection; mod range; pub use batch::Batch; -pub use batch_processing::BatchProcessResult; +pub use batch::BatchId; pub use range::RangeSync; diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 59c6ff598d..d09ef4e25c 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -41,8 +41,9 @@ use super::chain::ProcessingResult; use super::chain_collection::{ChainCollection, SyncState}; -use super::{Batch, BatchProcessResult}; +use super::BatchId; use crate::message_processor::PeerSyncInfo; +use crate::sync::block_processor::BatchProcessResult; use crate::sync::manager::SyncMessage; use crate::sync::network_context::SyncNetworkContext; use beacon_chain::{BeaconChain, BeaconChainTypes}; @@ -130,8 +131,8 @@ impl RangeSync { }, None => { return warn!(self.log, - "Beacon chain dropped. Peer not considered for sync"; - "peer_id" => format!("{:?}", peer_id)); + "Beacon chain dropped. 
Peer not considered for sync"; + "peer_id" => format!("{:?}", peer_id)); } }; @@ -256,15 +257,15 @@ impl RangeSync { pub fn handle_block_process_result( &mut self, network: &mut SyncNetworkContext, - processing_id: u64, - batch: Batch, + batch_id: BatchId, + downloaded_blocks: Vec>, result: BatchProcessResult, ) { - // build an option for passing the batch to each chain - let mut batch = Some(batch); + // build an option for passing the downloaded_blocks to each chain + let mut downloaded_blocks = Some(downloaded_blocks); match self.chains.finalized_request(|chain| { - chain.on_batch_process_result(network, processing_id, &mut batch, &result) + chain.on_batch_process_result(network, batch_id, &mut downloaded_blocks, &result) }) { Some((index, ProcessingResult::RemoveChain)) => { let chain = self.chains.remove_finalized_chain(index); @@ -293,7 +294,12 @@ impl RangeSync { Some((_, ProcessingResult::KeepChain)) => {} None => { match self.chains.head_request(|chain| { - chain.on_batch_process_result(network, processing_id, &mut batch, &result) + chain.on_batch_process_result( + network, + batch_id, + &mut downloaded_blocks, + &result, + ) }) { Some((index, ProcessingResult::RemoveChain)) => { let chain = self.chains.remove_head_chain(index); @@ -308,7 +314,7 @@ impl RangeSync { None => { // This can happen if a chain gets purged due to being out of date whilst a // batch process is in progress. 
- debug!(self.log, "No chains match the block processing id"; "id" => processing_id); + debug!(self.log, "No chains match the block processing id"; "id" => *batch_id); } } } From f8bc045a01764f81c32f48c496cabee8281f965e Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 23 Mar 2020 18:28:13 +1100 Subject: [PATCH 07/24] Fix race condition in the syncing sim (#944) * Fix race condition in the syncing sim * Update another strategy --- tests/simulator/src/local_network.rs | 2 +- tests/simulator/src/sync_sim.rs | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/tests/simulator/src/local_network.rs b/tests/simulator/src/local_network.rs index c659b2f65e..ad01eaf257 100644 --- a/tests/simulator/src/local_network.rs +++ b/tests/simulator/src/local_network.rs @@ -94,7 +94,7 @@ impl LocalNetwork { .expect("bootnode must have a network"), ); }) - .expect("should have atleast one node"); + .expect("should have at least one node"); let index = self.beacon_nodes.read().len(); diff --git a/tests/simulator/src/sync_sim.rs b/tests/simulator/src/sync_sim.rs index 6c18406b6e..13fa61d042 100644 --- a/tests/simulator/src/sync_sim.rs +++ b/tests/simulator/src/sync_sim.rs @@ -94,8 +94,10 @@ pub fn verify_two_nodes_sync( // Add beacon nodes network .add_beacon_node(beacon_config.clone()) - .join(network.add_beacon_node(beacon_config.clone())) - .map(|_| network) + .map(|_| (network, beacon_config)) + .and_then(|(network, beacon_config)| { + network.add_beacon_node(beacon_config).map(|_| network) + }) }) .and_then(move |network| { // Delay for `sync_delay` epochs before verifying synced state. 
@@ -128,8 +130,10 @@ pub fn verify_in_between_sync( // Add a beacon node network .add_beacon_node(beacon_config.clone()) - .join(network.add_beacon_node(beacon_config.clone())) - .map(|_| network) + .map(|_| (network, beacon_config)) + .and_then(|(network, beacon_config)| { + network.add_beacon_node(beacon_config).map(|_| network) + }) }) .and_then(move |network| { // Delay before adding additional syncing nodes. From f6a6de2c5d9e6fe83b6ded24bad93615f98e2163 Mon Sep 17 00:00:00 2001 From: Sacha Saint-Leger Date: Mon, 23 Mar 2020 09:21:53 +0100 Subject: [PATCH 08/24] Become a Validator guides: update (#928) * Edit become-a-validator-docker.md * Update Become a Validator guides * Update Become a Validator guides * Update Become a Validator guides * fix inconsistency --- book/src/become-a-validator-docker.md | 78 ++++++++++----- book/src/become-a-validator-source.md | 134 ++++++++++++++------------ book/src/become-a-validator.md | 31 +++--- 3 files changed, 149 insertions(+), 94 deletions(-) diff --git a/book/src/become-a-validator-docker.md b/book/src/become-a-validator-docker.md index 7fb03845da..9fb4880a8f 100644 --- a/book/src/become-a-validator-docker.md +++ b/book/src/become-a-validator-docker.md @@ -12,18 +12,28 @@ binary yourself. > experience with docker-compose to integrate your locally built docker image > with the docker-compose environment. -### 1. Clone the repository +## 0. Install Docker Compose -Once you have docker-compose -[installed](https://docs.docker.com/compose/install/), clone the -[sigp/lighthouse-docker](https://github.com/sigp/lighthouse-docker) repository. + Docker Compose relies on Docker Engine for any meaningful work, so make sure you have Docker Engine installed either locally or remote, depending on your setup. 
+ +- On desktop systems like [Docker Desktop for Mac](https://docs.docker.com/docker-for-mac/install/) and [Docker Desktop for Windows](https://docs.docker.com/docker-for-windows/install/), Docker Compose is included as part of those desktop installs, so the desktop install is all you need. + +- On Linux systems, you'll need to first [install the Docker for your OS](https://docs.docker.com/install/#server) and then [follow the instructions here](https://docs.docker.com/compose/install/#install-compose-on-linux-systems). + +> For more on installing Compose, see [here](https://docs.docker.com/compose/install/). + + +## 1. Clone the repository + +Once you have Docker Compose installed, clone the +[sigp/lighthouse-docker](https://github.com/sigp/lighthouse-docker) repository: ```bash -$ git clone https://github.com/sigp/lighthouse-docker -$ cd lighthouse-docker + git clone https://github.com/sigp/lighthouse-docker + cd lighthouse-docker ``` -### 2. Configure the docker environment +## 2. Configure the Docker environment Then, create a file named `.env` with the following contents (these values are documented @@ -41,36 +51,58 @@ DEPOSIT_VALUE=3200000000 _This `.env` file should live in the `lighthouse-docker` directory alongside the `docker-compose.yml` file_. -### 3. Start Lighthouse +## 3. Start Lighthouse -Start the docker-compose environment (you may need to use `sudo`): +Start the docker-compose environment (you may need to prefix the below command with `sudo`): ```bash -$ docker-compose up + docker-compose up ``` Watch the output of this command for the `Saved new validator to disk` log, as -the `voting_pubkey` is the primary identifier for your new validator. This is -useful for finding your validator in block explorers. Here's an example of the -log: +it contains your `voting_pubkey` -- the primary identifier for your new validator. This key is useful for finding your validator in block explorers. 
Here's an example of the log: ```bash -validator_client_1 | Jan 10 12:06:05.632 INFO Saved new validator to disk voting_pubkey: 0x8fc28504448783b10b0a7f5a321505b07ad2ad8d6a8430b8868a0fcdedee43766bee725855506626085776e020dfa472 +validator_client_1 | Jan 10 12:06:05.632 INFO Saved new validator to disk +voting_pubkey: 0x8fc28504448783b10b0a7f5a321505b07ad2ad8d6a8430b8868a0fcdedee43766bee725855506626085776e020dfa472 ``` +This is one of the first logs outputted, so you may have to scroll up or perform a search in your terminal to find it. -> Note: the docker-compose setup includes a fast-synced geth node. You can +> Note: `docker-compose up` generates a new sub-directory -- to store your validator's deposit data, along with its voting and withdrawal keys -- in the `.lighthouse/validators` directory. This sub-directory is identified by your validator's `voting_pubkey` (the same `voting_pubkey` you see in the logs). So this is another way you can find it. + +> Note: the docker-compose setup includes a fast-synced geth node. So you can > expect the `beacon_node` to log some eth1-related errors whilst the geth node > boots and becomes synced. This will only happen on the first start of the > compose environment or if geth loses sync. -### Installation complete! +To find an estimate for how long your beacon node will take to finish syncing, look for logs that look like this: -In the next step you'll need to locate your `eth1_deposit_data.rlp` file from -your `.lighthouse/validators` directory. +```bash +beacon_node_1 | Mar 16 11:33:53.979 INFO Syncing +est_time: 47 mins, speed: 16.67 slots/sec, distance: 47296 slots (7 days 14 hrs), peers: 3, service: slot_notifier +``` -The `./lighthouse` directory is in the root of the `lighthouse-docker` -repository. For example, if you ran Step 1 in `/home/karlm/` then you can find -your validator directory in -`/home/karlm/lighthouse-docker/.lighthouse/validators/`. +You'll find the estimated time under `est_time`. 
In the example above, that's `47 mins`. -You can now go to [Become a Validator: Step 2](become-a-validator.html#2-submit-your-deposit-to-goerli). +If your beacon node hasn't finished syncing yet, you'll see some ERRO messages indicating that your node hasn't synced yet: + +```bash +validator_client_1 | Mar 16 11:34:36.086 ERRO Beacon node is not synced current_epoch: 6999, node_head_epoch: 5531, service: duties +``` + +It's safest to wait for your node to sync before moving on to the next step, otherwise your validator may activate before you're able to produce blocks and attestations (and you may be penalized as a result). + +However, since it generally takes somewhere between 4 and 8 hours after depositing for a validator to become active, if your `est_time` is less than 4 hours, you _should_ be fine to just move on to the next step. After all, this is a testnet and you're only risking Goerli ETH! + +## Installation complete! + +In the [next step](become-a-validator.html#2-submit-your-deposit-to-goerli) you'll need to upload your validator's deposit data. This data is stored in a file called `eth1_deposit_data.rlp`. + +You'll find it in `lighthouse-docker/.lighthouse/validators/` -- in the sub-directory that corresponds to your validator's public key (`voting_pubkey`). + + +> For example, if you ran [step 1](become-a-validator-docker.html#1-clone-the-repository) in `/home/karlm/`, and your validator's `voting_pubkey` is `0x8592c7..`, then you'll find your `eth1_deposit_data.rlp` file in the following directory: +> +>`/home/karlm/lighthouse-docker/.lighthouse/validators/0x8592c7../` + +Once you've located `eth1_deposit_data.rlp`, you're ready to move on to [Become a Validator: Step 2](become-a-validator.html#2-submit-your-deposit-to-goerli). 
diff --git a/book/src/become-a-validator-source.md b/book/src/become-a-validator-source.md index d700ee6968..7d3606b4af 100644 --- a/book/src/become-a-validator-source.md +++ b/book/src/become-a-validator-source.md @@ -1,103 +1,106 @@ -# Become an Validator: Building from Source +# Become a Validator: Building from Source + +## 0. Install Rust +If you don't have Rust installed already, visit [rustup.rs](https://rustup.rs/) to install it. + +> Note: if you're not familiar with Rust or you'd like more detailed instructions, see our [installation guide](./installation.md). + ## 1. Download and install Lighthouse -If you already have Rust installed, you can install Lighthouse with the -following commands (don't forget to use the `testnet5` branch): +Once you have Rust installed, you can install Lighthouse with the following commands (don't forget to use the `testnet5` branch): -- `$ git clone https://github.com/sigp/lighthouse.git` -- `$ git checkout testnet5` -- `$ cd lighthouse` -- `$ make` +1. `git clone https://github.com/sigp/lighthouse.git` +2. `cd lighthouse` +3. `git checkout testnet5` +4. `make` + +You may need to open a new terminal window before running `make`. You've completed this step when you can run `$ lighthouse --help` and see the help menu. -> - If you're not familiar with Rust or you'd like more detailed instructions, -> see the [Installation Guide](./installation.md) which contains a -> [Troubleshooting](installation.html#troubleshooting) section. ## 2. Start an Eth1 client -As Eth2 relies upon the Eth1 chain for validator on-boarding and eventually -Eth1 may use the Eth2 chain as a finality gadget, all Eth2 validators must have -a connection to an Eth1 node. +Since Eth2 relies upon the Eth1 chain for validator on-boarding, all Eth2 validators must have a connection to an Eth1 node. -We provide instructions for using Geth (this is, by chance, what we ended up -testing with), but you could use any client that implements the JSON RPC via -HTTP. 
At least for Geth, a fast-synced node is sufficient. +We provide instructions for using Geth (the Eth1 client that, by chance, we ended up testing with), but you could use any client that implements the JSON RPC via HTTP. A fast-synced node should be sufficient. + +### Installing Geth +If you're using a Mac, follow the instructions [listed here](https://github.com/ethereum/go-ethereum/wiki/Installation-Instructions-for-Mac) to install geth. Otherwise [see here](https://github.com/ethereum/go-ethereum/wiki/Installing-Geth). ### Starting Geth -[Install geth](https://github.com/ethereum/go-ethereum/wiki/Installing-Geth) -and then use this command (or equivalent) to start your Eth1 node: +Once you have geth installed, use this command to start your Eth1 node: ```bash -$ geth --goerli --rpc + geth --goerli --rpc ``` -## 3. Start your Beacon Node +## 3. Start your beacon node The beacon node is the core component of Eth2, it connects to other peers over -the Internet and maintains a view of the chain. +the internet and maintains a view of the chain. Start your beacon node with: ```bash -$ lighthouse beacon --eth1 --http + lighthouse beacon --eth1 --http ``` +>Note: the `--http` flag enables the HTTP API for the validator client. And the `--eth1` flag tells the beacon node that it should sync with an Ethereum1 node (e.g. Geth). These flags are only required if you wish to run a validator. + + Your beacon node has started syncing when you see the following (truncated) log: ``` -Dec 09 12:57:18.026 INFO Syncing est_time: 2 hrs ... +Dec 09 12:57:18.026 INFO Syncing +est_time: 2 hrs ... ``` The `distance` value reports the time since eth2 genesis, whilst the `est_time` reports an estimate of how long it will take your node to become synced. -It has finished syncing once you see the following (truncated) log: +You'll know it's finished syncing once you see the following (truncated) log: ``` -Dec 09 12:27:06.010 INFO Synced slot: 16835, ... 
+Dec 09 12:27:06.010 INFO Synced +slot: 16835, ... ``` -> - The `--http` flag enables the HTTP API for the validator client. -> - The `--eth1` flag tells the beacon node that it should sync with an Ethereum -> 1 node (e.g., Geth). This is only required if you wish to run a validator. ## 4. Generate your validator key Generate new validator BLS keypairs using: -```shell -$ lighthouse account validator new random +```bash + lighthouse account validator new random ``` -Take note of the `voting_pubkey` of the new validator. This will be the primary -identifier of the validator. This is how you can find your validator in block -explorers. +Take note of the `voting_pubkey` of the new validator: -You've completed this step when you see the equivalent line: +``` +INFO Saved new validator to disk +voting_pubkey: 0xa1625249d80... +``` + +It's the validator's primary identifier, and will be used to find your validator in block explorers. + +You've completed this step when you see something like the following line: ``` Dec 02 21:42:01.337 INFO Generated validator directories count: 1, base_path: "/home/karl/.lighthouse/validators" ``` -> - This will generate a new _validator directory_ in the `.lighthouse/validators` -> directory. Your validator directory will be identified by it's public key, -> which looks something like `0xc483de...`. You'll need to find this directory -> for the next step. -> - These keys are good enough for the Lighthouse testnet, however they shouldn't -> be considered secure until we've undergone a security audit (planned Jan -> 2020). +This means you've successfully generated a new sub-directory for your validator in the `.lighthouse/validators` directory. The sub-directory is identified by your validator's public key (`voting_pubkey`), and is used to store your validator's deposit data, along with its voting and withdrawal keys. 
+ +> Note: these keypairs are good enough for the Lighthouse testnet, however they shouldn't be considered secure until we've undergone a security audit (planned March/April). ## 5. Start your validator client -For security reasons, the validator client runs separately to the beacon node. -The validator client stores private keys and signs messages generated by the -beacon node. +Since the validator client stores private keys and signs messages generated by the beacon node, for security reasons it runs separately from it. You'll need both your beacon node _and_ validator client running if you want to stake. @@ -105,32 +108,45 @@ stake. Start the validator client with: ```bash -$ lighthouse validator + lighthouse validator ``` -The validator client is running and has found your validator keys from step 3 -when you see the following log: +You know that your validator client is running and has found your validator keys from [step 3](become-a-validator-source.html#3-start-your-beacon-node) when you see the following logs: ``` Dec 09 13:08:59.171 INFO Loaded validator keypair store voting_validators: 1 Dec 09 13:09:09.000 INFO Awaiting activation slot: 17787, ... ``` + +To find an estimate for how long your beacon node will take to finish syncing, look out for the following logs: + +```bash +beacon_node_1 | Mar 16 11:33:53.979 INFO Syncing +est_time: 47 mins, speed: 16.67 slots/sec, distance: 47296 slots (7 days 14 hrs), peers: 3, service: slot_notifier +``` + +You'll find the estimated time under `est_time`. In the example log above, that's `47 mins`. + If your beacon node hasn't finished syncing yet, you'll see some `ERRO` -messages indicating that your node isn't synced yet. It is safest to wait for -your node to sync before moving onto the next step, otherwise your validator -may activate before you're able to produce blocks and attestations. However, it -generally takes 4-8+ hours after deposit for a validator to become active. 
If -your `est_time` is less than 4 hours, you _should_ be fine to just move to the -next step. After all, this is a testnet and you're only risking Goerli ETH. +messages indicating that your node hasn't synced yet: + +```bash +validator_client_1 | Mar 16 11:34:36.086 ERRO Beacon node is not synced current_epoch: 6999, node_head_epoch: 5531, service: duties +``` + +It's safest to wait for your node to sync before moving on to the next step, otherwise your validator may activate before you're able to produce blocks and attestations (and you may be penalized as a result). + +However, since it generally takes somewhere between 4 and 8 hours after depositing for a validator to become active, if your `est_time` is less than 4 hours, you _should_ be fine to just move on to the next step. After all, this is a testnet and you're only risking Goerli ETH! ## Installation complete! In the next step you'll need to locate your `eth1_deposit_data.rlp` file from -your `.lighthouse/validators` directory. +In the [next step](become-a-validator.html#2-submit-your-deposit-to-goerli) you'll need to upload your validator's deposit data. This data is stored in a file called `eth1_deposit_data.rlp`. The `./lighthouse` directory is in your `$HOME` directory. For example, if -you're in Linux and your user is `karlm`, you can find your validator directory -in `/home/karlm/.lighthouse/validators/`. +You'll find it in `/home/.lighthouse/validators` -- in the sub-directory that corresponds to your validator's public key (`voting_pubkey`). -You can now go to [Become a Validator: Step 2](become-a-validator.html#2-submit-your-deposit-to-goerli). 
+> For example, if your username is `karlm`, and your validator's public key (aka `voting_pubkey`) is `0x8592c7..`, then you'll find your `eth1_deposit_data.rlp` file in the following directory: +> +>`/home/karlm/.lighthouse/validators/0x8592c7../` + +Once you've located your `eth1_deposit_data.rlp` file, you're ready to move on to [Become a Validator: Step 2](become-a-validator.html#2-submit-your-deposit-to-goerli). diff --git a/book/src/become-a-validator.md b/book/src/become-a-validator.md index c86819390f..6427b414f4 100644 --- a/book/src/become-a-validator.md +++ b/book/src/become-a-validator.md @@ -1,30 +1,36 @@ # Become an Ethereum 2.0 Testnet Validator -Running Lighthouse validator is easy if you're familiar with the terminal. It -runs on Linux, MacOS and Windows and we have a Docker work-flow. +Running a Lighthouse validator is easy if you're familiar with the terminal. -Before you start, you'll need [Metamask](https://metamask.io/) and 3.2 gETH +Lighthouse runs on Linux, MacOS and Windows and has a Docker work-flow to make things as simple as possible. + + +## 0. Acquire Goerli ETH +Before you install Lighthouse, you'll need [Metamask](https://metamask.io/) and 3.2 gETH (Goerli ETH). We recommend the [mudit.blog faucet](https://faucet.goerli.mudit.blog/) for those familiar with Goerli, or [goerli.net](https://goerli.net/) for an overview of the testnet. +> If this is your first time using Metamask and/or interacting with an ethereum test network, we recommend going through the beginning of [this guide](https://hack.aragon.org/docs/guides-use-metamask) first (up to the *Signing your first transaction with MetaMask* section). + ## 1. Install and start Lighthouse There are two, different ways to install and start a Lighthouse validator: -- [Using `docker-compose`](./become-a-validator-docker.md): this is the easiest method. -- [Building from source](./become-a-validator-source.md): this is a little more involved, however it +1. 
[Using `docker-compose`](./become-a-validator-docker.md): this is the easiest method. + +2. [Building from source](./become-a-validator-source.md): this is a little more involved, however it gives a more hands-on experience. -Once you have completed **only one** of these steps, move onto the next step. +Once you've completed **either one** of these steps, you can move onto the next step. ## 2. Submit your deposit to Goerli