diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 423f3deca2..d536869e45 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -418,3 +418,14 @@ jobs: env: # Allow warnings on Nightly RUSTFLAGS: "" + compile-with-beta-compiler: + name: compile-with-beta-compiler + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Install dependencies + run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler + - name: Use Rust beta + run: rustup override set beta + - name: Run make + run: make diff --git a/Cargo.lock b/Cargo.lock index 42aa86ee5e..1041c91d1d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -441,7 +441,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "3.2.1" +version = "3.3.0" dependencies = [ "beacon_chain", "clap", @@ -599,7 +599,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "3.2.1" +version = "3.3.0" dependencies = [ "beacon_node", "clap", @@ -2704,6 +2704,7 @@ version = "0.1.0" dependencies = [ "beacon_chain", "bs58", + "directory", "environment", "eth1", "eth2", @@ -2728,6 +2729,8 @@ dependencies = [ "slot_clock", "state_processing", "store", + "sysinfo", + "system_health", "task_executor", "tokio", "tokio-stream", @@ -3088,7 +3091,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "3.2.1" +version = "3.3.0" dependencies = [ "account_utils", "beacon_chain", @@ -3554,7 +3557,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "3.2.1" +version = "3.3.0" dependencies = [ "account_manager", "account_utils", @@ -4143,6 +4146,15 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "ntapi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc51db7b362b205941f71232e56c625156eb9a929f8cf74a428fd5bc094a4afc" +dependencies = [ + "winapi", +] + [[package]] name = 
"num-bigint" version = "0.4.3" @@ -6236,6 +6248,34 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "sysinfo" +version = "0.26.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29ddf41e393a9133c81d5f0974195366bd57082deac6e0eb02ed39b8341c2bb6" +dependencies = [ + "cfg-if", + "core-foundation-sys", + "libc", + "ntapi", + "once_cell", + "rayon", + "winapi", +] + +[[package]] +name = "system_health" +version = "0.1.0" +dependencies = [ + "lighthouse_network", + "parking_lot 0.12.1", + "serde", + "serde_derive", + "serde_json", + "sysinfo", + "types", +] + [[package]] name = "take_mut" version = "0.2.2" @@ -7114,6 +7154,8 @@ dependencies = [ "slashing_protection", "slog", "slot_clock", + "sysinfo", + "system_health", "task_executor", "tempfile", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 7505730091..c9fad65a1d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,6 +37,7 @@ members = [ "common/oneshot_broadcast", "common/sensitive_url", "common/slot_clock", + "common/system_health", "common/task_executor", "common/target_check", "common/test_random_derive", diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index bbd2cbc999..da01121055 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -114,7 +114,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { pub fn cli_run( matches: &ArgMatches, - mut env: Environment, + env: Environment, validator_dir: PathBuf, ) -> Result<(), String> { let spec = env.core_context().eth2_config.spec; diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index b85aae2f4f..d47f77da93 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "3.2.1" +version = "3.3.0" authors = ["Paul Hauner ", "Age Manning BeaconChain { self: &Arc, chain_segment: Vec>>, count_unrealized: CountUnrealized, + notify_execution_layer: NotifyExecutionLayer, ) -> 
ChainSegmentResult { let mut imported_blocks = 0; @@ -2409,6 +2410,7 @@ impl BeaconChain { signature_verified_block.block_root(), signature_verified_block, count_unrealized, + notify_execution_layer, ) .await { @@ -2497,6 +2499,7 @@ impl BeaconChain { block_root: Hash256, unverified_block: B, count_unrealized: CountUnrealized, + notify_execution_layer: NotifyExecutionLayer, ) -> Result> { // Start the Prometheus timer. let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); @@ -2510,8 +2513,11 @@ impl BeaconChain { // A small closure to group the verification and import errors. let chain = self.clone(); let import_block = async move { - let execution_pending = - unverified_block.into_execution_pending_block(block_root, &chain)?; + let execution_pending = unverified_block.into_execution_pending_block( + block_root, + &chain, + notify_execution_layer, + )?; chain .import_execution_pending_block(execution_pending, count_unrealized) .await @@ -2581,6 +2587,7 @@ impl BeaconChain { confirmed_state_roots, payload_verification_handle, parent_eth1_finalization_data, + consensus_context, } = execution_pending_block; let PayloadVerificationOutcome { @@ -2634,6 +2641,7 @@ impl BeaconChain { count_unrealized, parent_block, parent_eth1_finalization_data, + consensus_context, ) }, "payload_verification_handle", @@ -2659,70 +2667,36 @@ impl BeaconChain { count_unrealized: CountUnrealized, parent_block: SignedBlindedBeaconBlock, parent_eth1_finalization_data: Eth1FinalizationData, + mut consensus_context: ConsensusContext, ) -> Result> { + // ----------------------------- BLOCK NOT YET ATTESTABLE ---------------------------------- + // Everything in this initial section is on the hot path between processing the block and + // being able to attest to it. DO NOT add any extra processing in this initial section + // unless it must run before fork choice. 
+ // ----------------------------------------------------------------------------------------- let current_slot = self.slot()?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + let block = signed_block.message(); + let post_exec_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_POST_EXEC_PROCESSING); - let attestation_observation_timer = - metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION); - - // Iterate through the attestations in the block and register them as an "observed - // attestation". This will stop us from propagating them on the gossip network. - for a in signed_block.message().body().attestations() { - match self.observed_attestations.write().observe_item(a, None) { - // If the observation was successful or if the slot for the attestation was too - // low, continue. - // - // We ignore `SlotTooLow` since this will be very common whilst syncing. - Ok(_) | Err(AttestationObservationError::SlotTooLow { .. }) => {} - Err(e) => return Err(BlockError::BeaconChainError(e.into())), - } - } - - metrics::stop_timer(attestation_observation_timer); - - // If a slasher is configured, provide the attestations from the block. - if let Some(slasher) = self.slasher.as_ref() { - for attestation in signed_block.message().body().attestations() { - let committee = - state.get_beacon_committee(attestation.data.slot, attestation.data.index)?; - let indexed_attestation = get_indexed_attestation(committee.committee, attestation) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; - slasher.accept_attestation(indexed_attestation); - } - } + // Check against weak subjectivity checkpoint. + self.check_block_against_weak_subjectivity_checkpoint(block, block_root, &state)?; // If there are new validators in this block, update our pubkey cache. 
// - // We perform this _before_ adding the block to fork choice because the pubkey cache is - // used by attestation processing which will only process an attestation if the block is - // known to fork choice. This ordering ensure that the pubkey cache is always up-to-date. - self.validator_pubkey_cache + // The only keys imported here will be ones for validators deposited in this block, because + // the cache *must* already have been updated for the parent block when it was imported. + // Newly deposited validators are not active and their keys are not required by other parts + // of block processing. The reason we do this here and not after making the block attestable + // is so we don't have to think about lock ordering with respect to the fork choice lock. + // There are a bunch of places where we lock both fork choice and the pubkey cache and it + // would be difficult to check that they all lock fork choice first. + let mut kv_store_ops = self + .validator_pubkey_cache .try_write_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(Error::ValidatorPubkeyCacheLockTimeout)? .import_new_pubkeys(&state)?; - // For the current and next epoch of this state, ensure we have the shuffling from this - // block in our cache. - for relative_epoch in &[RelativeEpoch::Current, RelativeEpoch::Next] { - let shuffling_id = AttestationShufflingId::new(block_root, &state, *relative_epoch)?; - - let shuffling_is_cached = self - .shuffling_cache - .try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or(Error::AttestationCacheLockTimeout)? - .contains(&shuffling_id); - - if !shuffling_is_cached { - state.build_committee_cache(*relative_epoch, &self.spec)?; - let committee_cache = state.committee_cache(*relative_epoch)?; - self.shuffling_cache - .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or(Error::AttestationCacheLockTimeout)? 
- .insert_committee_cache(shuffling_id, committee_cache); - } - } - // Apply the state to the attester cache, only if it is from the previous epoch or later. // // In a perfect scenario there should be no need to add previous-epoch states to the cache. @@ -2734,52 +2708,7 @@ impl BeaconChain { .map_err(BeaconChainError::from)?; } - // Alias for readability. - let block = signed_block.message(); - - // Only perform the weak subjectivity check if it was configured. - if let Some(wss_checkpoint) = self.config.weak_subjectivity_checkpoint { - // Note: we're using the finalized checkpoint from the head state, rather than fork - // choice. - // - // We are doing this to ensure that we detect changes in finalization. It's possible - // that fork choice has already been updated to the finalized checkpoint in the block - // we're importing. - let current_head_finalized_checkpoint = - self.canonical_head.cached_head().finalized_checkpoint(); - // Compare the existing finalized checkpoint with the incoming block's finalized checkpoint. - let new_finalized_checkpoint = state.finalized_checkpoint(); - - // This ensures we only perform the check once. - if (current_head_finalized_checkpoint.epoch < wss_checkpoint.epoch) - && (wss_checkpoint.epoch <= new_finalized_checkpoint.epoch) - { - if let Err(e) = - self.verify_weak_subjectivity_checkpoint(wss_checkpoint, block_root, &state) - { - let mut shutdown_sender = self.shutdown_sender(); - crit!( - self.log, - "Weak subjectivity checkpoint verification failed while importing block!"; - "block_root" => ?block_root, - "parent_root" => ?block.parent_root(), - "old_finalized_epoch" => ?current_head_finalized_checkpoint.epoch, - "new_finalized_epoch" => ?new_finalized_checkpoint.epoch, - "weak_subjectivity_epoch" => ?wss_checkpoint.epoch, - "error" => ?e, - ); - crit!(self.log, "You must use the `--purge-db` flag to clear the database and restart sync. 
You may be on a hostile network."); - shutdown_sender - .try_send(ShutdownReason::Failure( - "Weak subjectivity checkpoint verification failed. Provided block root is not a checkpoint." - )) - .map_err(|err| BlockError::BeaconChainError(BeaconChainError::WeakSubjectivtyShutdownError(err)))?; - return Err(BlockError::WeakSubjectivityConflict); - } - } - } - - // Take an exclusive write-lock on fork choice. It's very important prevent deadlocks by + // Take an exclusive write-lock on fork choice. It's very important to prevent deadlocks by // avoiding taking other locks whilst holding this lock. let mut fork_choice = self.canonical_head.fork_choice_write_lock(); @@ -2809,77 +2738,6 @@ impl BeaconChain { .map_err(|e| BlockError::BeaconChainError(e.into()))?; } - // Allow the validator monitor to learn about a new valid state. - self.validator_monitor - .write() - .process_valid_state(current_slot.epoch(T::EthSpec::slots_per_epoch()), &state); - let validator_monitor = self.validator_monitor.read(); - - // Register each attester slashing in the block with fork choice. - for attester_slashing in block.body().attester_slashings() { - fork_choice.on_attester_slashing(attester_slashing); - } - - // Register each attestation in the block with the fork choice service. - for attestation in block.body().attestations() { - let _fork_choice_attestation_timer = - metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); - let attestation_target_epoch = attestation.data.target.epoch; - - let committee = - state.get_beacon_committee(attestation.data.slot, attestation.data.index)?; - let indexed_attestation = get_indexed_attestation(committee.committee, attestation) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; - - match fork_choice.on_attestation( - current_slot, - &indexed_attestation, - AttestationFromBlock::True, - &self.spec, - ) { - Ok(()) => Ok(()), - // Ignore invalid attestations whilst importing attestations from a block. 
The - // block might be very old and therefore the attestations useless to fork choice. - Err(ForkChoiceError::InvalidAttestation(_)) => Ok(()), - Err(e) => Err(BlockError::BeaconChainError(e.into())), - }?; - - // To avoid slowing down sync, only register attestations for the - // `observed_block_attesters` if they are from the previous epoch or later. - if attestation_target_epoch + 1 >= current_epoch { - let mut observed_block_attesters = self.observed_block_attesters.write(); - for &validator_index in &indexed_attestation.attesting_indices { - if let Err(e) = observed_block_attesters - .observe_validator(attestation_target_epoch, validator_index as usize) - { - debug!( - self.log, - "Failed to register observed block attester"; - "error" => ?e, - "epoch" => attestation_target_epoch, - "validator_index" => validator_index, - ) - } - } - } - - // Only register this with the validator monitor when the block is sufficiently close to - // the current slot. - if VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64 * T::EthSpec::slots_per_epoch() - + block.slot().as_u64() - >= current_slot.as_u64() - { - match fork_choice.get_block(&block.parent_root()) { - Some(parent_block) => validator_monitor.register_attestation_in_block( - &indexed_attestation, - parent_block.slot, - &self.spec, - ), - None => warn!(self.log, "Failed to get parent block"; "slot" => %block.slot()), - } - } - } - // If the block is recent enough and it was not optimistically imported, check to see if it // becomes the head block. If so, apply it to the early attester cache. 
This will allow // attestations to the block without waiting for the block and state to be inserted to the @@ -2928,56 +2786,28 @@ impl BeaconChain { ), } } + drop(post_exec_timer); - // Register sync aggregate with validator monitor - if let Ok(sync_aggregate) = block.body().sync_aggregate() { - // `SyncCommittee` for the sync_aggregate should correspond to the duty slot - let duty_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); - let sync_committee = self.sync_committee_at_epoch(duty_epoch)?; - let participant_pubkeys = sync_committee - .pubkeys - .iter() - .zip(sync_aggregate.sync_committee_bits.iter()) - .filter_map(|(pubkey, bit)| bit.then_some(pubkey)) - .collect::>(); + // ---------------------------- BLOCK PROBABLY ATTESTABLE ---------------------------------- + // Most blocks are now capable of being attested to thanks to the `early_attester_cache` + // cache above. Resume non-essential processing. + // ----------------------------------------------------------------------------------------- - validator_monitor.register_sync_aggregate_in_block( - block.slot(), - block.parent_root(), - participant_pubkeys, - ); - } - - for exit in block.body().voluntary_exits() { - validator_monitor.register_block_voluntary_exit(&exit.message) - } - - for slashing in block.body().attester_slashings() { - validator_monitor.register_block_attester_slashing(slashing) - } - - for slashing in block.body().proposer_slashings() { - validator_monitor.register_block_proposer_slashing(slashing) - } - - drop(validator_monitor); - - // Only present some metrics for blocks from the previous epoch or later. - // - // This helps avoid noise in the metrics during sync. - if block.slot().epoch(T::EthSpec::slots_per_epoch()) + 1 >= self.epoch()? 
{ - metrics::observe( - &metrics::OPERATIONS_PER_BLOCK_ATTESTATION, - block.body().attestations().len() as f64, - ); - - if let Ok(sync_aggregate) = block.body().sync_aggregate() { - metrics::set_gauge( - &metrics::BLOCK_SYNC_AGGREGATE_SET_BITS, - sync_aggregate.num_set_bits() as i64, - ); - } - } + self.import_block_update_shuffling_cache(block_root, &mut state)?; + self.import_block_observe_attestations( + block, + &state, + &mut consensus_context, + current_epoch, + ); + self.import_block_update_validator_monitor( + block, + &state, + &mut consensus_context, + current_slot, + parent_block.slot(), + ); + self.import_block_update_slasher(block, &state, &mut consensus_context); let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE); @@ -2994,7 +2824,9 @@ impl BeaconChain { ops.push(StoreOp::PutState(block.state_root(), &state)); let txn_lock = self.store.hot_db.begin_rw_transaction(); - if let Err(e) = self.store.do_atomically(ops) { + kv_store_ops.extend(self.store.convert_to_kv_batch(ops)?); + + if let Err(e) = self.store.hot_db.do_atomically(kv_store_ops) { error!( self.log, "Database write failed!"; @@ -3002,6 +2834,10 @@ impl BeaconChain { "error" => ?e, ); + // Clear the early attester cache to prevent attestations which we would later be unable + // to verify due to the failure. + self.early_attester_cache.clear(); + // Since the write failed, try to revert the canonical head back to what was stored // in the database. This attempts to prevent inconsistency between the database and // fork choice. 
@@ -3044,6 +2880,7 @@ impl BeaconChain { eth1_deposit_index: state.eth1_deposit_index(), }; let current_finalized_checkpoint = state.finalized_checkpoint(); + self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .ok_or(Error::SnapshotCacheLockTimeout) @@ -3051,7 +2888,7 @@ impl BeaconChain { snapshot_cache.insert( BeaconSnapshot { beacon_state: state, - beacon_block: signed_block, + beacon_block: signed_block.clone(), beacon_block_root: block_root, }, None, @@ -3070,22 +2907,312 @@ impl BeaconChain { self.head_tracker .register_block(block_root, parent_root, slot); - // Send an event to the `events` endpoint after fully processing the block. - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_block_subscribers() { - event_handler.register(EventKind::Block(SseBlock { - slot, - block: block_root, - execution_optimistic: payload_verification_status.is_optimistic(), - })); - } - } - metrics::stop_timer(db_write_timer); metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES); - let block_delay_total = get_slot_delay_ms(block_time_imported, slot, &self.slot_clock); + // Update the deposit contract cache. + self.import_block_update_deposit_contract_finalization( + block, + block_root, + current_epoch, + current_finalized_checkpoint, + current_eth1_finalization_data, + parent_eth1_finalization_data, + parent_block.slot(), + ); + + // Inform the unknown block cache, in case it was waiting on this block. + self.pre_finalization_block_cache + .block_processed(block_root); + + self.import_block_update_metrics_and_events( + block, + block_root, + block_time_imported, + payload_verification_status, + current_slot, + ); + + Ok(block_root) + } + + /// Check block's consistentency with any configured weak subjectivity checkpoint. 
+ fn check_block_against_weak_subjectivity_checkpoint( + &self, + block: BeaconBlockRef, + block_root: Hash256, + state: &BeaconState, + ) -> Result<(), BlockError> { + // Only perform the weak subjectivity check if it was configured. + let wss_checkpoint = if let Some(checkpoint) = self.config.weak_subjectivity_checkpoint { + checkpoint + } else { + return Ok(()); + }; + // Note: we're using the finalized checkpoint from the head state, rather than fork + // choice. + // + // We are doing this to ensure that we detect changes in finalization. It's possible + // that fork choice has already been updated to the finalized checkpoint in the block + // we're importing. + let current_head_finalized_checkpoint = + self.canonical_head.cached_head().finalized_checkpoint(); + // Compare the existing finalized checkpoint with the incoming block's finalized checkpoint. + let new_finalized_checkpoint = state.finalized_checkpoint(); + + // This ensures we only perform the check once. + if current_head_finalized_checkpoint.epoch < wss_checkpoint.epoch + && wss_checkpoint.epoch <= new_finalized_checkpoint.epoch + { + if let Err(e) = + self.verify_weak_subjectivity_checkpoint(wss_checkpoint, block_root, state) + { + let mut shutdown_sender = self.shutdown_sender(); + crit!( + self.log, + "Weak subjectivity checkpoint verification failed while importing block!"; + "block_root" => ?block_root, + "parent_root" => ?block.parent_root(), + "old_finalized_epoch" => ?current_head_finalized_checkpoint.epoch, + "new_finalized_epoch" => ?new_finalized_checkpoint.epoch, + "weak_subjectivity_epoch" => ?wss_checkpoint.epoch, + "error" => ?e + ); + crit!( + self.log, + "You must use the `--purge-db` flag to clear the database and restart sync. \ + You may be on a hostile network." + ); + shutdown_sender + .try_send(ShutdownReason::Failure( + "Weak subjectivity checkpoint verification failed. 
\ + Provided block root is not a checkpoint.", + )) + .map_err(|err| { + BlockError::BeaconChainError( + BeaconChainError::WeakSubjectivtyShutdownError(err), + ) + })?; + return Err(BlockError::WeakSubjectivityConflict); + } + } + Ok(()) + } + + /// Process a block for the validator monitor, including all its constituent messages. + fn import_block_update_validator_monitor( + &self, + block: BeaconBlockRef, + state: &BeaconState, + ctxt: &mut ConsensusContext, + current_slot: Slot, + parent_block_slot: Slot, + ) { + // Only register blocks with the validator monitor when the block is sufficiently close to + // the current slot. + if VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64 * T::EthSpec::slots_per_epoch() + + block.slot().as_u64() + < current_slot.as_u64() + { + return; + } + + // Allow the validator monitor to learn about a new valid state. + self.validator_monitor + .write() + .process_valid_state(current_slot.epoch(T::EthSpec::slots_per_epoch()), state); + + let validator_monitor = self.validator_monitor.read(); + + // Sync aggregate. + if let Ok(sync_aggregate) = block.body().sync_aggregate() { + // `SyncCommittee` for the sync_aggregate should correspond to the duty slot + let duty_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); + + match self.sync_committee_at_epoch(duty_epoch) { + Ok(sync_committee) => { + let participant_pubkeys = sync_committee + .pubkeys + .iter() + .zip(sync_aggregate.sync_committee_bits.iter()) + .filter_map(|(pubkey, bit)| bit.then_some(pubkey)) + .collect::>(); + + validator_monitor.register_sync_aggregate_in_block( + block.slot(), + block.parent_root(), + participant_pubkeys, + ); + } + Err(e) => { + warn!( + self.log, + "Unable to fetch sync committee"; + "epoch" => duty_epoch, + "purpose" => "validator monitor", + "error" => ?e, + ); + } + } + } + + // Attestations. 
+ for attestation in block.body().attestations() { + let indexed_attestation = match ctxt.get_indexed_attestation(state, attestation) { + Ok(indexed) => indexed, + Err(e) => { + debug!( + self.log, + "Failed to get indexed attestation"; + "purpose" => "validator monitor", + "attestation_slot" => attestation.data.slot, + "error" => ?e, + ); + continue; + } + }; + validator_monitor.register_attestation_in_block( + indexed_attestation, + parent_block_slot, + &self.spec, + ); + } + + for exit in block.body().voluntary_exits() { + validator_monitor.register_block_voluntary_exit(&exit.message) + } + + for slashing in block.body().attester_slashings() { + validator_monitor.register_block_attester_slashing(slashing) + } + + for slashing in block.body().proposer_slashings() { + validator_monitor.register_block_proposer_slashing(slashing) + } + } + + /// Iterate through the attestations in the block and register them as "observed". + /// + /// This will stop us from propagating them on the gossip network. + fn import_block_observe_attestations( + &self, + block: BeaconBlockRef, + state: &BeaconState, + ctxt: &mut ConsensusContext, + current_epoch: Epoch, + ) { + // To avoid slowing down sync, only observe attestations if the block is from the + // previous epoch or later. + if state.current_epoch() + 1 < current_epoch { + return; + } + + let _timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION); + + for a in block.body().attestations() { + match self.observed_attestations.write().observe_item(a, None) { + // If the observation was successful or if the slot for the attestation was too + // low, continue. + // + // We ignore `SlotTooLow` since this will be very common whilst syncing. + Ok(_) | Err(AttestationObservationError::SlotTooLow { .. 
}) => {} + Err(e) => { + debug!( + self.log, + "Failed to register observed attestation"; + "error" => ?e, + "epoch" => a.data.target.epoch + ); + } + } + + let indexed_attestation = match ctxt.get_indexed_attestation(state, a) { + Ok(indexed) => indexed, + Err(e) => { + debug!( + self.log, + "Failed to get indexed attestation"; + "purpose" => "observation", + "attestation_slot" => a.data.slot, + "error" => ?e, + ); + continue; + } + }; + + let mut observed_block_attesters = self.observed_block_attesters.write(); + + for &validator_index in &indexed_attestation.attesting_indices { + if let Err(e) = observed_block_attesters + .observe_validator(a.data.target.epoch, validator_index as usize) + { + debug!( + self.log, + "Failed to register observed block attester"; + "error" => ?e, + "epoch" => a.data.target.epoch, + "validator_index" => validator_index, + ) + } + } + } + } + + /// If a slasher is configured, provide the attestations from the block. + fn import_block_update_slasher( + &self, + block: BeaconBlockRef, + state: &BeaconState, + ctxt: &mut ConsensusContext, + ) { + if let Some(slasher) = self.slasher.as_ref() { + for attestation in block.body().attestations() { + let indexed_attestation = match ctxt.get_indexed_attestation(state, attestation) { + Ok(indexed) => indexed, + Err(e) => { + debug!( + self.log, + "Failed to get indexed attestation"; + "purpose" => "slasher", + "attestation_slot" => attestation.data.slot, + "error" => ?e, + ); + continue; + } + }; + slasher.accept_attestation(indexed_attestation.clone()); + } + } + } + + fn import_block_update_metrics_and_events( + &self, + block: BeaconBlockRef, + block_root: Hash256, + block_time_imported: Duration, + payload_verification_status: PayloadVerificationStatus, + current_slot: Slot, + ) { + // Only present some metrics for blocks from the previous epoch or later. + // + // This helps avoid noise in the metrics during sync. 
+ if block.slot() + 2 * T::EthSpec::slots_per_epoch() >= current_slot { + metrics::observe( + &metrics::OPERATIONS_PER_BLOCK_ATTESTATION, + block.body().attestations().len() as f64, + ); + + if let Ok(sync_aggregate) = block.body().sync_aggregate() { + metrics::set_gauge( + &metrics::BLOCK_SYNC_AGGREGATE_SET_BITS, + sync_aggregate.num_set_bits() as i64, + ); + } + } + + let block_delay_total = + get_slot_delay_ms(block_time_imported, block.slot(), &self.slot_clock); // Do not write to the cache for blocks older than 2 epochs, this helps reduce writes to // the cache during sync. @@ -3117,62 +3244,105 @@ impl BeaconChain { ); } - // Do not write to eth1 finalization cache for blocks older than 5 epochs - // this helps reduce noise during sync - if block_delay_total - < self.slot_clock.slot_duration() * 5 * (T::EthSpec::slots_per_epoch() as u32) - { - let parent_block_epoch = parent_block.slot().epoch(T::EthSpec::slots_per_epoch()); - if parent_block_epoch < current_epoch { - // we've crossed epoch boundary, store Eth1FinalizationData - let (checkpoint, eth1_finalization_data) = - if current_slot % T::EthSpec::slots_per_epoch() == 0 { - // current block is the checkpoint - ( - Checkpoint { - epoch: current_epoch, - root: block_root, - }, - current_eth1_finalization_data, - ) - } else { - // parent block is the checkpoint - ( - Checkpoint { - epoch: current_epoch, - root: parent_block.canonical_root(), - }, - parent_eth1_finalization_data, - ) - }; + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_block_subscribers() { + event_handler.register(EventKind::Block(SseBlock { + slot: block.slot(), + block: block_root, + execution_optimistic: payload_verification_status.is_optimistic(), + })); + } + } + } - if let Some(finalized_eth1_data) = self - .eth1_finalization_cache - .try_write_for(ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT) - .and_then(|mut cache| { - cache.insert(checkpoint, eth1_finalization_data); - 
cache.finalize(¤t_finalized_checkpoint) - }) - { - if let Some(eth1_chain) = self.eth1_chain.as_ref() { - let finalized_deposit_count = finalized_eth1_data.deposit_count; - eth1_chain.finalize_eth1_data(finalized_eth1_data); - debug!( - self.log, - "called eth1_chain.finalize_eth1_data()"; - "epoch" => current_finalized_checkpoint.epoch, - "deposit count" => finalized_deposit_count, - ); - } + fn import_block_update_shuffling_cache( + &self, + block_root: Hash256, + state: &mut BeaconState, + ) -> Result<(), BlockError> { + // For the current and next epoch of this state, ensure we have the shuffling from this + // block in our cache. + for relative_epoch in [RelativeEpoch::Current, RelativeEpoch::Next] { + let shuffling_id = AttestationShufflingId::new(block_root, state, relative_epoch)?; + + let shuffling_is_cached = self + .shuffling_cache + .try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or(Error::AttestationCacheLockTimeout)? + .contains(&shuffling_id); + + if !shuffling_is_cached { + state.build_committee_cache(relative_epoch, &self.spec)?; + let committee_cache = state.committee_cache(relative_epoch)?; + self.shuffling_cache + .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or(Error::AttestationCacheLockTimeout)? + .insert_committee_cache(shuffling_id, committee_cache); + } + } + Ok(()) + } + + #[allow(clippy::too_many_arguments)] + fn import_block_update_deposit_contract_finalization( + &self, + block: BeaconBlockRef, + block_root: Hash256, + current_epoch: Epoch, + current_finalized_checkpoint: Checkpoint, + current_eth1_finalization_data: Eth1FinalizationData, + parent_eth1_finalization_data: Eth1FinalizationData, + parent_block_slot: Slot, + ) { + // Do not write to eth1 finalization cache for blocks older than 5 epochs. 
+ if block.slot().epoch(T::EthSpec::slots_per_epoch()) + 5 < current_epoch { + return; + } + + let parent_block_epoch = parent_block_slot.epoch(T::EthSpec::slots_per_epoch()); + if parent_block_epoch < current_epoch { + // we've crossed epoch boundary, store Eth1FinalizationData + let (checkpoint, eth1_finalization_data) = + if block.slot() % T::EthSpec::slots_per_epoch() == 0 { + // current block is the checkpoint + ( + Checkpoint { + epoch: current_epoch, + root: block_root, + }, + current_eth1_finalization_data, + ) + } else { + // parent block is the checkpoint + ( + Checkpoint { + epoch: current_epoch, + root: block.parent_root(), + }, + parent_eth1_finalization_data, + ) + }; + + if let Some(finalized_eth1_data) = self + .eth1_finalization_cache + .try_write_for(ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT) + .and_then(|mut cache| { + cache.insert(checkpoint, eth1_finalization_data); + cache.finalize(¤t_finalized_checkpoint) + }) + { + if let Some(eth1_chain) = self.eth1_chain.as_ref() { + let finalized_deposit_count = finalized_eth1_data.deposit_count; + eth1_chain.finalize_eth1_data(finalized_eth1_data); + debug!( + self.log, + "called eth1_chain.finalize_eth1_data()"; + "epoch" => current_finalized_checkpoint.epoch, + "deposit count" => finalized_deposit_count, + ); } } } - - // Inform the unknown block cache, in case it was waiting on this block. - self.pre_finalization_block_cache - .block_processed(block_root); - - Ok(block_root) } /// If configured, wait for the fork choice run at the start of the slot to complete. @@ -3553,10 +3723,12 @@ impl BeaconChain { // This will be a lot slower but guards against bugs in block production and can be // quickly rolled out without a release. 
if self.config.paranoid_block_proposal { + let mut tmp_ctxt = ConsensusContext::new(state.slot()); attestations.retain(|att| { verify_attestation_for_block_inclusion( &state, att, + &mut tmp_ctxt, VerifySignatures::True, &self.spec, ) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 104de57dbf..ab317e96b9 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -45,29 +45,29 @@ use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, - AllowOptimisticImport, PayloadNotifier, + AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, }; use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ beacon_chain::{ - BeaconForkChoice, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, - VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, + BeaconForkChoice, ForkChoiceError, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, + MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, }, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use derivative::Derivative; use eth2::types::EventKind; use execution_layer::PayloadStatus; -use fork_choice::PayloadVerificationStatus; +use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; use safe_arith::ArithError; use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; -use state_processing::per_block_processing::is_merge_transition_block; +use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block}; use state_processing::{ 
block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError}, per_block_processing, per_slot_processing, @@ -550,8 +550,22 @@ pub fn signature_verify_chain_segment( let pubkey_cache = get_validator_pubkey_cache(chain)?; let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); + let mut signature_verified_blocks = Vec::with_capacity(chain_segment.len()); + for (block_root, block) in &chain_segment { - signature_verifier.include_all_signatures(block, Some(*block_root), None)?; + let mut consensus_context = + ConsensusContext::new(block.slot()).set_current_block_root(*block_root); + + signature_verifier.include_all_signatures(block, &mut consensus_context)?; + + // Save the block and its consensus context. The context will have had its proposer index + // and attesting indices filled in, which can be used to accelerate later block processing. + signature_verified_blocks.push(SignatureVerifiedBlock { + block: block.clone(), + block_root: *block_root, + parent: None, + consensus_context, + }); } if signature_verifier.verify().is_err() { @@ -560,22 +574,6 @@ pub fn signature_verify_chain_segment( drop(pubkey_cache); - let mut signature_verified_blocks = chain_segment - .into_iter() - .map(|(block_root, block)| { - // Proposer index has already been verified above during signature verification. 
- let consensus_context = ConsensusContext::new(block.slot()) - .set_current_block_root(block_root) - .set_proposer_index(block.message().proposer_index()); - SignatureVerifiedBlock { - block, - block_root, - parent: None, - consensus_context, - } - }) - .collect::>(); - if let Some(signature_verified_block) = signature_verified_blocks.first_mut() { signature_verified_block.parent = Some(parent); } @@ -625,6 +623,7 @@ pub struct ExecutionPendingBlock { pub parent_block: SignedBeaconBlock>, pub parent_eth1_finalization_data: Eth1FinalizationData, pub confirmed_state_roots: Vec, + pub consensus_context: ConsensusContext, pub payload_verification_handle: PayloadVerificationHandle, } @@ -636,8 +635,9 @@ pub trait IntoExecutionPendingBlock: Sized { self, block_root: Hash256, chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockError> { - self.into_execution_pending_block_slashable(block_root, chain) + self.into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) .map(|execution_pending| { // Supply valid block to slasher. 
if let Some(slasher) = chain.slasher.as_ref() { @@ -653,6 +653,7 @@ pub trait IntoExecutionPendingBlock: Sized { self, block_root: Hash256, chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo>>; fn block(&self) -> &SignedBeaconBlock; @@ -899,10 +900,15 @@ impl IntoExecutionPendingBlock for GossipVerifiedBlock>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo>> { let execution_pending = SignatureVerifiedBlock::from_gossip_verified_block_check_slashable(self, chain)?; - execution_pending.into_execution_pending_block_slashable(block_root, chain) + execution_pending.into_execution_pending_block_slashable( + block_root, + chain, + notify_execution_layer, + ) } fn block(&self) -> &SignedBeaconBlock { @@ -944,13 +950,14 @@ impl SignatureVerifiedBlock { let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); - signature_verifier.include_all_signatures(&block, Some(block_root), None)?; + let mut consensus_context = + ConsensusContext::new(block.slot()).set_current_block_root(block_root); + + signature_verifier.include_all_signatures(&block, &mut consensus_context)?; if signature_verifier.verify().is_ok() { Ok(Self { - consensus_context: ConsensusContext::new(block.slot()) - .set_current_block_root(block_root) - .set_proposer_index(block.message().proposer_index()), + consensus_context, block, block_root, parent: Some(parent), @@ -995,16 +1002,16 @@ impl SignatureVerifiedBlock { // Gossip verification has already checked the proposer index. Use it to check the RANDAO // signature. 
- let verified_proposer_index = Some(block.message().proposer_index()); + let mut consensus_context = from.consensus_context; signature_verifier - .include_all_signatures_except_proposal(&block, verified_proposer_index)?; + .include_all_signatures_except_proposal(&block, &mut consensus_context)?; if signature_verifier.verify().is_ok() { Ok(Self { block, block_root: from.block_root, parent: Some(parent), - consensus_context: from.consensus_context, + consensus_context, }) } else { Err(BlockError::InvalidSignature) @@ -1032,6 +1039,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc self, block_root: Hash256, chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo>> { let header = self.block.signed_block_header(); let (parent, block) = if let Some(parent) = self.parent { @@ -1047,6 +1055,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc parent, self.consensus_context, chain, + notify_execution_layer, ) .map_err(|e| BlockSlashInfo::SignatureValid(header, e)) } @@ -1063,13 +1072,14 @@ impl IntoExecutionPendingBlock for Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo>> { // Perform an early check to prevent wasting time on irrelevant blocks. let block_root = check_block_relevancy(&self, block_root, chain) .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; SignatureVerifiedBlock::check_slashable(self, block_root, chain)? 
- .into_execution_pending_block_slashable(block_root, chain) + .into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) } fn block(&self) -> &SignedBeaconBlock { @@ -1091,6 +1101,7 @@ impl ExecutionPendingBlock { parent: PreProcessingSnapshot, mut consensus_context: ConsensusContext, chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result> { if let Some(parent) = chain .canonical_head @@ -1127,6 +1138,79 @@ impl ExecutionPendingBlock { check_block_relevancy(&block, block_root, chain)?; + // Define a future that will verify the execution payload with an execution engine. + // + // We do this as early as possible so that later parts of this function can run in parallel + // with the payload verification. + let payload_notifier = PayloadNotifier::new( + chain.clone(), + block.clone(), + &parent.pre_state, + notify_execution_layer, + )?; + let is_valid_merge_transition_block = + is_merge_transition_block(&parent.pre_state, block.message().body()); + let payload_verification_future = async move { + let chain = payload_notifier.chain.clone(); + let block = payload_notifier.block.clone(); + + // If this block triggers the merge, check to ensure that it references valid execution + // blocks. + // + // The specification defines this check inside `on_block` in the fork-choice specification, + // however we perform the check here for two reasons: + // + // - There's no point in importing a block that will fail fork choice, so it's best to fail + // early. + // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no + // calls to remote servers. + if is_valid_merge_transition_block { + validate_merge_block(&chain, block.message(), AllowOptimisticImport::Yes).await?; + }; + + // The specification declares that this should be run *inside* `per_block_processing`, + // however we run it here to keep `per_block_processing` pure (i.e., no calls to external + // servers). 
+ let payload_verification_status = payload_notifier.notify_new_payload().await?; + + // If the payload did not validate or invalidate the block, check to see if this block is + // valid for optimistic import. + if payload_verification_status.is_optimistic() { + let block_hash_opt = block + .message() + .body() + .execution_payload() + .map(|full_payload| full_payload.execution_payload.block_hash); + + // Ensure the block is a candidate for optimistic import. + if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? + { + warn!( + chain.log, + "Rejecting optimistic block"; + "block_hash" => ?block_hash_opt, + "msg" => "the execution engine is not synced" + ); + return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); + } + } + + Ok(PayloadVerificationOutcome { + payload_verification_status, + is_valid_merge_transition_block, + }) + }; + // Spawn the payload verification future as a new task, but don't wait for it to complete. + // The `payload_verification_future` will be awaited later to ensure verification completed + // successfully. + let payload_verification_handle = chain + .task_executor + .spawn_handle( + payload_verification_future, + "execution_payload_verification", + ) + .ok_or(BeaconChainError::RuntimeShutdown)?; + /* * Advance the given `parent.beacon_state` to the slot of the given `block`. */ @@ -1231,79 +1315,11 @@ impl ExecutionPendingBlock { summaries.push(summary); } } + metrics::stop_timer(catchup_timer); let block_slot = block.slot(); let state_current_epoch = state.current_epoch(); - // Define a future that will verify the execution payload with an execution engine (but - // don't execute it yet). 
- let payload_notifier = PayloadNotifier::new(chain.clone(), block.clone(), &state)?; - let is_valid_merge_transition_block = - is_merge_transition_block(&state, block.message().body()); - let payload_verification_future = async move { - let chain = payload_notifier.chain.clone(); - let block = payload_notifier.block.clone(); - - // If this block triggers the merge, check to ensure that it references valid execution - // blocks. - // - // The specification defines this check inside `on_block` in the fork-choice specification, - // however we perform the check here for two reasons: - // - // - There's no point in importing a block that will fail fork choice, so it's best to fail - // early. - // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no - // calls to remote servers. - if is_valid_merge_transition_block { - validate_merge_block(&chain, block.message(), AllowOptimisticImport::Yes).await?; - }; - - // The specification declares that this should be run *inside* `per_block_processing`, - // however we run it here to keep `per_block_processing` pure (i.e., no calls to external - // servers). - // - // It is important that this function is called *after* `per_slot_processing`, since the - // `randao` may change. - let payload_verification_status = payload_notifier.notify_new_payload().await?; - - // If the payload did not validate or invalidate the block, check to see if this block is - // valid for optimistic import. - if payload_verification_status.is_optimistic() { - let block_hash_opt = block - .message() - .body() - .execution_payload() - .map(|full_payload| full_payload.execution_payload.block_hash); - - // Ensure the block is a candidate for optimistic import. - if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? 
- { - warn!( - chain.log, - "Rejecting optimistic block"; - "block_hash" => ?block_hash_opt, - "msg" => "the execution engine is not synced" - ); - return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); - } - } - - Ok(PayloadVerificationOutcome { - payload_verification_status, - is_valid_merge_transition_block, - }) - }; - // Spawn the payload verification future as a new task, but don't wait for it to complete. - // The `payload_verification_future` will be awaited later to ensure verification completed - // successfully. - let payload_verification_handle = chain - .task_executor - .spawn_handle( - payload_verification_future, - "execution_payload_verification", - ) - .ok_or(BeaconChainError::RuntimeShutdown)?; - // If the block is sufficiently recent, notify the validator monitor. if let Some(slot) = chain.slot_clock.now() { let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); @@ -1330,8 +1346,6 @@ impl ExecutionPendingBlock { } } - metrics::stop_timer(catchup_timer); - /* * Build the committee caches on the state. */ @@ -1421,6 +1435,44 @@ impl ExecutionPendingBlock { }); } + /* + * Apply the block's attestations to fork choice. + * + * We're running in parallel with the payload verification at this point, so this is + * free real estate. + */ + let current_slot = chain.slot()?; + let mut fork_choice = chain.canonical_head.fork_choice_write_lock(); + + // Register each attester slashing in the block with fork choice. + for attester_slashing in block.message().body().attester_slashings() { + fork_choice.on_attester_slashing(attester_slashing); + } + + // Register each attestation in the block with fork choice. 
+ for (i, attestation) in block.message().body().attestations().iter().enumerate() { + let _fork_choice_attestation_timer = + metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); + + let indexed_attestation = consensus_context + .get_indexed_attestation(&state, attestation) + .map_err(|e| BlockError::PerBlockProcessingError(e.into_with_index(i)))?; + + match fork_choice.on_attestation( + current_slot, + indexed_attestation, + AttestationFromBlock::True, + &chain.spec, + ) { + Ok(()) => Ok(()), + // Ignore invalid attestations whilst importing attestations from a block. The + // block might be very old and therefore the attestations useless to fork choice. + Err(ForkChoiceError::InvalidAttestation(_)) => Ok(()), + Err(e) => Err(BlockError::BeaconChainError(e.into())), + }?; + } + drop(fork_choice); + Ok(Self { block, block_root, @@ -1428,6 +1480,7 @@ impl ExecutionPendingBlock { parent_block: parent.beacon_block, parent_eth1_finalization_data, confirmed_state_roots, + consensus_context, payload_verification_handle, }) } diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index f970c5607e..286cc17a96 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -47,8 +47,6 @@ pub struct ChainConfig { pub count_unrealized_full: CountUnrealizedFull, /// Optionally set timeout for calls to checkpoint sync endpoint. pub checkpoint_sync_url_timeout: u64, - /// Whether to enable the light client server protocol. 
- pub enable_light_client_server: bool, } impl Default for ChainConfig { @@ -70,7 +68,6 @@ impl Default for ChainConfig { paranoid_block_proposal: false, count_unrealized_full: CountUnrealizedFull::default(), checkpoint_sync_url_timeout: 60, - enable_light_client_server: false, } } } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 2221d1fc7c..85f7629bb7 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -35,6 +35,16 @@ pub enum AllowOptimisticImport { No, } +/// Signal whether the execution payloads of new blocks should be +/// immediately verified with the EL or imported optimistically without +/// any EL communication. +#[derive(Default, Clone, Copy)] +pub enum NotifyExecutionLayer { + #[default] + Yes, + No, +} + /// Used to await the result of executing payload with a remote EE. pub struct PayloadNotifier { pub chain: Arc>, @@ -47,21 +57,28 @@ impl PayloadNotifier { chain: Arc>, block: Arc>, state: &BeaconState, + notify_execution_layer: NotifyExecutionLayer, ) -> Result> { - let payload_verification_status = if is_execution_enabled(state, block.message().body()) { - // Perform the initial stages of payload verification. - // - // We will duplicate these checks again during `per_block_processing`, however these checks - // are cheap and doing them here ensures we protect the execution engine from junk. - partially_verify_execution_payload( - state, - block.message().execution_payload()?, - &chain.spec, - ) - .map_err(BlockError::PerBlockProcessingError)?; - None - } else { - Some(PayloadVerificationStatus::Irrelevant) + let payload_verification_status = match notify_execution_layer { + NotifyExecutionLayer::No => Some(PayloadVerificationStatus::Optimistic), + NotifyExecutionLayer::Yes => { + if is_execution_enabled(state, block.message().body()) { + // Perform the initial stages of payload verification. 
+ // + // We will duplicate these checks again during `per_block_processing`, however these checks + // are cheap and doing them here ensures we protect the execution engine from junk. + partially_verify_execution_payload( + state, + block.slot(), + block.message().execution_payload()?, + &chain.spec, + ) + .map_err(BlockError::PerBlockProcessingError)?; + None + } else { + Some(PayloadVerificationStatus::Irrelevant) + } + } }; Ok(Self { @@ -357,7 +374,8 @@ pub fn get_execution_payload< let spec = &chain.spec; let current_epoch = state.current_epoch(); let is_merge_transition_complete = is_merge_transition_complete(state); - let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; + let timestamp = + compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?; let random = *state.get_randao_mix(current_epoch)?; let latest_execution_payload_header_block_hash = state.latest_execution_payload_header()?.block_hash; diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 5ead5311e5..3889fe4aa5 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -63,6 +63,7 @@ pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::ServerSentEventHandler; pub use execution_layer::EngineState; +pub use execution_payload::NotifyExecutionLayer; pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters}; pub use metrics::scrape_for_metrics; pub use parking_lot; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index ead4a54025..b37c5afc35 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -64,6 +64,11 @@ lazy_static! { "beacon_block_processing_state_root_seconds", "Time spent calculating the state root when processing a block." 
); + pub static ref BLOCK_PROCESSING_POST_EXEC_PROCESSING: Result = try_create_histogram_with_buckets( + "beacon_block_processing_post_exec_pre_attestable_seconds", + "Time between finishing execution processing and the block becoming attestable", + linear_buckets(5e-3, 5e-3, 10) + ); pub static ref BLOCK_PROCESSING_DB_WRITE: Result = try_create_histogram( "beacon_block_processing_db_write_seconds", "Time spent writing a newly processed block and state to DB" diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index a1c7acf173..b88966b41a 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -2,7 +2,7 @@ pub use crate::persisted_beacon_chain::PersistedBeaconChain; pub use crate::{ beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, - BeaconChainError, ProduceBlockVerification, + BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification, }; use crate::{ builder::{BeaconChainBuilder, Witness}, @@ -586,7 +586,7 @@ where pub fn get_timestamp_at_slot(&self) -> u64 { let state = self.get_current_state(); - compute_timestamp_at_slot(&state, &self.spec).unwrap() + compute_timestamp_at_slot(&state, state.slot(), &self.spec).unwrap() } pub fn get_current_state_and_root(&self) -> (BeaconState, Hash256) { @@ -1460,7 +1460,12 @@ where self.set_current_slot(slot); let block_hash: SignedBeaconBlockHash = self .chain - .process_block(block_root, Arc::new(block), CountUnrealized::True) + .process_block( + block_root, + Arc::new(block), + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await? .into(); self.chain.recompute_head_at_current_slot().await; @@ -1477,6 +1482,7 @@ where block.canonical_root(), Arc::new(block), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await? 
.into(); diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 60fdb607c8..26aea2d272 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -3,7 +3,8 @@ use crate::{BeaconChainTypes, BeaconStore}; use ssz::{Decode, Encode}; use std::collections::HashMap; use std::convert::TryInto; -use store::{DBColumn, Error as StoreError, StoreItem}; +use std::marker::PhantomData; +use store::{DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp, StoreItem}; use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes}; /// Provides a mapping of `validator_index -> validator_publickey`. @@ -14,21 +15,17 @@ use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes}; /// 2. To reduce the amount of public key _decompression_ required. A `BeaconState` stores public /// keys in compressed form and they are needed in decompressed form for signature verification. /// Decompression is expensive when many keys are involved. -/// -/// The cache has a `backing` that it uses to maintain a persistent, on-disk -/// copy of itself. This allows it to be restored between process invocations. pub struct ValidatorPubkeyCache { pubkeys: Vec, indices: HashMap, pubkey_bytes: Vec, - store: BeaconStore, + _phantom: PhantomData, } impl ValidatorPubkeyCache { /// Create a new public key cache using the keys in `state.validators`. /// - /// Also creates a new persistence file, returning an error if there is already a file at - /// `persistence_path`. + /// The new cache will be updated with the keys from `state` and immediately written to disk. 
pub fn new( state: &BeaconState, store: BeaconStore, @@ -37,10 +34,11 @@ impl ValidatorPubkeyCache { pubkeys: vec![], indices: HashMap::new(), pubkey_bytes: vec![], - store, + _phantom: PhantomData, }; - cache.import_new_pubkeys(state)?; + let store_ops = cache.import_new_pubkeys(state)?; + store.hot_db.do_atomically(store_ops)?; Ok(cache) } @@ -69,17 +67,19 @@ impl ValidatorPubkeyCache { pubkeys, indices, pubkey_bytes, - store, + _phantom: PhantomData, }) } /// Scan the given `state` and add any new validator public keys. /// /// Does not delete any keys from `self` if they don't appear in `state`. + /// + /// NOTE: The caller *must* commit the returned I/O batch as part of the block import process. pub fn import_new_pubkeys( &mut self, state: &BeaconState, - ) -> Result<(), BeaconChainError> { + ) -> Result, BeaconChainError> { if state.validators().len() > self.pubkeys.len() { self.import( state.validators()[self.pubkeys.len()..] @@ -87,12 +87,12 @@ impl ValidatorPubkeyCache { .map(|v| v.pubkey), ) } else { - Ok(()) + Ok(vec![]) } } /// Adds zero or more validators to `self`. - fn import(&mut self, validator_keys: I) -> Result<(), BeaconChainError> + fn import(&mut self, validator_keys: I) -> Result, BeaconChainError> where I: Iterator + ExactSizeIterator, { @@ -100,6 +100,7 @@ impl ValidatorPubkeyCache { self.pubkeys.reserve(validator_keys.len()); self.indices.reserve(validator_keys.len()); + let mut store_ops = Vec::with_capacity(validator_keys.len()); for pubkey in validator_keys { let i = self.pubkeys.len(); @@ -107,17 +108,11 @@ impl ValidatorPubkeyCache { return Err(BeaconChainError::DuplicateValidatorPublicKey); } - // The item is written to disk _before_ it is written into - // the local struct. - // - // This means that a pubkey cache read from disk will always be equivalent to or - // _later than_ the cache that was running in the previous instance of Lighthouse. 
- // - // The motivation behind this ordering is that we do not want to have states that - // reference a pubkey that is not in our cache. However, it's fine to have pubkeys - // that are never referenced in a state. - self.store - .put_item(&DatabasePubkey::key_for_index(i), &DatabasePubkey(pubkey))?; + // Stage the new validator key for writing to disk. + // It will be committed atomically when the block that introduced it is written to disk. + // Notably it is NOT written while the write lock on the cache is held. + // See: https://github.com/sigp/lighthouse/issues/2327 + store_ops.push(DatabasePubkey(pubkey).as_kv_store_op(DatabasePubkey::key_for_index(i))); self.pubkeys.push( (&pubkey) @@ -129,7 +124,7 @@ impl ValidatorPubkeyCache { self.indices.insert(pubkey, i); } - Ok(()) + Ok(store_ops) } /// Get the public key for a validator with index `i`. @@ -296,9 +291,10 @@ mod test { // Add some more keypairs. let (state, keypairs) = get_state(12); - cache + let ops = cache .import_new_pubkeys(&state) .expect("should import pubkeys"); + store.hot_db.do_atomically(ops).unwrap(); check_cache_get(&cache, &keypairs[..]); drop(cache); diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 998f22f770..38a55e2212 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -3,7 +3,7 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; -use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult}; +use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult, NotifyExecutionLayer}; use fork_choice::CountUnrealized; use lazy_static::lazy_static; use logging::test_logger; @@ -147,14 +147,18 @@ async fn chain_segment_full_segment() { // Sneak in a little check to ensure we can process empty chain segments. 
harness .chain - .process_chain_segment(vec![], CountUnrealized::True) + .process_chain_segment(vec![], CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error() .expect("should import empty chain segment"); harness .chain - .process_chain_segment(blocks.clone(), CountUnrealized::True) + .process_chain_segment( + blocks.clone(), + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await .into_block_error() .expect("should import chain segment"); @@ -183,7 +187,11 @@ async fn chain_segment_varying_chunk_size() { for chunk in blocks.chunks(*chunk_size) { harness .chain - .process_chain_segment(chunk.to_vec(), CountUnrealized::True) + .process_chain_segment( + chunk.to_vec(), + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await .into_block_error() .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); @@ -219,7 +227,7 @@ async fn chain_segment_non_linear_parent_roots() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::NonLinearParentRoots) @@ -239,7 +247,7 @@ async fn chain_segment_non_linear_parent_roots() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::NonLinearParentRoots) @@ -270,7 +278,7 @@ async fn chain_segment_non_linear_slots() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::NonLinearSlots) @@ -291,7 +299,7 @@ async fn chain_segment_non_linear_slots() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await 
.into_block_error(), Err(BlockError::NonLinearSlots) @@ -317,7 +325,7 @@ async fn assert_invalid_signature( matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -339,7 +347,11 @@ async fn assert_invalid_signature( // imported prior to this test. let _ = harness .chain - .process_chain_segment(ancestor_blocks, CountUnrealized::True) + .process_chain_segment( + ancestor_blocks, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await; harness.chain.recompute_head_at_current_slot().await; @@ -349,6 +361,7 @@ async fn assert_invalid_signature( snapshots[block_index].beacon_block.canonical_root(), snapshots[block_index].beacon_block.clone(), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await; assert!( @@ -400,7 +413,11 @@ async fn invalid_signature_gossip_block() { .collect(); harness .chain - .process_chain_segment(ancestor_blocks, CountUnrealized::True) + .process_chain_segment( + ancestor_blocks, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await .into_block_error() .expect("should import all blocks prior to the one being tested"); @@ -412,7 +429,8 @@ async fn invalid_signature_gossip_block() { .process_block( signed_block.canonical_root(), Arc::new(signed_block), - CountUnrealized::True + CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await, Err(BlockError::InvalidSignature) @@ -446,7 +464,7 @@ async fn invalid_signature_block_proposal() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -644,7 +662,7 @@ async fn invalid_signature_deposit() { !matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, 
NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -725,6 +743,7 @@ async fn block_gossip_verification() { gossip_verified.block_root, gossip_verified, CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .expect("should import valid gossip verified block"); @@ -996,6 +1015,7 @@ async fn verify_block_for_gossip_slashing_detection() { verified_block.block_root, verified_block, CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); @@ -1035,6 +1055,7 @@ async fn verify_block_for_gossip_doppelganger_detection() { verified_block.block_root, verified_block, CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); @@ -1180,7 +1201,8 @@ async fn add_base_block_to_altair_chain() { .process_block( base_block.canonical_root(), Arc::new(base_block.clone()), - CountUnrealized::True + CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .err() @@ -1195,7 +1217,11 @@ async fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .process_chain_segment(vec![Arc::new(base_block)], CountUnrealized::True) + .process_chain_segment( + vec![Arc::new(base_block)], + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await, ChainSegmentResult::Failed { imported_blocks: 0, @@ -1313,7 +1339,8 @@ async fn add_altair_block_to_base_chain() { .process_block( altair_block.canonical_root(), Arc::new(altair_block.clone()), - CountUnrealized::True + CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .err() @@ -1328,7 +1355,11 @@ async fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .process_chain_segment(vec![Arc::new(altair_block)], CountUnrealized::True) + .process_chain_segment( + vec![Arc::new(altair_block)], + CountUnrealized::True, + NotifyExecutionLayer::Yes + ) .await, ChainSegmentResult::Failed { imported_blocks: 0, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 
2336c3ba99..d77cc19678 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -7,8 +7,8 @@ use beacon_chain::otb_verification_service::{ use beacon_chain::{ canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainError, BlockError, ExecutionPayloadError, StateSkipConfig, WhenSlotSkipped, - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer, StateSkipConfig, + WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ @@ -693,6 +693,7 @@ async fn invalidates_all_descendants() { fork_block.canonical_root(), Arc::new(fork_block), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); @@ -789,6 +790,7 @@ async fn switches_heads() { fork_block.canonical_root(), Arc::new(fork_block), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); @@ -1035,7 +1037,7 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for import. 
assert!(matches!( - rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True).await, + rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True, NotifyExecutionLayer::Yes).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); @@ -1317,7 +1319,12 @@ async fn build_optimistic_chain( for block in blocks { rig.harness .chain - .process_block(block.canonical_root(), block, CountUnrealized::True) + .process_block( + block.canonical_root(), + block, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await .unwrap(); } @@ -1879,6 +1886,7 @@ async fn recover_from_invalid_head_by_importing_blocks() { fork_block.canonical_root(), fork_block.clone(), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index b1907bc96e..b2fc7a6402 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -7,8 +7,8 @@ use beacon_chain::test_utils::{ }; use beacon_chain::{ historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain, - BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, ServerSentEventHandler, - WhenSlotSkipped, + BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, NotifyExecutionLayer, + ServerSentEventHandler, WhenSlotSkipped, }; use fork_choice::CountUnrealized; use lazy_static::lazy_static; @@ -2148,6 +2148,7 @@ async fn weak_subjectivity_sync() { full_block.canonical_root(), Arc::new(full_block), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index a13946bf2b..d80db132ef 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -6,7 +6,7 @@ use 
beacon_chain::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, OP_POOL_DB_KEY, }, - BeaconChain, StateSkipConfig, WhenSlotSkipped, + BeaconChain, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, }; use fork_choice::CountUnrealized; use lazy_static::lazy_static; @@ -687,7 +687,8 @@ async fn run_skip_slot_test(skip_slots: u64) { .process_block( harness_a.chain.head_snapshot().beacon_block_root, harness_a.chain.head_snapshot().beacon_block.clone(), - CountUnrealized::True + CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(), diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 5e43c1eaad..0a2997762a 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -42,7 +42,7 @@ pub enum ClientGenesis { /// The core configuration of a Lighthouse beacon node. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { - pub data_dir: PathBuf, + data_dir: PathBuf, /// Name of the directory inside the data directory where the main "hot" DB is located. pub db_name: String, /// Path where the freezer database will be located. @@ -103,6 +103,17 @@ impl Default for Config { } impl Config { + /// Updates the data directory for the Client. + pub fn set_data_dir(&mut self, data_dir: PathBuf) { + self.data_dir = data_dir.clone(); + self.http_api.data_dir = data_dir; + } + + /// Gets the config's data_dir. + pub fn data_dir(&self) -> &PathBuf { + &self.data_dir + } + /// Get the database path without initialising it. 
pub fn get_db_path(&self) -> PathBuf { self.get_data_dir().join(&self.db_name) diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index f24b746cd4..31082394ba 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -751,10 +751,11 @@ impl Service { let deposit_count_to_finalize = eth1data_to_finalize.deposit_count; if deposit_count_to_finalize > already_finalized { match self.finalize_deposits(eth1data_to_finalize) { - Err(e) => error!( + Err(e) => warn!( self.log, "Failed to finalize deposit cache"; "error" => ?e, + "info" => "this should resolve on its own" ), Ok(()) => info!( self.log, @@ -814,9 +815,10 @@ impl Service { .block_by_hash(ð1_data.block_hash) .cloned() .ok_or_else(|| { - Error::FailedToFinalizeDeposit( - "Finalized block not found in block cache".to_string(), - ) + Error::FailedToFinalizeDeposit(format!( + "Finalized block not found in block cache: {:?}", + eth1_data.block_hash + )) })?; self.inner .deposit_cache diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index f222f28c33..2a2225cbdf 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -12,6 +12,7 @@ pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; use engines::{Engine, EngineError}; pub use engines::{EngineState, ForkChoiceState}; +use eth2::types::{builder_bid::SignedBuilderBid, ForkVersionedResponse}; use fork_choice::ForkchoiceUpdateParameters; use lru::LruCache; use payload_status::process_payload_status; @@ -21,11 +22,13 @@ use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use std::collections::HashMap; +use std::fmt; use std::future::Future; use std::io::Write; use std::path::PathBuf; use std::sync::Arc; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +use 
strum::AsRefStr; use task_executor::TaskExecutor; use tokio::{ sync::{Mutex, MutexGuard, RwLock}, @@ -34,7 +37,7 @@ use tokio::{ use tokio_stream::wrappers::WatchStream; use types::{ BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, ForkName, - ProposerPreparationData, PublicKeyBytes, SignedBeaconBlock, Slot, + ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256, }; mod engine_api; @@ -66,6 +69,14 @@ const DEFAULT_SUGGESTED_FEE_RECIPIENT: [u8; 20] = const CONFIG_POLL_INTERVAL: Duration = Duration::from_secs(60); +/// A payload alongside some information about where it came from. +enum ProvenancedPayload

{ + /// A good ol' fashioned farm-to-table payload from your local EE. + Local(P), + /// A payload from a builder (e.g. mev-boost). + Builder(P), +} + #[derive(Debug)] pub enum Error { NoEngine, @@ -73,6 +84,7 @@ pub enum Error { ApiError(ApiError), Builder(builder_client::Error), NoHeaderFromBuilder, + CannotProduceHeader, EngineError(Box), NotSynced, ShuttingDown, @@ -550,7 +562,7 @@ impl ExecutionLayer { ) -> Result { let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; - match Payload::block_type() { + let payload_result = match Payload::block_type() { BlockType::Blinded => { let _timer = metrics::start_timer_vec( &metrics::EXECUTION_LAYER_REQUEST_TIMES, @@ -580,6 +592,40 @@ impl ExecutionLayer { forkchoice_update_params, ) .await + .map(ProvenancedPayload::Local) + } + }; + + // Track some metrics and return the result. + match payload_result { + Ok(ProvenancedPayload::Local(payload)) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, + &[metrics::SUCCESS], + ); + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE, + &[metrics::LOCAL], + ); + Ok(payload) + } + Ok(ProvenancedPayload::Builder(payload)) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, + &[metrics::SUCCESS], + ); + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE, + &[metrics::BUILDER], + ); + Ok(payload) + } + Err(e) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, + &[metrics::FAILURE], + ); + Err(e) } } } @@ -594,7 +640,7 @@ impl ExecutionLayer { forkchoice_update_params: ForkchoiceUpdateParameters, builder_params: BuilderParams, spec: &ChainSpec, - ) -> Result { + ) -> Result, Error> { if let Some(builder) = self.builder() { let slot = builder_params.slot; let pubkey = builder_params.pubkey; @@ -608,130 +654,202 @@ impl ExecutionLayer { "pubkey" => ?pubkey, "parent_hash" => ?parent_hash, ); - let (relay_result, 
local_result) = tokio::join!( - builder.get_builder_header::(slot, parent_hash, &pubkey), - self.get_full_payload_caching( - parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, - forkchoice_update_params, - ) + + // Wait for the builder *and* local EL to produce a payload (or return an error). + let ((relay_result, relay_duration), (local_result, local_duration)) = tokio::join!( + timed_future(metrics::GET_BLINDED_PAYLOAD_BUILDER, async { + builder + .get_builder_header::(slot, parent_hash, &pubkey) + .await + }), + timed_future(metrics::GET_BLINDED_PAYLOAD_LOCAL, async { + self.get_full_payload_caching::( + parent_hash, + timestamp, + prev_randao, + suggested_fee_recipient, + forkchoice_update_params, + ) + .await + }) + ); + + info!( + self.log(), + "Requested blinded execution payload"; + "relay_fee_recipient" => match &relay_result { + Ok(Some(r)) => format!("{:?}", r.data.message.header.fee_recipient()), + Ok(None) => "empty response".to_string(), + Err(_) => "request failed".to_string(), + }, + "relay_response_ms" => relay_duration.as_millis(), + "local_fee_recipient" => match &local_result { + Ok(header) => format!("{:?}", header.fee_recipient()), + Err(_) => "request failed".to_string() + }, + "local_response_ms" => local_duration.as_millis(), + "parent_hash" => ?parent_hash, ); return match (relay_result, local_result) { (Err(e), Ok(local)) => { warn!( self.log(), - "Unable to retrieve a payload from a connected \ - builder, falling back to the local execution client: {e:?}" + "Builder error when requesting payload"; + "info" => "falling back to local execution client", + "relay_error" => ?e, + "local_block_hash" => ?local.block_hash(), + "parent_hash" => ?parent_hash, ); - Ok(local) + Ok(ProvenancedPayload::Local(local)) } (Ok(None), Ok(local)) => { info!( self.log(), - "No payload provided by connected builder. 
\ - Attempting to propose through local execution engine" + "Builder did not return a payload"; + "info" => "falling back to local execution client", + "local_block_hash" => ?local.block_hash(), + "parent_hash" => ?parent_hash, ); - Ok(local) + Ok(ProvenancedPayload::Local(local)) } (Ok(Some(relay)), Ok(local)) => { - let is_signature_valid = relay.data.verify_signature(spec); - let header = relay.data.message.header; + let header = &relay.data.message.header; info!( self.log(), - "Received a payload header from the connected builder"; - "block_hash" => ?header.block_hash(), + "Received local and builder payloads"; + "relay_block_hash" => ?header.block_hash(), + "local_block_hash" => ?local.block_hash(), + "parent_hash" => ?parent_hash, ); - let relay_value = relay.data.message.value; - let configured_value = self.inner.builder_profit_threshold; - if relay_value < configured_value { - info!( - self.log(), - "The value offered by the connected builder does not meet \ - the configured profit threshold. Using local payload."; - "configured_value" => ?configured_value, "relay_value" => ?relay_value - ); - Ok(local) - } else if header.parent_hash() != parent_hash { - warn!( - self.log(), - "Invalid parent hash from connected builder, \ - falling back to local execution engine." - ); - Ok(local) - } else if header.prev_randao() != prev_randao { - warn!( - self.log(), - "Invalid prev randao from connected builder, \ - falling back to local execution engine." - ); - Ok(local) - } else if header.timestamp() != local.timestamp() { - warn!( - self.log(), - "Invalid timestamp from connected builder, \ - falling back to local execution engine." - ); - Ok(local) - } else if header.block_number() != local.block_number() { - warn!( - self.log(), - "Invalid block number from connected builder, \ - falling back to local execution engine." 
- ); - Ok(local) - } else if !matches!(relay.version, Some(ForkName::Merge)) { - // Once fork information is added to the payload, we will need to - // check that the local and relay payloads match. At this point, if - // we are requesting a payload at all, we have to assume this is - // the Bellatrix fork. - warn!( - self.log(), - "Invalid fork from connected builder, falling \ - back to local execution engine." - ); - Ok(local) - } else if !is_signature_valid { - let pubkey_bytes = relay.data.message.pubkey; - warn!(self.log(), "Invalid signature for pubkey {pubkey_bytes} on \ - bid from connected builder, falling back to local execution engine."); - Ok(local) - } else { - if header.fee_recipient() != suggested_fee_recipient { + match verify_builder_bid( + &relay, + parent_hash, + prev_randao, + timestamp, + Some(local.block_number()), + self.inner.builder_profit_threshold, + spec, + ) { + Ok(()) => { + Ok(ProvenancedPayload::Builder(relay.data.message.header)) + } + Err(reason) if !reason.payload_invalid() => { info!( self.log(), - "Fee recipient from connected builder does \ - not match, using it anyways." + "Builder payload ignored"; + "info" => "using local payload", + "reason" => %reason, + "relay_block_hash" => ?header.block_hash(), + "parent_hash" => ?parent_hash, ); + Ok(ProvenancedPayload::Local(local)) + } + Err(reason) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS, + &[reason.as_ref().as_ref()], + ); + warn!( + self.log(), + "Builder returned invalid payload"; + "info" => "using local payload", + "reason" => %reason, + "relay_block_hash" => ?header.block_hash(), + "parent_hash" => ?parent_hash, + ); + Ok(ProvenancedPayload::Local(local)) } - Ok(header) } } - (relay_result, Err(local_error)) => { - warn!(self.log(), "Failure from local execution engine. Attempting to \ - propose through connected builder"; "error" => ?local_error); - relay_result - .map_err(Error::Builder)? 
- .ok_or(Error::NoHeaderFromBuilder) - .map(|d| d.data.message.header) + (Ok(Some(relay)), Err(local_error)) => { + let header = &relay.data.message.header; + + info!( + self.log(), + "Received builder payload with local error"; + "relay_block_hash" => ?header.block_hash(), + "local_error" => ?local_error, + "parent_hash" => ?parent_hash, + ); + + match verify_builder_bid( + &relay, + parent_hash, + prev_randao, + timestamp, + None, + self.inner.builder_profit_threshold, + spec, + ) { + Ok(()) => { + Ok(ProvenancedPayload::Builder(relay.data.message.header)) + } + // If the payload is valid then use it. The local EE failed + // to produce a payload so we have no alternative. + Err(e) if !e.payload_invalid() => { + Ok(ProvenancedPayload::Builder(relay.data.message.header)) + } + Err(reason) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS, + &[reason.as_ref().as_ref()], + ); + crit!( + self.log(), + "Builder returned invalid payload"; + "info" => "no local payload either - unable to propose block", + "reason" => %reason, + "relay_block_hash" => ?header.block_hash(), + "parent_hash" => ?parent_hash, + ); + Err(Error::CannotProduceHeader) + } + } + } + (Err(relay_error), Err(local_error)) => { + crit!( + self.log(), + "Unable to produce execution payload"; + "info" => "the local EL and builder both failed - unable to propose block", + "relay_error" => ?relay_error, + "local_error" => ?local_error, + "parent_hash" => ?parent_hash, + ); + + Err(Error::CannotProduceHeader) + } + (Ok(None), Err(local_error)) => { + crit!( + self.log(), + "Unable to produce execution payload"; + "info" => "the local EL failed and the builder returned nothing - \ + the block proposal will be missed", + "local_error" => ?local_error, + "parent_hash" => ?parent_hash, + ); + + Err(Error::CannotProduceHeader) } }; } - ChainHealth::Unhealthy(condition) => { - info!(self.log(), "Due to poor chain health the local execution engine will be used \ - for 
payload construction. To adjust chain health conditions \ - Use `builder-fallback` prefixed flags"; - "failed_condition" => ?condition) - } + ChainHealth::Unhealthy(condition) => info!( + self.log(), + "Chain is unhealthy, using local payload"; + "info" => "this helps protect the network. the --builder-fallback flags \ + can adjust the expected health conditions.", + "failed_condition" => ?condition + ), // Intentional no-op, so we never attempt builder API proposals pre-merge. ChainHealth::PreMerge => (), - ChainHealth::Optimistic => info!(self.log(), "The local execution engine is syncing \ - so the builder network cannot safely be used. Attempting \ - to build a block with the local execution engine"), + ChainHealth::Optimistic => info!( + self.log(), + "Chain is optimistic; can't build payload"; + "info" => "the local execution engine is syncing and the builder network \ + cannot safely be used - unable to propose block" + ), } } self.get_full_payload_caching( @@ -742,6 +860,7 @@ impl ExecutionLayer { forkchoice_update_params, ) .await + .map(ProvenancedPayload::Local) } /// Get a full payload without caching its result in the execution layer's payload cache. 
@@ -1404,18 +1523,223 @@ impl ExecutionLayer { "Sending block to builder"; "root" => ?block_root, ); + if let Some(builder) = self.builder() { - builder - .post_builder_blinded_blocks(block) - .await - .map_err(Error::Builder) - .map(|d| d.data) + let (payload_result, duration) = + timed_future(metrics::POST_BLINDED_PAYLOAD_BUILDER, async { + builder + .post_builder_blinded_blocks(block) + .await + .map_err(Error::Builder) + .map(|d| d.data) + }) + .await; + + match &payload_result { + Ok(payload) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME, + &[metrics::SUCCESS], + ); + info!( + self.log(), + "Builder successfully revealed payload"; + "relay_response_ms" => duration.as_millis(), + "block_root" => ?block_root, + "fee_recipient" => ?payload.fee_recipient, + "block_hash" => ?payload.block_hash, + "parent_hash" => ?payload.parent_hash + ) + } + Err(e) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME, + &[metrics::FAILURE], + ); + crit!( + self.log(), + "Builder failed to reveal payload"; + "info" => "this relay failure may cause a missed proposal", + "error" => ?e, + "relay_response_ms" => duration.as_millis(), + "block_root" => ?block_root, + "parent_hash" => ?block + .message() + .execution_payload() + .map(|payload| format!("{}", payload.parent_hash())) + .unwrap_or_else(|_| "unknown".to_string()) + ) + } + } + + payload_result } else { Err(Error::NoPayloadBuilder) } } } +#[derive(AsRefStr)] +#[strum(serialize_all = "snake_case")] +enum InvalidBuilderPayload { + LowValue { + profit_threshold: Uint256, + payload_value: Uint256, + }, + ParentHash { + payload: ExecutionBlockHash, + expected: ExecutionBlockHash, + }, + PrevRandao { + payload: Hash256, + expected: Hash256, + }, + Timestamp { + payload: u64, + expected: u64, + }, + BlockNumber { + payload: u64, + expected: Option, + }, + Fork { + payload: Option, + expected: ForkName, + }, + Signature { + signature: Signature, + 
pubkey: PublicKeyBytes, + }, +} + +impl InvalidBuilderPayload { + /// Returns `true` if a payload is objectively invalid and should never be included on chain. + fn payload_invalid(&self) -> bool { + match self { + // A low-value payload isn't invalid, it should just be avoided if possible. + InvalidBuilderPayload::LowValue { .. } => false, + InvalidBuilderPayload::ParentHash { .. } => true, + InvalidBuilderPayload::PrevRandao { .. } => true, + InvalidBuilderPayload::Timestamp { .. } => true, + InvalidBuilderPayload::BlockNumber { .. } => true, + InvalidBuilderPayload::Fork { .. } => true, + InvalidBuilderPayload::Signature { .. } => true, + } + } +} + +impl fmt::Display for InvalidBuilderPayload { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + InvalidBuilderPayload::LowValue { + profit_threshold, + payload_value, + } => write!( + f, + "payload value of {} does not meet user-configured profit-threshold of {}", + payload_value, profit_threshold + ), + InvalidBuilderPayload::ParentHash { payload, expected } => { + write!(f, "payload block hash was {} not {}", payload, expected) + } + InvalidBuilderPayload::PrevRandao { payload, expected } => { + write!(f, "payload prev randao was {} not {}", payload, expected) + } + InvalidBuilderPayload::Timestamp { payload, expected } => { + write!(f, "payload timestamp was {} not {}", payload, expected) + } + InvalidBuilderPayload::BlockNumber { payload, expected } => { + write!(f, "payload block number was {} not {:?}", payload, expected) + } + InvalidBuilderPayload::Fork { payload, expected } => { + write!(f, "payload fork was {:?} not {}", payload, expected) + } + InvalidBuilderPayload::Signature { signature, pubkey } => write!( + f, + "invalid payload signature {} for pubkey {}", + signature, pubkey + ), + } + } +} + +/// Perform some cursory, non-exhaustive validation of the bid returned from the builder. 
+fn verify_builder_bid>( + bid: &ForkVersionedResponse>, + parent_hash: ExecutionBlockHash, + prev_randao: Hash256, + timestamp: u64, + block_number: Option, + profit_threshold: Uint256, + spec: &ChainSpec, +) -> Result<(), Box> { + let is_signature_valid = bid.data.verify_signature(spec); + let header = &bid.data.message.header; + let payload_value = bid.data.message.value; + + // Avoid logging values that we can't represent with our Prometheus library. + let payload_value_gwei = bid.data.message.value / 1_000_000_000; + if payload_value_gwei <= Uint256::from(i64::max_value()) { + metrics::set_gauge_vec( + &metrics::EXECUTION_LAYER_PAYLOAD_BIDS, + &[metrics::BUILDER], + payload_value_gwei.low_u64() as i64, + ); + } + + if payload_value < profit_threshold { + Err(Box::new(InvalidBuilderPayload::LowValue { + profit_threshold, + payload_value, + })) + } else if header.parent_hash() != parent_hash { + Err(Box::new(InvalidBuilderPayload::ParentHash { + payload: header.parent_hash(), + expected: parent_hash, + })) + } else if header.prev_randao() != prev_randao { + Err(Box::new(InvalidBuilderPayload::PrevRandao { + payload: header.prev_randao(), + expected: prev_randao, + })) + } else if header.timestamp() != timestamp { + Err(Box::new(InvalidBuilderPayload::Timestamp { + payload: header.timestamp(), + expected: timestamp, + })) + } else if block_number.map_or(false, |n| n != header.block_number()) { + Err(Box::new(InvalidBuilderPayload::BlockNumber { + payload: header.block_number(), + expected: block_number, + })) + } else if !matches!(bid.version, Some(ForkName::Merge)) { + // Once fork information is added to the payload, we will need to + // check that the local and relay payloads match. At this point, if + // we are requesting a payload at all, we have to assume this is + // the Bellatrix fork. 
+ Err(Box::new(InvalidBuilderPayload::Fork { + payload: bid.version, + expected: ForkName::Merge, + })) + } else if !is_signature_valid { + Err(Box::new(InvalidBuilderPayload::Signature { + signature: bid.data.signature.clone(), + pubkey: bid.data.message.pubkey, + })) + } else { + Ok(()) + } +} + +/// A helper function to record the time it takes to execute a future. +async fn timed_future, T>(metric: &str, future: F) -> (T, Duration) { + let start = Instant::now(); + let result = future.await; + let duration = start.elapsed(); + metrics::observe_timer_vec(&metrics::EXECUTION_LAYER_REQUEST_TIMES, &[metric], duration); + (result, duration) +} + #[cfg(test)] mod test { use super::*; diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index 9b00193a4a..bb5a1088d1 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -4,10 +4,17 @@ pub const HIT: &str = "hit"; pub const MISS: &str = "miss"; pub const GET_PAYLOAD: &str = "get_payload"; pub const GET_BLINDED_PAYLOAD: &str = "get_blinded_payload"; +pub const GET_BLINDED_PAYLOAD_LOCAL: &str = "get_blinded_payload_local"; +pub const GET_BLINDED_PAYLOAD_BUILDER: &str = "get_blinded_payload_builder"; +pub const POST_BLINDED_PAYLOAD_BUILDER: &str = "post_blinded_payload_builder"; pub const NEW_PAYLOAD: &str = "new_payload"; pub const FORKCHOICE_UPDATED: &str = "forkchoice_updated"; pub const GET_TERMINAL_POW_BLOCK_HASH: &str = "get_terminal_pow_block_hash"; pub const IS_VALID_TERMINAL_POW_BLOCK_HASH: &str = "is_valid_terminal_pow_block_hash"; +pub const LOCAL: &str = "local"; +pub const BUILDER: &str = "builder"; +pub const SUCCESS: &str = "success"; +pub const FAILURE: &str = "failure"; lazy_static::lazy_static! { pub static ref EXECUTION_LAYER_PROPOSER_INSERTED: Result = try_create_int_counter( @@ -18,9 +25,11 @@ lazy_static::lazy_static! 
{ "execution_layer_proposer_data_updated", "Count of times new proposer data is supplied", ); - pub static ref EXECUTION_LAYER_REQUEST_TIMES: Result = try_create_histogram_vec( + pub static ref EXECUTION_LAYER_REQUEST_TIMES: Result = + try_create_histogram_vec_with_buckets( "execution_layer_request_times", "Duration of calls to ELs", + decimal_buckets(-2, 1), &["method"] ); pub static ref EXECUTION_LAYER_PAYLOAD_ATTRIBUTES_LOOKAHEAD: Result = try_create_histogram( @@ -41,4 +50,29 @@ lazy_static::lazy_static! { "Indicates the payload status returned for a particular method", &["method", "status"] ); + pub static ref EXECUTION_LAYER_GET_PAYLOAD_OUTCOME: Result = try_create_int_counter_vec( + "execution_layer_get_payload_outcome", + "The success/failure outcomes from calling get_payload", + &["outcome"] + ); + pub static ref EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME: Result = try_create_int_counter_vec( + "execution_layer_builder_reveal_payload_outcome", + "The success/failure outcomes from a builder un-blinding a payload", + &["outcome"] + ); + pub static ref EXECUTION_LAYER_GET_PAYLOAD_SOURCE: Result = try_create_int_counter_vec( + "execution_layer_get_payload_source", + "The source of each payload returned from get_payload", + &["source"] + ); + pub static ref EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS: Result = try_create_int_counter_vec( + "execution_layer_get_payload_builder_rejections", + "The reasons why a payload from a builder was rejected", + &["reason"] + ); + pub static ref EXECUTION_LAYER_PAYLOAD_BIDS: Result = try_create_int_gauge_vec( + "execution_layer_payload_bids", + "The gwei bid value of payloads received by local EEs or builders. 
Only shows values up to i64::max_value.", + &["source"] + ); } diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index 58f28702b0..aaf6a7bea1 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -24,7 +24,7 @@ pub fn new_env() -> Environment { #[test] fn basic() { - let mut env = new_env(); + let env = new_env(); let log = env.core_context().log().clone(); let mut spec = env.eth2_config().spec.clone(); diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index fedd66c540..077e3aa7cd 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -33,6 +33,9 @@ safe_arith = {path = "../../consensus/safe_arith"} task_executor = { path = "../../common/task_executor" } lru = "0.7.7" tree_hash = "0.4.1" +sysinfo = "0.26.5" +system_health = { path = "../../common/system_health" } +directory = { path = "../../common/directory" } [dev-dependencies] store = { path = "../store" } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 01cc63ecea..7f6852f364 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -26,12 +26,14 @@ use beacon_chain::{ BeaconChainTypes, ProduceBlockVerification, WhenSlotSkipped, }; pub use block_id::BlockId; +use directory::DEFAULT_ROOT_DIR; use eth2::types::{ self as api_types, EndpointVersion, SkipRandaoVerification, ValidatorId, ValidatorStatus, }; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; +use parking_lot::RwLock; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; @@ -43,6 +45,8 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; use std::pin::Pin; use std::sync::Arc; +use sysinfo::{System, SystemExt}; +use 
system_health::observe_system_health_bn; use tokio::sync::mpsc::{Sender, UnboundedSender}; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ @@ -110,6 +114,7 @@ pub struct Config { pub tls_config: Option, pub allow_sync_stalled: bool, pub spec_fork_name: Option, + pub data_dir: PathBuf, } impl Default for Config { @@ -122,6 +127,7 @@ impl Default for Config { tls_config: None, allow_sync_stalled: false, spec_fork_name: None, + data_dir: PathBuf::from(DEFAULT_ROOT_DIR), } } } @@ -323,6 +329,10 @@ pub fn serve( } }); + // Create a `warp` filter for the data_dir. + let inner_data_dir = ctx.config.data_dir.clone(); + let data_dir_filter = warp::any().map(move || inner_data_dir.clone()); + // Create a `warp` filter that provides access to the beacon chain. let inner_ctx = ctx.clone(); let chain_filter = @@ -431,6 +441,37 @@ pub fn serve( let inner_ctx = ctx.clone(); let log_filter = warp::any().map(move || inner_ctx.log.clone()); + // Create a `warp` filter that provides access to local system information. + let system_info = Arc::new(RwLock::new(sysinfo::System::new())); + { + // grab write access for initialisation + let mut system_info = system_info.write(); + system_info.refresh_disks_list(); + system_info.refresh_networks_list(); + system_info.refresh_cpu_specifics(sysinfo::CpuRefreshKind::everything()); + system_info.refresh_cpu(); + } // end lock + + let system_info_filter = + warp::any() + .map(move || system_info.clone()) + .map(|sysinfo: Arc>| { + { + // refresh stats + let mut sysinfo_lock = sysinfo.write(); + sysinfo_lock.refresh_memory(); + sysinfo_lock.refresh_cpu_specifics(sysinfo::CpuRefreshKind::everything()); + sysinfo_lock.refresh_cpu(); + sysinfo_lock.refresh_system(); + sysinfo_lock.refresh_networks(); + sysinfo_lock.refresh_disks(); + } // end lock + sysinfo + }); + + let app_start = std::time::Instant::now(); + let app_start_filter = warp::any().map(move || app_start); + /* * * Start of HTTP method definitions. 
@@ -2822,6 +2863,29 @@ pub fn serve( }) }); + // GET lighthouse/ui/health + let get_lighthouse_ui_health = warp::path("lighthouse") + .and(warp::path("ui")) + .and(warp::path("health")) + .and(warp::path::end()) + .and(system_info_filter) + .and(app_start_filter) + .and(data_dir_filter) + .and(network_globals.clone()) + .and_then( + |sysinfo, app_start: std::time::Instant, data_dir, network_globals| { + blocking_json_task(move || { + let app_uptime = app_start.elapsed().as_secs() as u64; + Ok(api_types::GenericResponse::from(observe_system_health_bn( + sysinfo, + data_dir, + app_uptime, + network_globals, + ))) + }) + }, + ); + // GET lighthouse/syncing let get_lighthouse_syncing = warp::path("lighthouse") .and(warp::path("syncing")) @@ -3271,6 +3335,7 @@ pub fn serve( .or(get_validator_aggregate_attestation.boxed()) .or(get_validator_sync_committee_contribution.boxed()) .or(get_lighthouse_health.boxed()) + .or(get_lighthouse_ui_health.boxed()) .or(get_lighthouse_syncing.boxed()) .or(get_lighthouse_nat.boxed()) .or(get_lighthouse_peers.boxed()) diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 3c50fb95a2..08355c1d37 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -1,6 +1,8 @@ use crate::metrics; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; -use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized}; +use beacon_chain::{ + BeaconChain, BeaconChainTypes, BlockError, CountUnrealized, NotifyExecutionLayer, +}; use lighthouse_network::PubsubMessage; use network::NetworkMessage; use slog::{crit, error, info, warn, Logger}; @@ -35,7 +37,12 @@ pub async fn publish_block( let block_root = block_root.unwrap_or_else(|| block.canonical_root()); match chain - .process_block(block_root, block.clone(), CountUnrealized::True) + .process_block( + block_root, + block.clone(), + CountUnrealized::True, + 
NotifyExecutionLayer::Yes, + ) .await { Ok(root) => { diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index eaf91ce9df..ec1448df7b 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -2,6 +2,7 @@ use beacon_chain::{ test_utils::{BeaconChainHarness, EphemeralHarnessType}, BeaconChain, BeaconChainTypes, }; +use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; use http_api::{Config, Context}; use lighthouse_network::{ @@ -142,6 +143,7 @@ pub async fn create_api_server_on_port( allow_origin: None, tls_config: None, allow_sync_stalled: false, + data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), spec_fork_name: None, }, chain: Some(chain.clone()), diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 71566b8778..0ae3d9a23b 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -130,6 +130,9 @@ pub struct Config { /// Whether metrics are enabled. pub metrics_enabled: bool, + + /// Whether light client protocols should be enabled. + pub enable_light_client_server: bool, } impl Default for Config { @@ -207,6 +210,7 @@ impl Default for Config { shutdown_after_sync: false, topics: Vec::new(), metrics_enabled: false, + enable_light_client_server: false, } } } @@ -284,9 +288,11 @@ impl From for NetworkLoad { /// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork. 
pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> GossipsubConfig { // The function used to generate a gossipsub message id - // We use the first 8 bytes of SHA256(data) for content addressing - let fast_gossip_message_id = - |message: &RawGossipsubMessage| FastMessageId::from(&Sha256::digest(&message.data)[..8]); + // We use the first 8 bytes of SHA256(topic, data) for content addressing + let fast_gossip_message_id = |message: &RawGossipsubMessage| { + let data = [message.topic.as_str().as_bytes(), &message.data].concat(); + FastMessageId::from(&Sha256::digest(data)[..8]) + }; fn prefix( prefix: [u8; 4], message: &GossipsubMessage, diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 3535c6bd9a..8e528f09d2 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -834,6 +834,17 @@ impl Discovery { // Map each subnet query's min_ttl to the set of ENR's returned for that subnet. queries.iter().for_each(|query| { + let query_str = match query.subnet { + Subnet::Attestation(_) => "attestation", + Subnet::SyncCommittee(_) => "sync_committee", + }; + + if let Some(v) = metrics::get_int_counter( + &metrics::TOTAL_SUBNET_QUERIES, + &[query_str], + ) { + v.inc(); + } // A subnet query has completed. Add back to the queue, incrementing retries. 
self.add_subnet_query(query.subnet, query.min_ttl, query.retries + 1); @@ -845,6 +856,12 @@ impl Discovery { .filter(|enr| subnet_predicate(enr)) .map(|enr| enr.peer_id()) .for_each(|peer_id| { + if let Some(v) = metrics::get_int_counter( + &metrics::SUBNET_PEERS_FOUND, + &[query_str], + ) { + v.inc(); + } let other_min_ttl = mapped_results.get_mut(&peer_id); // map peer IDs to the min_ttl furthest in the future diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index 66d7a1f74a..2ee224d5e2 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -112,6 +112,19 @@ lazy_static! { &["client"] ); + pub static ref SUBNET_PEERS_FOUND: Result = + try_create_int_counter_vec( + "discovery_query_peers_found", + "Total number of peers found in attestation subnets and sync subnets", + &["type"] + ); + pub static ref TOTAL_SUBNET_QUERIES: Result = + try_create_int_counter_vec( + "discovery_total_queries", + "Total number of discovery subnet queries", + &["type"] + ); + /* * Inbound/Outbound peers */ diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 0f29135956..a468239a9e 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -405,7 +405,7 @@ impl PeerManager { debug!(self.log, "Identified Peer"; "peer" => %peer_id, "protocol_version" => &info.protocol_version, "agent_version" => &info.agent_version, - "listening_ addresses" => ?info.listen_addrs, + "listening_addresses" => ?info.listen_addrs, "observed_address" => ?info.observed_addr, "protocols" => ?info.protocols ); @@ -501,6 +501,7 @@ impl PeerManager { Protocol::Ping => PeerAction::MidToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, + Protocol::LightClientBootstrap => 
PeerAction::LowToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, @@ -517,6 +518,7 @@ impl PeerManager { Protocol::BlocksByRange => return, Protocol::BlocksByRoot => return, Protocol::Goodbye => return, + Protocol::LightClientBootstrap => return, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, } @@ -531,6 +533,7 @@ impl PeerManager { Protocol::Ping => PeerAction::LowToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, + Protocol::LightClientBootstrap => return, Protocol::Goodbye => return, Protocol::MetaData => return, Protocol::Status => return, diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index c84e368f16..175dfaf018 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -139,7 +139,7 @@ impl NetworkBehaviour for PeerManager { // TODO: directly emit the ban event? BanResult::BadScore => { // This is a faulty state - error!(self.log, "Connected to a banned peer, re-banning"; "peer_id" => %peer_id); + error!(self.log, "Connected to a banned peer. 
Re-banning"; "peer_id" => %peer_id); // Reban the peer self.goodbye_peer(peer_id, GoodbyeReason::Banned, ReportSource::PeerManager); return; diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index a46a05a8ce..a4dd602b3f 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -16,8 +16,8 @@ use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; use types::{ - EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockBase, SignedBeaconBlockMerge, + light_client_bootstrap::LightClientBootstrap, EthSpec, ForkContext, ForkName, Hash256, + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockMerge, }; use unsigned_varint::codec::Uvi; @@ -70,6 +70,7 @@ impl Encoder> for SSZSnappyInboundCodec< RPCResponse::Status(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), + RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RPCResponse::Pong(res) => res.data.as_ssz_bytes(), RPCResponse::MetaData(res) => // Encode the correct version of the MetaData response based on the negotiated version. 
@@ -230,6 +231,7 @@ impl Encoder> for SSZSnappyOutboundCodec< OutboundRequest::BlocksByRoot(req) => req.block_roots.as_ssz_bytes(), OutboundRequest::Ping(req) => req.as_ssz_bytes(), OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode + OutboundRequest::LightClientBootstrap(req) => req.as_ssz_bytes(), }; // SSZ encoded bytes should be within `max_packet_size` if bytes.len() > self.max_packet_size { @@ -472,7 +474,11 @@ fn handle_v1_request( Protocol::Ping => Ok(Some(InboundRequest::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), - + Protocol::LightClientBootstrap => Ok(Some(InboundRequest::LightClientBootstrap( + LightClientBootstrapRequest { + root: Hash256::from_ssz_bytes(decoded_buffer)?, + }, + ))), // MetaData requests return early from InboundUpgrade and do not reach the decoder. // Handle this case just for completeness. Protocol::MetaData => { @@ -544,6 +550,9 @@ fn handle_v1_response( Protocol::MetaData => Ok(Some(RPCResponse::MetaData(MetaData::V1( MetaDataV1::from_ssz_bytes(decoded_buffer)?, )))), + Protocol::LightClientBootstrap => Ok(Some(RPCResponse::LightClientBootstrap( + LightClientBootstrap::from_ssz_bytes(decoded_buffer)?, + ))), } } @@ -867,6 +876,9 @@ mod tests { OutboundRequest::MetaData(metadata) => { assert_eq!(decoded, InboundRequest::MetaData(metadata)) } + OutboundRequest::LightClientBootstrap(bootstrap) => { + assert_eq!(decoded, InboundRequest::LightClientBootstrap(bootstrap)) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 9ac062adc4..9d6229eb38 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -285,7 +285,7 @@ where } else { if !matches!(response, RPCCodedResponse::StreamTermination(..)) { // the stream is closed after sending the expected number of responses - trace!(self.log, "Inbound stream has expired, response not sent"; + trace!(self.log, 
"Inbound stream has expired. Response not sent"; "response" => %response, "id" => inbound_id); } return; diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 26d755a6e0..5da595c3db 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -12,7 +12,9 @@ use std::ops::Deref; use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; -use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{ + light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, +}; /// Maximum number of blocks in a single request. pub type MaxRequestBlocks = U1024; @@ -243,6 +245,9 @@ pub enum RPCResponse { /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Arc>), + /// A response to a get LIGHTCLIENT_BOOTSTRAP request. + LightClientBootstrap(LightClientBootstrap), + /// A PONG response to a PING request. Pong(Ping), @@ -273,6 +278,12 @@ pub enum RPCCodedResponse { StreamTermination(ResponseTermination), } +/// Request a light_client_bootstrap for lightclients peers. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct LightClientBootstrapRequest { + pub root: Hash256, +} + /// The code assigned to an erroneous `RPCResponse`. 
#[derive(Debug, Clone, Copy, PartialEq, IntoStaticStr)] #[strum(serialize_all = "snake_case")] @@ -321,6 +332,7 @@ impl RPCCodedResponse { RPCResponse::BlocksByRoot(_) => true, RPCResponse::Pong(_) => false, RPCResponse::MetaData(_) => false, + RPCResponse::LightClientBootstrap(_) => false, }, RPCCodedResponse::Error(_, _) => true, // Stream terminations are part of responses that have chunks @@ -355,6 +367,7 @@ impl RPCResponse { RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, RPCResponse::Pong(_) => Protocol::Ping, RPCResponse::MetaData(_) => Protocol::MetaData, + RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, } } } @@ -390,6 +403,9 @@ impl std::fmt::Display for RPCResponse { } RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), + RPCResponse::LightClientBootstrap(bootstrap) => { + write!(f, "LightClientBootstrap Slot: {}", bootstrap.header.slot) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 7b0092ef71..203a642a8b 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -26,8 +26,8 @@ pub(crate) use protocol::{InboundRequest, RPCProtocol}; pub use handler::SubstreamId; pub use methods::{ - BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, MaxRequestBlocks, - RPCResponseErrorCode, ResponseTermination, StatusMessage, MAX_REQUEST_BLOCKS, + BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest, + MaxRequestBlocks, RPCResponseErrorCode, ResponseTermination, StatusMessage, MAX_REQUEST_BLOCKS, }; pub(crate) use outbound::OutboundRequest; pub use protocol::{max_rpc_size, Protocol, RPCError}; @@ -108,18 +108,24 @@ pub struct RPC { /// Queue of events to be processed. 
events: Vec, RPCHandler>>, fork_context: Arc, + enable_light_client_server: bool, /// Slog logger for RPC behaviour. log: slog::Logger, } impl RPC { - pub fn new(fork_context: Arc, log: slog::Logger) -> Self { + pub fn new( + fork_context: Arc, + enable_light_client_server: bool, + log: slog::Logger, + ) -> Self { let log = log.new(o!("service" => "libp2p_rpc")); let limiter = RPCRateLimiterBuilder::new() .n_every(Protocol::MetaData, 2, Duration::from_secs(5)) .n_every(Protocol::Ping, 2, Duration::from_secs(10)) .n_every(Protocol::Status, 5, Duration::from_secs(15)) .one_every(Protocol::Goodbye, Duration::from_secs(10)) + .one_every(Protocol::LightClientBootstrap, Duration::from_secs(10)) .n_every( Protocol::BlocksByRange, methods::MAX_REQUEST_BLOCKS, @@ -132,6 +138,7 @@ impl RPC { limiter, events: Vec::new(), fork_context, + enable_light_client_server, log, } } @@ -188,6 +195,7 @@ where RPCProtocol { fork_context: self.fork_context.clone(), max_rpc_size: max_rpc_size(&self.fork_context), + enable_light_client_server: self.enable_light_client_server, phantom: PhantomData, }, (), diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 7d5acc4364..774303800e 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -38,6 +38,7 @@ pub enum OutboundRequest { Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), + LightClientBootstrap(LightClientBootstrapRequest), Ping(Ping), MetaData(PhantomData), } @@ -84,9 +85,12 @@ impl OutboundRequest { ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), ], + // Note: This match arm is technically unreachable as we only respond to light client requests + // that we generate from the beacon state. 
+ // We do not make light client rpc requests from the beacon node + OutboundRequest::LightClientBootstrap(_) => vec![], } } - /* These functions are used in the handler for stream management */ /// Number of responses expected for this request. @@ -98,6 +102,7 @@ impl OutboundRequest { OutboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64, OutboundRequest::Ping(_) => 1, OutboundRequest::MetaData(_) => 1, + OutboundRequest::LightClientBootstrap(_) => 1, } } @@ -110,6 +115,7 @@ impl OutboundRequest { OutboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, OutboundRequest::Ping(_) => Protocol::Ping, OutboundRequest::MetaData(_) => Protocol::MetaData, + OutboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap, } } @@ -121,6 +127,7 @@ impl OutboundRequest { // variants that have `multiple_responses()` can have values. OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, + OutboundRequest::LightClientBootstrap(_) => unreachable!(), OutboundRequest::Status(_) => unreachable!(), OutboundRequest::Goodbye(_) => unreachable!(), OutboundRequest::Ping(_) => unreachable!(), @@ -178,6 +185,9 @@ impl std::fmt::Display for OutboundRequest { OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), OutboundRequest::MetaData(_) => write!(f, "MetaData request"), + OutboundRequest::LightClientBootstrap(bootstrap) => { + write!(f, "Lightclient Bootstrap: {}", bootstrap.root) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 81960214b1..1f40f81971 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -153,6 +153,8 @@ pub enum Protocol { Ping, /// The `MetaData` protocol name. 
MetaData, + /// The `LightClientBootstrap` protocol name. + LightClientBootstrap, } /// RPC Versions @@ -179,6 +181,7 @@ impl std::fmt::Display for Protocol { Protocol::BlocksByRoot => "beacon_blocks_by_root", Protocol::Ping => "ping", Protocol::MetaData => "metadata", + Protocol::LightClientBootstrap => "light_client_bootstrap", }; f.write_str(repr) } @@ -207,6 +210,7 @@ impl std::fmt::Display for Version { pub struct RPCProtocol { pub fork_context: Arc, pub max_rpc_size: usize, + pub enable_light_client_server: bool, pub phantom: PhantomData, } @@ -216,7 +220,7 @@ impl UpgradeInfo for RPCProtocol { /// The list of supported RPC protocols for Lighthouse. fn protocol_info(&self) -> Self::InfoIter { - vec![ + let mut supported_protocols = vec![ ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy), // V2 variants have higher preference then V1 @@ -227,7 +231,15 @@ impl UpgradeInfo for RPCProtocol { ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), - ] + ]; + if self.enable_light_client_server { + supported_protocols.push(ProtocolId::new( + Protocol::LightClientBootstrap, + Version::V1, + Encoding::SSZSnappy, + )); + } + supported_protocols } } @@ -289,6 +301,10 @@ impl ProtocolId { ::ssz_fixed_len(), ::ssz_fixed_len(), ), + Protocol::LightClientBootstrap => RpcLimits::new( + ::ssz_fixed_len(), + ::ssz_fixed_len(), + ), Protocol::MetaData => RpcLimits::new(0, 0), // Metadata requests are empty } } @@ -312,6 +328,10 @@ impl ProtocolId { as Encode>::ssz_fixed_len(), as Encode>::ssz_fixed_len(), ), + Protocol::LightClientBootstrap => RpcLimits::new( + ::ssz_fixed_len(), + ::ssz_fixed_len(), + ), } } @@ -417,57 +437,13 @@ pub enum InboundRequest { Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), 
BlocksByRoot(BlocksByRootRequest), + LightClientBootstrap(LightClientBootstrapRequest), Ping(Ping), MetaData(PhantomData), } -impl UpgradeInfo for InboundRequest { - type Info = ProtocolId; - type InfoIter = Vec; - - // add further protocols as we support more encodings/versions - fn protocol_info(&self) -> Self::InfoIter { - self.supported_protocols() - } -} - /// Implements the encoding per supported protocol for `RPCRequest`. impl InboundRequest { - pub fn supported_protocols(&self) -> Vec { - match self { - // add more protocols when versions/encodings are supported - InboundRequest::Status(_) => vec![ProtocolId::new( - Protocol::Status, - Version::V1, - Encoding::SSZSnappy, - )], - InboundRequest::Goodbye(_) => vec![ProtocolId::new( - Protocol::Goodbye, - Version::V1, - Encoding::SSZSnappy, - )], - InboundRequest::BlocksByRange(_) => vec![ - // V2 has higher preference when negotiating a stream - ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), - ], - InboundRequest::BlocksByRoot(_) => vec![ - // V2 has higher preference when negotiating a stream - ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), - ], - InboundRequest::Ping(_) => vec![ProtocolId::new( - Protocol::Ping, - Version::V1, - Encoding::SSZSnappy, - )], - InboundRequest::MetaData(_) => vec![ - ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), - ], - } - } - /* These functions are used in the handler for stream management */ /// Number of responses expected for this request. 
@@ -479,6 +455,7 @@ impl InboundRequest { InboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64, InboundRequest::Ping(_) => 1, InboundRequest::MetaData(_) => 1, + InboundRequest::LightClientBootstrap(_) => 1, } } @@ -491,6 +468,7 @@ impl InboundRequest { InboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, InboundRequest::Ping(_) => Protocol::Ping, InboundRequest::MetaData(_) => Protocol::MetaData, + InboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap, } } @@ -506,6 +484,7 @@ impl InboundRequest { InboundRequest::Goodbye(_) => unreachable!(), InboundRequest::Ping(_) => unreachable!(), InboundRequest::MetaData(_) => unreachable!(), + InboundRequest::LightClientBootstrap(_) => unreachable!(), } } } @@ -609,6 +588,9 @@ impl std::fmt::Display for InboundRequest { InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), InboundRequest::MetaData(_) => write!(f, "MetaData request"), + InboundRequest::LightClientBootstrap(bootstrap) => { + write!(f, "LightClientBootstrap: {}", bootstrap.root) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 70b14c33de..6ba9f6e941 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -73,6 +73,8 @@ pub struct RPCRateLimiter { bbrange_rl: Limiter, /// BlocksByRoot rate limiter. bbroots_rl: Limiter, + /// LightClientBootstrap rate limiter. + lcbootstrap_rl: Limiter, } /// Error type for non conformant requests @@ -98,6 +100,8 @@ pub struct RPCRateLimiterBuilder { bbrange_quota: Option, /// Quota for the BlocksByRoot protocol. bbroots_quota: Option, + /// Quota for the LightClientBootstrap protocol. 
+ lcbootstrap_quota: Option, } impl RPCRateLimiterBuilder { @@ -116,6 +120,7 @@ impl RPCRateLimiterBuilder { Protocol::Goodbye => self.goodbye_quota = q, Protocol::BlocksByRange => self.bbrange_quota = q, Protocol::BlocksByRoot => self.bbroots_quota = q, + Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, } self } @@ -155,6 +160,9 @@ impl RPCRateLimiterBuilder { let bbrange_quota = self .bbrange_quota .ok_or("BlocksByRange quota not specified")?; + let lcbootstrap_quote = self + .lcbootstrap_quota + .ok_or("LightClientBootstrap quota not specified")?; // create the rate limiters let ping_rl = Limiter::from_quota(ping_quota)?; @@ -163,6 +171,7 @@ impl RPCRateLimiterBuilder { let goodbye_rl = Limiter::from_quota(goodbye_quota)?; let bbroots_rl = Limiter::from_quota(bbroots_quota)?; let bbrange_rl = Limiter::from_quota(bbrange_quota)?; + let lcbootstrap_rl = Limiter::from_quota(lcbootstrap_quote)?; // check for peers to prune every 30 seconds, starting in 30 seconds let prune_every = tokio::time::Duration::from_secs(30); @@ -176,6 +185,7 @@ impl RPCRateLimiterBuilder { goodbye_rl, bbroots_rl, bbrange_rl, + lcbootstrap_rl, init_time: Instant::now(), }) } @@ -199,6 +209,7 @@ impl RPCRateLimiter { Protocol::Goodbye => &mut self.goodbye_rl, Protocol::BlocksByRange => &mut self.bbrange_rl, Protocol::BlocksByRoot => &mut self.bbroots_rl, + Protocol::LightClientBootstrap => &mut self.lcbootstrap_rl, }; check(limiter) } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index e5d81737cf..849a86f51b 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -1,12 +1,12 @@ use std::sync::Arc; use libp2p::core::connection::ConnectionId; -use types::{EthSpec, SignedBeaconBlock}; +use types::{light_client_bootstrap::LightClientBootstrap, EthSpec, SignedBeaconBlock}; use crate::rpc::{ methods::{ - BlocksByRangeRequest, 
BlocksByRootRequest, OldBlocksByRangeRequest, RPCCodedResponse, - RPCResponse, ResponseTermination, StatusMessage, + BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, + OldBlocksByRangeRequest, RPCCodedResponse, RPCResponse, ResponseTermination, StatusMessage, }, OutboundRequest, SubstreamId, }; @@ -34,6 +34,8 @@ pub enum Request { BlocksByRange(BlocksByRangeRequest), /// A request blocks root request. BlocksByRoot(BlocksByRootRequest), + // light client bootstrap request + LightClientBootstrap(LightClientBootstrapRequest), } impl std::convert::From for OutboundRequest { @@ -47,6 +49,7 @@ impl std::convert::From for OutboundRequest { step: 1, }) } + Request::LightClientBootstrap(b) => OutboundRequest::LightClientBootstrap(b), Request::Status(s) => OutboundRequest::Status(s), } } @@ -66,6 +69,8 @@ pub enum Response { BlocksByRange(Option>>), /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Option>>), + /// A response to a LightClientUpdate request. + LightClientBootstrap(LightClientBootstrap), } impl std::convert::From> for RPCCodedResponse { @@ -80,6 +85,9 @@ impl std::convert::From> for RPCCodedResponse RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), }, Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), + Response::LightClientBootstrap(b) => { + RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b)) + } } } } diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs index 8327293a74..3adc940a6a 100644 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -8,7 +8,6 @@ use libp2p::gossipsub::subscription_filter::{ }; use libp2p::gossipsub::Gossipsub as BaseGossipsub; use libp2p::identify::Identify; -use libp2p::swarm::NetworkBehaviour; use libp2p::NetworkBehaviour; use types::EthSpec; diff --git 
a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 97d96d171d..a6f1ce20ad 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -259,7 +259,11 @@ impl Network { (gossipsub, update_gossipsub_scores) }; - let eth2_rpc = RPC::new(ctx.fork_context.clone(), log.clone()); + let eth2_rpc = RPC::new( + ctx.fork_context.clone(), + config.enable_light_client_server, + log.clone(), + ); let discovery = { // Build and start the discovery sub-behaviour @@ -978,6 +982,9 @@ impl Network { Request::Status(_) => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["status"]) } + Request::LightClientBootstrap(_) => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["light_client_bootstrap"]) + } Request::BlocksByRange { .. } => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_range"]) } @@ -1247,6 +1254,14 @@ impl Network { ); Some(event) } + InboundRequest::LightClientBootstrap(req) => { + let event = self.build_request( + peer_request_id, + peer_id, + Request::LightClientBootstrap(req), + ); + Some(event) + } } } Ok(RPCReceived::Response(id, resp)) => { @@ -1274,6 +1289,10 @@ impl Network { RPCResponse::BlocksByRoot(resp) => { self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } + // Should never be reached + RPCResponse::LightClientBootstrap(bootstrap) => { + self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap)) + } } } Ok(RPCReceived::EndOfStream(id, termination)) => { diff --git a/beacon_node/lighthouse_network/src/types/sync_state.rs b/beacon_node/lighthouse_network/src/types/sync_state.rs index ce03f61ffe..5f09aec27a 100644 --- a/beacon_node/lighthouse_network/src/types/sync_state.rs +++ b/beacon_node/lighthouse_network/src/types/sync_state.rs @@ -74,6 +74,17 @@ impl SyncState { } } + pub fn is_syncing_finalized(&self) -> bool { + match self { + SyncState::SyncingFinalized { 
.. } => true, + SyncState::SyncingHead { .. } => false, + SyncState::SyncTransition => false, + SyncState::BackFillSyncing { .. } => false, + SyncState::Synced => false, + SyncState::Stalled => false, + } + } + /// Returns true if the node is synced. /// /// NOTE: We consider the node synced if it is fetching old historical blocks. diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 825b1088b2..47d703c260 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -172,29 +172,8 @@ impl From for Topic { impl From for String { fn from(topic: GossipTopic) -> String { - let encoding = match topic.encoding { - GossipEncoding::SSZSnappy => SSZ_SNAPPY_ENCODING_POSTFIX, - }; - - let kind = match topic.kind { - GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(), - GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(), - GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(), - GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(), - GossipKind::AttesterSlashing => ATTESTER_SLASHING_TOPIC.into(), - GossipKind::Attestation(index) => format!("{}{}", BEACON_ATTESTATION_PREFIX, *index,), - GossipKind::SignedContributionAndProof => SIGNED_CONTRIBUTION_AND_PROOF_TOPIC.into(), - GossipKind::SyncCommitteeMessage(index) => { - format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index) - } - }; - format!( - "/{}/{}/{}/{}", - TOPIC_PREFIX, - hex::encode(topic.fork_digest), - kind, - encoding - ) + // Use the `Display` implementation below. 
+ topic.to_string() } } diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index f477878ac0..9528cfd1df 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -41,10 +41,11 @@ use crate::sync::manager::BlockProcessType; use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; use beacon_chain::parking_lot::Mutex; -use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock}; +use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock, NotifyExecutionLayer}; use derivative::Derivative; use futures::stream::{Stream, StreamExt}; use futures::task::Poll; +use lighthouse_network::rpc::LightClientBootstrapRequest; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, StatusMessage}, Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, @@ -156,6 +157,10 @@ const MAX_BLOCKS_BY_RANGE_QUEUE_LEN: usize = 1_024; /// will be stored before we start dropping them. const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024; +/// The maximum number of queued `LightClientBootstrapRequest` objects received from the network RPC that +/// will be stored before we start dropping them. +const MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN: usize = 1_024; + /// The name of the manager tokio task. 
const MANAGER_TASK_NAME: &str = "beacon_processor_manager"; @@ -195,6 +200,7 @@ pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const STATUS_PROCESSING: &str = "status_processing"; pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; +pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap"; pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; @@ -557,6 +563,22 @@ impl WorkEvent { } } + /// Create a new work event to process `LightClientBootstrap`s from the RPC network. + pub fn lightclient_bootstrap_request( + peer_id: PeerId, + request_id: PeerRequestId, + request: LightClientBootstrapRequest, + ) -> Self { + Self { + drop_during_sync: true, + work: Work::LightClientBootstrapRequest { + peer_id, + request_id, + request, + }, + } + } + /// Get a `str` representation of the type of work this `WorkEvent` contains. pub fn work_type(&self) -> &'static str { self.work.str_id() @@ -733,6 +755,11 @@ pub enum Work { request_id: PeerRequestId, request: BlocksByRootRequest, }, + LightClientBootstrapRequest { + peer_id: PeerId, + request_id: PeerRequestId, + request: LightClientBootstrapRequest, + }, } impl Work { @@ -755,6 +782,7 @@ impl Work { Work::Status { .. } => STATUS_PROCESSING, Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST, Work::BlocksByRootsRequest { .. } => BLOCKS_BY_ROOTS_REQUEST, + Work::LightClientBootstrapRequest { .. } => LIGHT_CLIENT_BOOTSTRAP_REQUEST, Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. 
} => UNKNOWN_BLOCK_AGGREGATE, } @@ -898,7 +926,7 @@ impl BeaconProcessor { let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN); let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN); let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN); - + let mut lcbootstrap_queue = FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN); // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // receive them back once they are ready (`ready_work_rx`). let (ready_work_tx, ready_work_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); @@ -1137,6 +1165,8 @@ impl BeaconProcessor { } else if let Some(item) = backfill_chain_segment.pop() { self.spawn_worker(item, toolbox); // This statement should always be the final else statement. + } else if let Some(item) = lcbootstrap_queue.pop() { + self.spawn_worker(item, toolbox); } else { // Let the journal know that a worker is freed and there's nothing else // for it to do. @@ -1237,6 +1267,9 @@ impl BeaconProcessor { Work::BlocksByRootsRequest { .. } => { bbroots_queue.push(work, work_id, &self.log) } + Work::LightClientBootstrapRequest { .. } => { + lcbootstrap_queue.push(work, work_id, &self.log) + } Work::UnknownBlockAttestation { .. } => { unknown_block_attestation_queue.push(work) } @@ -1554,8 +1587,24 @@ impl BeaconProcessor { /* * Verification for a chain segment (multiple blocks). */ - Work::ChainSegment { process_id, blocks } => task_spawner - .spawn_async(async move { worker.process_chain_segment(process_id, blocks).await }), + Work::ChainSegment { process_id, blocks } => { + let notify_execution_layer = if self + .network_globals + .sync_state + .read() + .is_syncing_finalized() + { + NotifyExecutionLayer::No + } else { + NotifyExecutionLayer::Yes + }; + + task_spawner.spawn_async(async move { + worker + .process_chain_segment(process_id, blocks, notify_execution_layer) + .await + }) + } /* * Processing of Status Messages. 
*/ @@ -1594,6 +1643,16 @@ impl BeaconProcessor { request, ) }), + /* + * Processing of lightclient bootstrap requests from other peers. + */ + Work::LightClientBootstrapRequest { + peer_id, + request_id, + request, + } => task_spawner.spawn_blocking(move || { + worker.handle_light_client_bootstrap(peer_id, request_id, request) + }), Work::UnknownBlockAttestation { message_id, peer_id, diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index eaf5cd005c..947d9cfe27 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -7,7 +7,7 @@ use beacon_chain::{ sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::get_block_delay_ms, BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized, ForkChoiceError, - GossipVerifiedBlock, + GossipVerifiedBlock, NotifyExecutionLayer, }; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use slog::{crit, debug, error, info, trace, warn}; @@ -793,7 +793,7 @@ impl Worker { | Err(e @ BlockError::BlockIsAlreadyKnown) | Err(e @ BlockError::RepeatProposal { .. }) | Err(e @ BlockError::NotFinalizedDescendant { .. }) => { - debug!(self.log, "Could not verify block for gossip, ignoring the block"; + debug!(self.log, "Could not verify block for gossip. Ignoring the block"; "error" => %e); // Prevent recurring behaviour by penalizing the peer slightly. self.gossip_penalize_peer( @@ -805,7 +805,7 @@ impl Worker { return None; } Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { - debug!(self.log, "Could not verify block for gossip, ignoring the block"; + debug!(self.log, "Could not verify block for gossip. 
Ignoring the block"; "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; @@ -827,7 +827,7 @@ impl Worker { // TODO(merge): reconsider peer scoring for this event. | Err(e @ BlockError::ParentExecutionPayloadInvalid { .. }) | Err(e @ BlockError::GenesisBlock) => { - warn!(self.log, "Could not verify block for gossip, rejecting the block"; + warn!(self.log, "Could not verify block for gossip. Rejecting the block"; "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); self.gossip_penalize_peer( @@ -934,7 +934,12 @@ impl Worker { match self .chain - .process_block(block_root, verified_block, CountUnrealized::True) + .process_block( + block_root, + verified_block, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await { Ok(block_root) => { diff --git a/beacon_node/network/src/beacon_processor/worker/mod.rs b/beacon_node/network/src/beacon_processor/worker/mod.rs index f907c49b7d..1cbc64b632 100644 --- a/beacon_node/network/src/beacon_processor/worker/mod.rs +++ b/beacon_node/network/src/beacon_processor/worker/mod.rs @@ -38,7 +38,7 @@ impl Worker { /// Creates a log if there is an internal error. fn send_network_message(&self, message: NetworkMessage) { self.network_tx.send(message).unwrap_or_else(|e| { - debug!(self.log, "Could not send message to the network service, likely shutdown"; + debug!(self.log, "Could not send message to the network service. 
Likely shutdown"; "error" => %e) }); } diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 37aee01716..3e354a70d2 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -11,7 +11,7 @@ use slog::{debug, error}; use slot_clock::SlotClock; use std::sync::Arc; use task_executor::TaskExecutor; -use types::{Epoch, EthSpec, Hash256, Slot}; +use types::{light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, Slot}; use super::Worker; @@ -204,6 +204,79 @@ impl Worker { ) } + /// Handle a `BlocksByRoot` request from the peer. + pub fn handle_light_client_bootstrap( + self, + peer_id: PeerId, + request_id: PeerRequestId, + request: LightClientBootstrapRequest, + ) { + let block_root = request.root; + let state_root = match self.chain.get_blinded_block(&block_root) { + Ok(signed_block) => match signed_block { + Some(signed_block) => signed_block.state_root(), + None => { + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Bootstrap not avaiable".into(), + request_id, + ); + return; + } + }, + Err(_) => { + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Bootstrap not avaiable".into(), + request_id, + ); + return; + } + }; + let mut beacon_state = match self.chain.get_state(&state_root, None) { + Ok(beacon_state) => match beacon_state { + Some(state) => state, + None => { + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Bootstrap not avaiable".into(), + request_id, + ); + return; + } + }, + Err(_) => { + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Bootstrap not avaiable".into(), + request_id, + ); + return; + } + }; + let bootstrap = match LightClientBootstrap::from_beacon_state(&mut beacon_state) { + Ok(bootstrap) => bootstrap, + 
Err(_) => { + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Bootstrap not avaiable".into(), + request_id, + ); + return; + } + }; + self.send_response( + peer_id, + Response::LightClientBootstrap(bootstrap), + request_id, + ) + } + /// Handle a `BlocksByRange` request from the peer. pub fn handle_blocks_by_range_request( self, diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 5d97894fe4..1ec045e97e 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -10,6 +10,7 @@ use crate::sync::{BatchProcessResult, ChainId}; use beacon_chain::CountUnrealized; use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, + NotifyExecutionLayer, }; use lighthouse_network::PeerAction; use slog::{debug, error, info, warn}; @@ -85,7 +86,12 @@ impl Worker { let slot = block.slot(); let result = self .chain - .process_block(block_root, block, CountUnrealized::True) + .process_block( + block_root, + block, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await; metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); @@ -127,6 +133,7 @@ impl Worker { &self, sync_type: ChainSegmentProcessId, downloaded_blocks: Vec>>, + notify_execution_layer: NotifyExecutionLayer, ) { let result = match sync_type { // this a request from the range sync @@ -136,7 +143,11 @@ impl Worker { let sent_blocks = downloaded_blocks.len(); match self - .process_blocks(downloaded_blocks.iter(), count_unrealized) + .process_blocks( + downloaded_blocks.iter(), + count_unrealized, + notify_execution_layer, + ) .await { (_, Ok(_)) => { @@ -215,7 +226,11 @@ impl Worker { // parent blocks are ordered from highest slot to lowest, so we need to process in // reverse match self - 
.process_blocks(downloaded_blocks.iter().rev(), CountUnrealized::True) + .process_blocks( + downloaded_blocks.iter().rev(), + CountUnrealized::True, + notify_execution_layer, + ) .await { (imported_blocks, Err(e)) => { @@ -246,11 +261,12 @@ impl Worker { &self, downloaded_blocks: impl Iterator>>, count_unrealized: CountUnrealized, + notify_execution_layer: NotifyExecutionLayer, ) -> (usize, Result<(), ChainSegmentFailed>) { let blocks: Vec> = downloaded_blocks.cloned().collect(); match self .chain - .process_chain_segment(blocks, count_unrealized) + .process_chain_segment(blocks, count_unrealized, notify_execution_layer) .await { ChainSegmentResult::Successful { imported_blocks } => { @@ -428,7 +444,7 @@ impl Worker { } else { // The block is in the future, but not too far. debug!( - self.log, "Block is slightly ahead of our slot clock, ignoring."; + self.log, "Block is slightly ahead of our slot clock. Ignoring."; "present_slot" => present_slot, "block_slot" => block_slot, "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index 03b877506f..5df308f259 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -168,6 +168,9 @@ impl Router { Request::BlocksByRoot(request) => self .processor .on_blocks_by_root_request(peer_id, id, request), + Request::LightClientBootstrap(request) => self + .processor + .on_lightclient_bootstrap(peer_id, id, request), } } @@ -192,6 +195,7 @@ impl Router { self.processor .on_blocks_by_root_response(peer_id, request_id, beacon_block); } + Response::LightClientBootstrap(_) => unreachable!(), } } diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index ce11cbdcef..3c9a4a81fb 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -160,6 +160,18 @@ impl Processor { )) } + /// Handle a `LightClientBootstrap` 
request from the peer. + pub fn on_lightclient_bootstrap( + &mut self, + peer_id: PeerId, + request_id: PeerRequestId, + request: LightClientBootstrapRequest, + ) { + self.send_beacon_processor_work(BeaconWorkEvent::lightclient_bootstrap_request( + peer_id, request_id, request, + )) + } + /// Handle a `BlocksByRange` request from the peer. pub fn on_blocks_by_range_request( &mut self, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index cdef904715..230c883a93 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -633,7 +633,7 @@ impl SyncManager { // Some logs. if dropped_single_blocks_requests > 0 || dropped_parent_chain_requests > 0 { - debug!(self.log, "Execution engine not online, dropping active requests."; + debug!(self.log, "Execution engine not online. Dropping active requests."; "dropped_single_blocks_requests" => dropped_single_blocks_requests, "dropped_parent_chain_requests" => dropped_parent_chain_requests, ); diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 45ade7034c..c81fed2443 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -242,7 +242,7 @@ impl SyncNetworkContext { source: ReportSource::SyncService, }) .unwrap_or_else(|_| { - warn!(self.log, "Could not report peer, channel failed"); + warn!(self.log, "Could not report peer: channel failed"); }); } @@ -257,7 +257,7 @@ impl SyncNetworkContext { msg, }) .unwrap_or_else(|e| { - warn!(self.log, "Could not report peer, channel failed"; "error"=> %e); + warn!(self.log, "Could not report peer: channel failed"; "error"=> %e); }); } diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index b00d56513c..44a995176d 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -875,4 +875,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { [experimental]") 
.takes_value(false) ) + .arg( + Arg::with_name("gui") + .long("gui") + .hidden(true) + .help("Enable the graphical user interface and all its requirements. \ + This is equivalent to --http and --validator-monitor-auto.") + .takes_value(false) + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 99e0af6e4c..e98b585f5f 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -14,6 +14,7 @@ use std::cmp::max; use std::fmt::Debug; use std::fmt::Write; use std::fs; +use std::net::Ipv6Addr; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; use std::path::{Path, PathBuf}; use std::str::FromStr; @@ -34,13 +35,13 @@ pub fn get_config( let spec = &context.eth2_config.spec; let log = context.log(); - let mut client_config = ClientConfig { - data_dir: get_data_dir(cli_args), - ..Default::default() - }; + let mut client_config = ClientConfig::default(); + + // Update the client's data directory + client_config.set_data_dir(get_data_dir(cli_args)); // If necessary, remove any existing database and configuration - if client_config.data_dir.exists() && cli_args.is_present("purge-db") { + if client_config.data_dir().exists() && cli_args.is_present("purge-db") { // Remove the chain_db. let chain_db = client_config.get_db_path(); if chain_db.exists() { @@ -57,11 +58,11 @@ pub fn get_config( } // Create `datadir` and any non-existing parent directories. 
- fs::create_dir_all(&client_config.data_dir) + fs::create_dir_all(client_config.data_dir()) .map_err(|e| format!("Failed to create data dir: {}", e))?; // logs the chosen data directory - let mut log_dir = client_config.data_dir.clone(); + let mut log_dir = client_config.data_dir().clone(); // remove /beacon from the end log_dir.pop(); info!(log, "Data directory initialised"; "datadir" => log_dir.into_os_string().into_string().expect("Datadir should be a valid os string")); @@ -69,10 +70,13 @@ pub fn get_config( /* * Networking */ + + let data_dir_ref = client_config.data_dir().clone(); + set_network_config( &mut client_config.network, cli_args, - &client_config.data_dir, + &data_dir_ref, log, false, )?; @@ -303,7 +307,7 @@ pub fn get_config( } else if let Some(jwt_secret_key) = cli_args.value_of("execution-jwt-secret-key") { use std::fs::File; use std::io::Write; - secret_file = client_config.data_dir.join(DEFAULT_JWT_FILE); + secret_file = client_config.data_dir().join(DEFAULT_JWT_FILE); let mut jwt_secret_key_file = File::create(secret_file.clone()) .map_err(|e| format!("Error while creating jwt_secret_key file: {:?}", e))?; jwt_secret_key_file @@ -332,7 +336,7 @@ pub fn get_config( clap_utils::parse_optional(cli_args, "suggested-fee-recipient")?; el_config.jwt_id = clap_utils::parse_optional(cli_args, "execution-jwt-id")?; el_config.jwt_version = clap_utils::parse_optional(cli_args, "execution-jwt-version")?; - el_config.default_datadir = client_config.data_dir.clone(); + el_config.default_datadir = client_config.data_dir().clone(); el_config.builder_profit_threshold = clap_utils::parse_required(cli_args, "builder-profit-threshold")?; let execution_timeout_multiplier = @@ -573,7 +577,7 @@ pub fn get_config( let slasher_dir = if let Some(slasher_dir) = cli_args.value_of("slasher-dir") { PathBuf::from(slasher_dir) } else { - client_config.data_dir.join("slasher_db") + client_config.data_dir().join("slasher_db") }; let mut slasher_config = 
slasher::Config::new(slasher_dir); @@ -705,8 +709,11 @@ pub fn get_config( client_config.chain.builder_fallback_disable_checks = cli_args.is_present("builder-fallback-disable-checks"); - // Light client server config. - client_config.chain.enable_light_client_server = cli_args.is_present("light-client-server"); + // Graphical user interface config. + if cli_args.is_present("gui") { + client_config.http_api.enabled = true; + client_config.validator_monitor_auto = true; + } Ok(client_config) } @@ -837,9 +844,11 @@ pub fn set_network_config( } if cli_args.is_present("enr-match") { - // set the enr address to localhost if the address is 0.0.0.0 - if config.listen_address == "0.0.0.0".parse::().expect("valid ip addr") { - config.enr_address = Some("127.0.0.1".parse::().expect("valid ip addr")); + // set the enr address to localhost if the address is unspecified + if config.listen_address == IpAddr::V4(Ipv4Addr::UNSPECIFIED) { + config.enr_address = Some(IpAddr::V4(Ipv4Addr::LOCALHOST)); + } else if config.listen_address == IpAddr::V6(Ipv6Addr::UNSPECIFIED) { + config.enr_address = Some(IpAddr::V6(Ipv6Addr::LOCALHOST)); } else { config.enr_address = Some(config.listen_address); } @@ -919,6 +928,9 @@ pub fn set_network_config( config.discv5_config.table_filter = |_| true; } + // Light client server config. 
+ config.enable_light_client_server = cli_args.is_present("light-client-server"); + Ok(()) } diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index d05677465b..a43fa10e64 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -1,9 +1,6 @@ # Summary * [Introduction](./intro.md) -* [Become a Validator](./mainnet-validator.md) - * [Become a Testnet Validator](./testnet-validator.md) -* [Merge Migration](./merge-migration.md) * [Installation](./installation.md) * [System Requirements](./system-requirements.md) * [Pre-Built Binaries](./installation-binaries.md) @@ -13,6 +10,9 @@ * [Cross-Compiling](./cross-compiling.md) * [Homebrew](./homebrew.md) * [Update Priorities](./installation-priorities.md) +* [Run a Node](./run_a_node.md) +* [Become a Validator](./mainnet-validator.md) + * [Become a Testnet Validator](./testnet-validator.md) * [Key Management](./key-management.md) * [Create a wallet](./wallet-create.md) * [Create a validator](./validator-create.md) @@ -46,6 +46,7 @@ * [Pre-Releases](./advanced-pre-releases.md) * [Release Candidates](./advanced-release-candidates.md) * [MEV and Lighthouse](./builders.md) + * [Merge Migration](./merge-migration.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index d9c8080b4d..c1ba6a2dcc 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -62,6 +62,43 @@ curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/j ``` +### `/lighthouse/ui/health` + + +```bash +curl -X GET "http://localhost:5052/lighthouse/ui/health" -H "accept: application/json" | jq +``` + +```json +{ + "data": { + "total_memory": 16443219968, + "free_memory": 1283739648, + "used_memory": 5586264064, + "sys_loadavg_1": 0.59, + "sys_loadavg_5": 1.13, + "sys_loadavg_15": 2.41, + "cpu_cores": 4, + "cpu_threads": 8, + "global_cpu_frequency": 3.4, + "disk_bytes_total": 502390845440, + 
"disk_bytes_free": 9981386752, + "network_name": "wlp0s20f3", + "network_bytes_total_received": 14105556611, + "network_bytes_total_transmit": 3649489389, + "nat_open": true, + "connected_peers": 80, + "sync_state": "Synced", + "system_uptime": 660706, + "app_uptime": 105, + "system_name": "Arch Linux", + "kernel_version": "5.19.13-arch1-1", + "os_version": "Linux rolling Arch Linux", + "host_name": "Computer1" + } +} +``` + ### `/lighthouse/syncing` ```bash diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 9aedf6e249..76cffc0e4f 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -6,6 +6,7 @@ HTTP Path | Description | | --- | -- | [`GET /lighthouse/version`](#get-lighthouseversion) | Get the Lighthouse software version. [`GET /lighthouse/health`](#get-lighthousehealth) | Get information about the host machine. +[`GET /lighthouse/ui/health`](#get-lighthouseuihealth) | Get information about the host machine. Focused for UI applications. [`GET /lighthouse/spec`](#get-lighthousespec) | Get the Ethereum proof-of-stake consensus specification used by the validator. [`GET /lighthouse/auth`](#get-lighthouseauth) | Get the location of the authorization token. [`GET /lighthouse/validators`](#get-lighthousevalidators) | List all validators. @@ -77,6 +78,45 @@ Returns information regarding the health of the host machine. } ``` +## `GET /lighthouse/ui/health` + +Returns information regarding the health of the host machine. 
+ +### HTTP Specification + +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/ui/health` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | + +### Example Response Body + +```json +{ + "data": { + "total_memory": 16443219968, + "free_memory": 1283739648, + "used_memory": 5586264064, + "sys_loadavg_1": 0.59, + "sys_loadavg_5": 1.13, + "sys_loadavg_15": 2.41, + "cpu_cores": 4, + "cpu_threads": 8, + "global_cpu_frequency": 3.4, + "disk_bytes_total": 502390845440, + "disk_bytes_free": 9981386752, + "system_uptime": 660706, + "app_uptime": 105, + "system_name": "Arch Linux", + "kernel_version": "5.19.13-arch1-1", + "os_version": "Linux rolling Arch Linux", + "host_name": "Computer1" + } +} +``` + ## `GET /lighthouse/spec` Returns the Ethereum proof-of-stake consensus specification loaded for this validator. diff --git a/book/src/intro.md b/book/src/intro.md index fca075892b..ef16913d68 100644 --- a/book/src/intro.md +++ b/book/src/intro.md @@ -18,6 +18,7 @@ We implement the specification as defined in the You may read this book from start to finish, or jump to some of these topics: - Follow the [Installation Guide](./installation.md) to install Lighthouse. +- Run your very [own beacon node](./run_a_node.md). - Learn about [becoming a mainnet validator](./mainnet-validator.md). - Get hacking with the [Development Environment Guide](./setup.md). - Utilize the whole stack by starting a [local testnet](./setup.md#local-testnets). 
diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index c0ba048997..08f1b51e42 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -61,6 +61,7 @@ the relevant page for your execution engine for the required flags: - [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients) - [Nethermind: Running Nethermind Post Merge](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) - [Besu: Prepare For The Merge](https://besu.hyperledger.org/en/stable/HowTo/Upgrade/Prepare-for-The-Merge/) +- [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) Once you have configured your execution engine to open up the engine API (usually on port 8551) you should add the URL to your `lighthouse bn` flags with `--execution-endpoint `, as well as diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md new file mode 100644 index 0000000000..5ce42aa630 --- /dev/null +++ b/book/src/run_a_node.md @@ -0,0 +1,171 @@ +# Run a Node + +This document provides detail for users who want to run a Lighthouse beacon node. +You should be finished with one [Installation](./installation.md) method of your choice to continue with the following steps: + +1. Set up an [execution node](#step-1-set-up-an-execution-node); +1. Enable [checkpoint sync](#step-2-choose-a-checkpoint-sync-provider); +1. Run [Lighthouse](#step-3-run-lighthouse); +1. [Check logs](#step-4-check-logs); and +1. [Further readings](#step-5-further-readings). + +Checkpoint sync is *optional*; however, we recommend it since it is substantially faster +than syncing from genesis while still providing the same functionality. + +## Step 1: Set up an execution node + +The Lighthouse beacon node *must* connect to an execution engine in order to validate the transactions +present in blocks. 
Two flags are used to configure this connection: + +- `--execution-endpoint`: the *URL* of the execution engine API. Often this will be + `http://localhost:8551`. +- `--execution-jwt`: the *path* to the file containing the JWT secret shared by Lighthouse and the + execution engine. This is a mandatory form of authentication that ensures that Lighthouse +has authority to control the execution engine. + +Each execution engine has its own flags for configuring the engine API and JWT. +Please consult the relevant page of your execution engine for the required flags: + +- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients) +- [Nethermind: Running Nethermind & CL](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) +- [Besu: Connect to Mainnet](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/) +- [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) + +The execution engine connection must be *exclusive*, i.e. you must have one execution node +per beacon node. The reason for this is that the beacon node _controls_ the execution node. + +## Step 2: Choose a checkpoint sync provider + +Lighthouse supports fast sync from a recent finalized checkpoint. +The checkpoint sync is done using a [public endpoint](#use-a-community-checkpoint-sync-endpoint) +provided by the Ethereum community. + +In [step 3](#step-3-run-lighthouse), when running Lighthouse, +we will enable checkpoint sync by providing the URL to the `--checkpoint-sync-url` flag. + +### Use a community checkpoint sync endpoint + +The Ethereum community provides various [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) for you to choose from for your initial checkpoint state. Select one for your network and use it as the URL. 
+ +For example, the URL for Sigma Prime's checkpoint sync server for mainnet is `https://mainnet.checkpoint.sigp.io`, +which we will use in [step 3](#step-3-run-lighthouse). + +## Step 3: Run Lighthouse + +To run Lighthouse, we use the three flags from the steps above: +- `--execution-endpoint`; +- `--execution-jwt`; and +- `--checkpoint-sync-url`. + +Additionally, we run Lighthouse with the `--network` flag, which selects a network: + +- `lighthouse` (no flag): Mainnet. +- `lighthouse --network mainnet`: Mainnet. +- `lighthouse --network goerli`: Goerli (testnet). + +Using the correct `--network` flag is very important; using the wrong flag can +result in penalties, slashings or lost deposits. As a rule of thumb, *always* +provide a `--network` flag instead of relying on the default. + +For the testnets we support [Goerli](https://goerli.net/) (`--network goerli`), +[Sepolia](https://sepolia.dev/) (`--network sepolia`), and [Gnosis chain](https://www.gnosis.io/) (`--network gnosis`). + +Minor modifications depend on if you want to run your node while [staking](#staking) or [non-staking](#non-staking). +In the following, we will provide examples of what a Lighthouse setup could look like. + +### Staking + +``` +lighthouse bn \ + --network mainnet \ + --execution-endpoint http://localhost:8551 \ + --execution-jwt /secrets/jwt.hex \ + --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \ + --http +``` + +A Lighthouse beacon node can be configured to expose an HTTP server by supplying the `--http` flag. +The default listen address is `127.0.0.1:5052`. +The HTTP API is required for the beacon node to accept connections from the *validator client*, which manages keys. 
+ +### Non-staking + +``` +lighthouse bn \ + --network mainnet \ + --execution-endpoint http://localhost:8551 \ + --execution-jwt /secrets/jwt.hex \ + --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \ + --disable-deposit-contract-sync +``` + +Since we are not staking, we can use the `--disable-deposit-contract-sync` flag. + +--- + +Once Lighthouse runs, we can monitor the logs to see if it is syncing correctly. + +## Step 4: Check logs +Several logs help you identify if Lighthouse is running correctly. + +### Logs - Checkpoint sync +Lighthouse will print a message to indicate that checkpoint sync is being used: + +``` +INFO Starting checkpoint sync remote_url: http://remote-bn:8000/, service: beacon +``` + +After a short time (usually less than a minute), it will log the details of the checkpoint +loaded from the remote beacon node: + +``` +INFO Loaded checkpoint block and state state_root: 0xe8252c68784a8d5cc7e5429b0e95747032dd1dcee0d1dc9bdaf6380bf90bc8a6, block_root: 0x5508a20147299b1a7fe9dbea1a8b3bf979f74c52e7242039bd77cbff62c0695a, slot: 2034720, service: beacon +``` + +Once the checkpoint is loaded Lighthouse will sync forwards to the head of the chain. + +If a validator client is connected to the node then it will be able to start completing its duties +as soon as forwards sync completes. + +> **Security Note**: You should cross-reference the `block_root` and `slot` of the loaded checkpoint +> against a trusted source like another [public endpoint](https://eth-clients.github.io/checkpoint-sync-endpoints/), +> a friend's node, or a block explorer. + +#### Backfilling Blocks + +Once forwards sync completes, Lighthouse will commence a "backfill sync" to download the blocks +from the checkpoint back to genesis. 
+
+The beacon node will log messages similar to the following each minute while it completes backfill
+sync:
+
+```
+INFO Downloading historical blocks est_time: 5 hrs 0 mins, speed: 111.96 slots/sec, distance: 2020451 slots (40 weeks 0 days), service: slot_notifier
+```
+
+Once backfill is complete, an `INFO Historical block download complete` log will be emitted.
+
+Check out the [FAQ](./checkpoint-sync.md#faq) for more information on checkpoint sync.
+
+### Logs - Syncing
+
+You should see that Lighthouse remains in sync and marks blocks
+as `verified` indicating that they have been processed successfully by the execution engine:
+
+```
+INFO Synced, slot: 3690668, block: 0x1244…cb92, epoch: 115333, finalized_epoch: 115331, finalized_root: 0x0764…2a3d, exec_hash: 0x929c…1ff6 (verified), peers: 78
+```
+
+
+## Step 5: Further readings
+
+Several other resources are the next logical step to explore after running your beacon node:
+
+- Learn how to [become a validator](./mainnet-validator.md);
+- Explore how to [manage your keys](./key-management.md);
+- Research on [validator management](./validator-management.md);
+- Dig into the [APIs](./api.md) that the beacon node and validator client provide;
+- Study even more about [checkpoint sync](./checkpoint-sync.md); or
+- Investigate what steps had to be taken in the past to execute a smooth [merge migration](./merge-migration.md).
+
+Finally, if you are struggling with anything, join our [Discord](https://discord.gg/cyAszAh). We are happy to help! 
\ No newline at end of file diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 4f4c18ac84..1dedabe4a4 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "3.2.1" +version = "3.3.0" authors = ["Sigma Prime "] edition = "2021" diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index 4df7a5f235..b7a66cbbd8 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -1,9 +1,11 @@ use beacon_node::{get_data_dir, set_network_config}; use clap::ArgMatches; use eth2_network_config::Eth2NetworkConfig; +use lighthouse_network::discv5::enr::EnrBuilder; +use lighthouse_network::discv5::IpMode; use lighthouse_network::discv5::{enr::CombinedKey, Discv5Config, Enr}; use lighthouse_network::{ - discovery::{create_enr_builder_from_config, load_enr_from_disk, use_or_load_enr}, + discovery::{load_enr_from_disk, use_or_load_enr}, load_private_key, CombinedKeyExt, NetworkConfig, }; use serde_derive::{Deserialize, Serialize}; @@ -70,6 +72,15 @@ impl BootNodeConfig { // the address to listen on let listen_socket = SocketAddr::new(network_config.listen_address, network_config.discovery_port); + if listen_socket.is_ipv6() { + // create ipv6 sockets and enable ipv4 mapped addresses. + network_config.discv5_config.ip_mode = IpMode::Ip6 { + enable_mapped_addresses: true, + }; + } else { + // Set explicitly as ipv4 otherwise + network_config.discv5_config.ip_mode = IpMode::Ip4; + } let private_key = load_private_key(&network_config, &logger); let local_key = CombinedKey::from_libp2p(&private_key)?; @@ -104,7 +115,29 @@ impl BootNodeConfig { // Build the local ENR let mut local_enr = { - let mut builder = create_enr_builder_from_config(&network_config, false); + let mut builder = EnrBuilder::new("v4"); + // Set the enr address if specified. Set also the port. 
+ // NOTE: if the port is specified but the the address is not, the port won't be + // set since it can't be known if it's an ipv6 or ipv4 udp port. + if let Some(enr_address) = network_config.enr_address { + match enr_address { + std::net::IpAddr::V4(ipv4_addr) => { + builder.ip4(ipv4_addr); + if let Some(port) = network_config.enr_udp_port { + builder.udp4(port); + } + } + std::net::IpAddr::V6(ipv6_addr) => { + builder.ip6(ipv6_addr); + if let Some(port) = network_config.enr_udp_port { + builder.udp6(port); + // We are enabling mapped addresses in the boot node in this case, + // so advertise an udp4 port as well. + builder.udp4(port); + } + } + } + }; // If we know of the ENR field, add it to the initial construction if let Some(enr_fork_bytes) = enr_fork { diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index c4bf887e94..8f38fb300d 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -9,53 +9,63 @@ use slog::info; use types::EthSpec; pub async fn run(config: BootNodeConfig, log: slog::Logger) { + let BootNodeConfig { + listen_socket, + boot_nodes, + local_enr, + local_key, + discv5_config, + .. 
+ } = config; + // Print out useful information about the generated ENR - let enr_socket = config - .local_enr - .udp4_socket() - .expect("Enr has a UDP socket"); - let eth2_field = config - .local_enr + let enr_v4_socket = local_enr.udp4_socket(); + let enr_v6_socket = local_enr.udp6_socket(); + let eth2_field = local_enr .eth2() .map(|fork_id| hex::encode(fork_id.fork_digest)) .unwrap_or_default(); - info!(log, "Configuration parameters"; "listening_address" => format!("{}:{}", config.listen_socket.ip(), config.listen_socket.port()), "broadcast_address" => format!("{}:{}",enr_socket.ip(), enr_socket.port()), "eth2" => eth2_field); + let pretty_v4_socket = enr_v4_socket.as_ref().map(|addr| addr.to_string()); + let pretty_v6_socket = enr_v6_socket.as_ref().map(|addr| addr.to_string()); + info!( + log, "Configuration parameters"; + "listening_address" => %listen_socket, + "advertised_v4_address" => ?pretty_v4_socket, + "advertised_v6_address" => ?pretty_v6_socket, + "eth2" => eth2_field + ); - info!(log, "Identity established"; "peer_id" => config.local_enr.peer_id().to_string(), "node_id" => config.local_enr.node_id().to_string()); + info!(log, "Identity established"; "peer_id" => %local_enr.peer_id(), "node_id" => %local_enr.node_id()); // build the contactable multiaddr list, adding the p2p protocol - info!(log, "Contact information"; "enr" => config.local_enr.to_base64()); - info!(log, "Contact information"; "multiaddrs" => format!("{:?}", config.local_enr.multiaddr_p2p())); + info!(log, "Contact information"; "enr" => local_enr.to_base64()); + info!(log, "Contact information"; "multiaddrs" => ?local_enr.multiaddr_p2p()); // construct the discv5 server - let mut discv5 = Discv5::new( - config.local_enr.clone(), - config.local_key, - config.discv5_config, - ) - .unwrap(); + let mut discv5 = Discv5::new(local_enr.clone(), local_key, discv5_config).unwrap(); // If there are any bootnodes add them to the routing table - for enr in config.boot_nodes { + for enr in 
boot_nodes { info!( log, "Adding bootnode"; - "address" => ?enr.udp4_socket(), - "peer_id" => enr.peer_id().to_string(), - "node_id" => enr.node_id().to_string() + "ipv4_address" => ?enr.udp4_socket(), + "ipv6_address" => ?enr.udp6_socket(), + "peer_id" => ?enr.peer_id(), + "node_id" => ?enr.node_id() ); - if enr != config.local_enr { + if enr != local_enr { if let Err(e) = discv5.add_enr(enr) { - slog::warn!(log, "Failed adding ENR"; "error" => e.to_string()); + slog::warn!(log, "Failed adding ENR"; "error" => ?e); } } } // start the server - if let Err(e) = discv5.start(config.listen_socket).await { - slog::crit!(log, "Could not start discv5 server"; "error" => e.to_string()); + if let Err(e) = discv5.start(listen_socket).await { + slog::crit!(log, "Could not start discv5 server"; "error" => %e); return; } @@ -72,7 +82,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { let mut event_stream = match discv5.event_stream().await { Ok(stream) => stream, Err(e) => { - slog::crit!(log, "Failed to obtain event stream"; "error" => e.to_string()); + slog::crit!(log, "Failed to obtain event stream"; "error" => %e); return; } }; @@ -81,9 +91,35 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { loop { tokio::select! { _ = metric_interval.tick() => { + // Get some ipv4/ipv6 stats to add in the metrics. 
+ let mut ipv4_only_reachable: usize = 0; + let mut ipv6_only_reachable: usize= 0; + let mut ipv4_ipv6_reachable: usize = 0; + let mut unreachable_nodes: usize = 0; + for enr in discv5.kbuckets().iter_ref().filter_map(|entry| entry.status.is_connected().then_some(entry.node.value)) { + let declares_ipv4 = enr.udp4_socket().is_some(); + let declares_ipv6 = enr.udp6_socket().is_some(); + match (declares_ipv4, declares_ipv6) { + (true, true) => ipv4_ipv6_reachable += 1, + (true, false) => ipv4_only_reachable += 1, + (false, true) => ipv6_only_reachable += 1, + (false, false) => unreachable_nodes += 1, + } + } + // display server metrics let metrics = discv5.metrics(); - info!(log, "Server metrics"; "connected_peers" => discv5.connected_peers(), "active_sessions" => metrics.active_sessions, "requests/s" => format!("{:.2}", metrics.unsolicited_requests_per_second)); + info!( + log, "Server metrics"; + "connected_peers" => discv5.connected_peers(), + "active_sessions" => metrics.active_sessions, + "requests/s" => format_args!("{:.2}", metrics.unsolicited_requests_per_second), + "ipv4_nodes" => ipv4_only_reachable, + "ipv6_nodes" => ipv6_only_reachable, + "ipv6_and_ipv4_nodes" => ipv4_ipv6_reachable, + "unreachable_nodes" => unreachable_nodes, + ); + } Some(event) = event_stream.recv() => { match event { @@ -95,7 +131,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { Discv5Event::TalkRequest(_) => {} // Ignore Discv5Event::NodeInserted { .. } => {} // Ignore Discv5Event::SocketUpdated(socket_addr) => { - info!(log, "External socket address updated"; "socket_addr" => format!("{:?}", socket_addr)); + info!(log, "Advertised socket address updated"; "socket_addr" => %socket_addr); } Discv5Event::SessionEstablished{ .. 
} => {} // Ignore } diff --git a/bors.toml b/bors.toml index 6edf55bfa3..dbe92c68f4 100644 --- a/bors.toml +++ b/bors.toml @@ -23,7 +23,8 @@ status = [ "check-msrv", "slasher-tests", "syncing-simulator-ubuntu", - "disallowed-from-async-lint" + "disallowed-from-async-lint", + "compile-with-beta-compiler" ] use_squash_merge = true timeout_sec = 10800 diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 7987899c3d..d55ef3f3b5 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -6,8 +6,8 @@ PRESET_BASE: 'gnosis' # Transition # --------------------------------------------------------------- -# TBD, 2**256-2**10 is a placeholder -TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912 +# Estimated on Dec 5, 2022 +TERMINAL_TOTAL_DIFFICULTY: 8626000000000000000000058750000000000000000000 # By default, don't use these params TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 @@ -35,7 +35,7 @@ ALTAIR_FORK_VERSION: 0x01000064 ALTAIR_FORK_EPOCH: 512 # Merge BELLATRIX_FORK_VERSION: 0x02000064 -BELLATRIX_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_EPOCH: 385536 # Sharding SHARDING_FORK_VERSION: 0x03000064 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 2bfd003266..7aef784373 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -226,7 +226,7 @@ mod tests { use super::*; use ssz::Encode; use tempfile::Builder as TempBuilder; - use types::{Config, Eth1Data, GnosisEthSpec, Hash256, MainnetEthSpec, GNOSIS}; + use types::{Config, Eth1Data, GnosisEthSpec, Hash256, MainnetEthSpec}; type 
E = MainnetEthSpec; @@ -250,6 +250,13 @@ mod tests { assert_eq!(spec, config.chain_spec::().unwrap()); } + #[test] + fn gnosis_config_eq_chain_spec() { + let config = Eth2NetworkConfig::from_hardcoded_net(&GNOSIS).unwrap(); + let spec = ChainSpec::gnosis(); + assert_eq!(spec, config.chain_spec::().unwrap()); + } + #[test] fn mainnet_genesis_state() { let config = Eth2NetworkConfig::from_hardcoded_net(&MAINNET).unwrap(); @@ -270,7 +277,7 @@ mod tests { .unwrap_or_else(|_| panic!("{:?}", net.name)); // Ensure we can parse the YAML config to a chain spec. - if net.name == GNOSIS { + if net.name == types::GNOSIS { config.chain_spec::().unwrap(); } else { config.chain_spec::().unwrap(); diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index a48ba211d9..afcbae513b 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v3.2.1-", - fallback = "Lighthouse/v3.2.1" + prefix = "Lighthouse/v3.3.0-", + fallback = "Lighthouse/v3.3.0" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/common/system_health/Cargo.toml b/common/system_health/Cargo.toml new file mode 100644 index 0000000000..0956710b82 --- /dev/null +++ b/common/system_health/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "system_health" +version = "0.1.0" +edition = "2021" + +[dependencies] +lighthouse_network = { path = "../../beacon_node/lighthouse_network" } +types = { path = "../../consensus/types" } +sysinfo = "0.26.5" +serde = "1.0.116" +serde_derive = "1.0.116" +serde_json = "1.0.58" +parking_lot = "0.12.0" diff --git a/common/system_health/src/lib.rs b/common/system_health/src/lib.rs new file mode 100644 index 0000000000..d10540e506 --- /dev/null +++ b/common/system_health/src/lib.rs @@ -0,0 +1,241 @@ +use lighthouse_network::{types::SyncState, NetworkGlobals}; +use parking_lot::RwLock; +use serde::{Deserialize, Serialize}; +use std::path::{Path, PathBuf}; +use std::sync::Arc; +use sysinfo::{CpuExt, DiskExt, NetworkExt, NetworksExt, System, SystemExt}; +use types::EthSpec; + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct SystemHealth { + /// Total memory of the system. + pub total_memory: u64, + /// Total free memory available to the system. + pub free_memory: u64, + /// Total used memory. + pub used_memory: u64, + + /// System load average over 1 minute. + pub sys_loadavg_1: f64, + /// System load average over 5 minutes. + pub sys_loadavg_5: f64, + /// System load average over 15 minutes. + pub sys_loadavg_15: f64, + + /// Total cpu cores. + pub cpu_cores: usize, + /// Total cpu threads. + pub cpu_threads: usize, + /// The global cpu frequency. + pub global_cpu_frequency: f32, + + /// Total capacity of disk. + pub disk_bytes_total: u64, + /// Free space in disk. + pub disk_bytes_free: u64, + + /// System uptime. + pub system_uptime: u64, + /// Application uptime. 
+ pub app_uptime: u64, + /// The System name + pub system_name: String, + /// Kernel version + pub kernel_version: String, + /// OS version + pub os_version: String, + /// Hostname + pub host_name: String, +} + +/// System related health, specific to the UI for the validator client. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct SystemHealthVC { + #[serde(flatten)] + pub system_health: SystemHealth, +} + +/// System related health, specific to the UI for the Beacon Node. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct SystemHealthBN { + #[serde(flatten)] + pub system_health: SystemHealth, + /// The name of the network that uses the most traffic. + pub network_name: String, + /// Total bytes received over the main interface. + pub network_bytes_total_received: u64, + /// Total bytes sent over the main interface. + pub network_bytes_total_transmit: u64, + + /// The current NAT status. + pub nat_open: bool, + /// The current number of connected peers. + pub connected_peers: usize, + /// The current syncing state of the consensus node. + pub sync_state: SyncState, +} + +/// Populates the system health. +fn observe_system_health( + sysinfo: Arc>, + data_dir: PathBuf, + app_uptime: u64, +) -> SystemHealth { + let sysinfo = sysinfo.read(); + let loadavg = sysinfo.load_average(); + + let cpus = sysinfo.cpus(); + + let disks = sysinfo.disks(); + + let system_uptime = sysinfo.uptime(); + + // Helper functions to extract specific data + + // Find fs associated with the data dir location and report this + let (disk_bytes_total, disk_bytes_free) = { + // There is no clean way to find this in an OS-agnostic way. We take a simple approach, + // which is attempt to match the mount_point to the data_dir. If this cannot be done, we + // just fallback to the root fs. 
+ + let mut root_fs_disk = None; + let mut other_matching_fs = None; + + for disk in disks.iter() { + if disk.mount_point() == Path::new("/") + || disk.mount_point() == Path::new("C:\\") + || disk.mount_point() == Path::new("/System/Volumes/Data") + { + // Found the usual default root_fs + root_fs_disk = Some(disk); + continue; + } + + // If we have other file systems, compare these to the data_dir of Lighthouse and + // prioritize these. + if data_dir + .to_str() + .map(|path| { + if let Some(mount_str) = disk.mount_point().to_str() { + path.contains(mount_str) + } else { + false + } + }) + .unwrap_or(false) + { + other_matching_fs = Some(disk); + break; // Don't bother finding other competing fs. + } + } + + // If we found a file system other than the root, report this, otherwise just report the + // root fs + let fs = other_matching_fs.or(root_fs_disk); + + // If the root fs is not known, just add up the total of all known partitions + match fs { + Some(fs) => (fs.total_space(), fs.available_space()), + None => { + // If we can't find a known partition, just add them all up + disks.iter().fold((0, 0), |mut current_sizes, disk| { + current_sizes.0 += disk.total_space(); + current_sizes.1 += disk.available_space(); + current_sizes + }) + } + } + }; + + // Attempt to get the clock speed from the name of the CPU + let cpu_frequency_from_name = cpus.iter().next().and_then(|cpu| { + cpu.brand() + .split_once("GHz") + .and_then(|(result, _)| result.trim().rsplit_once(' ')) + .and_then(|(_, result)| result.parse::().ok()) + }); + + let global_cpu_frequency = match cpu_frequency_from_name { + Some(freq) => freq, + None => { + // Get the frequency from average measured frequencies + let global_cpu_frequency: f32 = + cpus.iter().map(|cpu| cpu.frequency()).sum::() as f32 / cpus.len() as f32; + // Shift to ghz to 1dp + (global_cpu_frequency / 100.0).round() / 10.0 + } + }; + + SystemHealth { + total_memory: sysinfo.total_memory(), + free_memory: sysinfo.free_memory(), + 
used_memory: sysinfo.used_memory(), + sys_loadavg_1: loadavg.one, + sys_loadavg_5: loadavg.five, + sys_loadavg_15: loadavg.fifteen, + cpu_cores: sysinfo.physical_core_count().unwrap_or(0), + cpu_threads: cpus.len(), + global_cpu_frequency, + disk_bytes_total, + disk_bytes_free, + system_uptime, + app_uptime, + system_name: sysinfo.name().unwrap_or_else(|| String::from("")), + kernel_version: sysinfo.kernel_version().unwrap_or_else(|| "".into()), + os_version: sysinfo.long_os_version().unwrap_or_else(|| "".into()), + host_name: sysinfo.host_name().unwrap_or_else(|| "".into()), + } +} + +/// Observes the Validator client system health. +pub fn observe_system_health_vc( + sysinfo: Arc>, + data_dir: PathBuf, + app_uptime: u64, +) -> SystemHealthVC { + SystemHealthVC { + system_health: observe_system_health(sysinfo, data_dir, app_uptime), + } +} + +/// Observes the Beacon Node system health. +pub fn observe_system_health_bn( + sysinfo: Arc>, + data_dir: PathBuf, + app_uptime: u64, + network_globals: Arc>, +) -> SystemHealthBN { + let system_health = observe_system_health(sysinfo.clone(), data_dir, app_uptime); + + // Find the network with the most traffic and assume this is the main network + let sysinfo = sysinfo.read(); + let networks = sysinfo.networks(); + let (network_name, network_bytes_total_received, network_bytes_total_transmit) = networks + .iter() + .max_by_key(|(_name, network)| network.total_received()) + .map(|(name, network)| { + ( + name.clone(), + network.total_received(), + network.total_transmitted(), + ) + }) + .unwrap_or_else(|| (String::from("None"), 0, 0)); + + // Determine if the NAT is open or not. 
+ let nat_open = lighthouse_network::metrics::NAT_OPEN + .as_ref() + .map(|v| v.get()) + .unwrap_or(0) + != 0; + + SystemHealthBN { + system_health, + network_name, + network_bytes_total_received, + network_bytes_total_transmit, + nat_open, + connected_peers: network_globals.connected_peers(), + sync_state: network_globals.sync_state(), + } +} diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs index b0cf4551ee..0539cc7d2c 100644 --- a/consensus/ssz_types/src/bitfield.rs +++ b/consensus/ssz_types/src/bitfield.rs @@ -22,7 +22,7 @@ pub trait BitfieldBehaviour: Clone {} /// A marker struct used to declare SSZ `Variable` behaviour on a `Bitfield`. /// /// See the [`Bitfield`](struct.Bitfield.html) docs for usage. -#[derive(Clone, PartialEq, Debug)] +#[derive(Clone, PartialEq, Eq, Debug)] pub struct Variable { _phantom: PhantomData, } @@ -30,7 +30,7 @@ pub struct Variable { /// A marker struct used to declare SSZ `Fixed` behaviour on a `Bitfield`. /// /// See the [`Bitfield`](struct.Bitfield.html) docs for usage. -#[derive(Clone, PartialEq, Debug)] +#[derive(Clone, PartialEq, Eq, Debug)] pub struct Fixed { _phantom: PhantomData, } @@ -96,7 +96,7 @@ pub type BitVector = Bitfield>; /// byte (by `Vec` index) stores the lowest bit-indices and the right-most bit stores the lowest /// bit-index. E.g., `smallvec![0b0000_0001, 0b0000_0010]` has bits `0, 9` set. 
#[derive(Clone, Debug, Derivative)] -#[derivative(PartialEq, Hash(bound = ""))] +#[derivative(PartialEq, Eq, Hash(bound = ""))] pub struct Bitfield { bytes: SmallVec<[u8; SMALLVEC_LEN]>, len: usize, diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index fdd3f95a65..0bd5f61aff 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -1,8 +1,11 @@ +use crate::common::get_indexed_attestation; +use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; +use std::collections::{hash_map::Entry, HashMap}; use std::marker::PhantomData; use tree_hash::TreeHash; use types::{ - BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, Hash256, SignedBeaconBlock, - Slot, + Attestation, AttestationData, BeaconState, BeaconStateError, BitList, ChainSpec, Epoch, + EthSpec, ExecPayload, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, }; #[derive(Debug)] @@ -13,6 +16,9 @@ pub struct ConsensusContext { proposer_index: Option, /// Block root of the block at `slot`. current_block_root: Option, + /// Cache of indexed attestations constructed during block processing. + indexed_attestations: + HashMap<(AttestationData, BitList), IndexedAttestation>, _phantom: PhantomData, } @@ -20,6 +26,7 @@ pub struct ConsensusContext { pub enum ContextError { BeaconState(BeaconStateError), SlotMismatch { slot: Slot, expected: Slot }, + EpochMismatch { epoch: Epoch, expected: Epoch }, } impl From for ContextError { @@ -34,6 +41,7 @@ impl ConsensusContext { slot, proposer_index: None, current_block_root: None, + indexed_attestations: HashMap::new(), _phantom: PhantomData, } } @@ -43,13 +51,39 @@ impl ConsensusContext { self } + /// Strict method for fetching the proposer index. + /// + /// Gets the proposer index for `self.slot` while ensuring that it matches `state.slot()`. 
This + /// method should be used in block processing and almost everywhere the proposer index is + /// required. If the slot check is too restrictive, see `get_proposer_index_from_epoch_state`. pub fn get_proposer_index( &mut self, state: &BeaconState, spec: &ChainSpec, ) -> Result { self.check_slot(state.slot())?; + self.get_proposer_index_no_checks(state, spec) + } + /// More liberal method for fetching the proposer index. + /// + /// Fetches the proposer index for `self.slot` but does not require the state to be from an + /// exactly matching slot (merely a matching epoch). This is useful in batch verification where + /// we want to extract the proposer index from a single state for every slot in the epoch. + pub fn get_proposer_index_from_epoch_state( + &mut self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result { + self.check_epoch(state.current_epoch())?; + self.get_proposer_index_no_checks(state, spec) + } + + fn get_proposer_index_no_checks( + &mut self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result { if let Some(proposer_index) = self.proposer_index { return Ok(proposer_index); } @@ -89,4 +123,39 @@ impl ConsensusContext { }) } } + + fn check_epoch(&self, epoch: Epoch) -> Result<(), ContextError> { + let expected = self.slot.epoch(T::slots_per_epoch()); + if epoch == expected { + Ok(()) + } else { + Err(ContextError::EpochMismatch { epoch, expected }) + } + } + + pub fn get_indexed_attestation( + &mut self, + state: &BeaconState, + attestation: &Attestation, + ) -> Result<&IndexedAttestation, BlockOperationError> { + let key = ( + attestation.data.clone(), + attestation.aggregation_bits.clone(), + ); + + match self.indexed_attestations.entry(key) { + Entry::Occupied(occupied) => Ok(occupied.into_mut()), + Entry::Vacant(vacant) => { + let committee = + state.get_beacon_committee(attestation.data.slot, attestation.data.index)?; + let indexed_attestation = + get_indexed_attestation(committee.committee, attestation)?; + 
Ok(vacant.insert(indexed_attestation)) + } + } + } + + pub fn num_cached_indexed_attestations(&self) -> usize { + self.indexed_attestations.len() + } } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index cccc8eacd9..7d0cb01aeb 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -111,16 +111,13 @@ pub fn per_block_processing>( let verify_signatures = match block_signature_strategy { BlockSignatureStrategy::VerifyBulk => { // Verify all signatures in the block at once. - let block_root = Some(ctxt.get_current_block_root(signed_block)?); - let proposer_index = Some(ctxt.get_proposer_index(state, spec)?); block_verify!( BlockSignatureVerifier::verify_entire_block( state, |i| get_pubkey_from_state(state, i), |pk_bytes| pk_bytes.decompress().ok().map(Cow::Owned), signed_block, - block_root, - proposer_index, + ctxt, spec ) .is_ok(), @@ -339,6 +336,7 @@ pub fn get_new_eth1_data( /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#process_execution_payload pub fn partially_verify_execution_payload>( state: &BeaconState, + block_slot: Slot, payload: &Payload, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { @@ -359,7 +357,7 @@ pub fn partially_verify_execution_payload>( } ); - let timestamp = compute_timestamp_at_slot(state, spec)?; + let timestamp = compute_timestamp_at_slot(state, block_slot, spec)?; block_verify!( payload.timestamp() == timestamp, BlockProcessingError::ExecutionInvalidTimestamp { @@ -383,7 +381,7 @@ pub fn process_execution_payload>( payload: &Payload, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - partially_verify_execution_payload(state, payload, spec)?; + partially_verify_execution_payload(state, state.slot(), payload, spec)?; *state.latest_execution_payload_header_mut()? 
= payload.to_execution_payload_header(); @@ -420,9 +418,10 @@ pub fn is_execution_enabled>( /// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#compute_timestamp_at_slot pub fn compute_timestamp_at_slot( state: &BeaconState, + block_slot: Slot, spec: &ChainSpec, ) -> Result { - let slots_since_genesis = state.slot().as_u64().safe_sub(spec.genesis_slot.as_u64())?; + let slots_since_genesis = block_slot.as_u64().safe_sub(spec.genesis_slot.as_u64())?; slots_since_genesis .safe_mul(spec.seconds_per_slot) .and_then(|since_genesis| state.genesis_time().safe_add(since_genesis)) diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 7584df14ec..5e52ff8cb8 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -1,14 +1,13 @@ #![allow(clippy::integer_arithmetic)] use super::signature_sets::{Error as SignatureSetError, *}; -use crate::common::get_indexed_attestation; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; +use crate::{ConsensusContext, ContextError}; use bls::{verify_signature_sets, PublicKey, PublicKeyBytes, SignatureSet}; use rayon::prelude::*; use std::borrow::Cow; use types::{ - BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, Hash256, IndexedAttestation, - SignedBeaconBlock, + BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, Hash256, SignedBeaconBlock, }; pub type Result = std::result::Result; @@ -28,6 +27,8 @@ pub enum Error { IncorrectBlockProposer { block: u64, local_shuffling: u64 }, /// Failed to load a signature set. The block may be invalid or we failed to process it. SignatureSetError(SignatureSetError), + /// Error related to the consensus context, likely the proposer index or block root calc. 
+ ContextError(ContextError), } impl From for Error { @@ -36,6 +37,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: ContextError) -> Error { + Error::ContextError(e) + } +} + impl From for Error { fn from(e: SignatureSetError) -> Error { match e { @@ -122,12 +129,11 @@ where get_pubkey: F, decompressor: D, block: &'a SignedBeaconBlock, - block_root: Option, - verified_proposer_index: Option, + ctxt: &mut ConsensusContext, spec: &'a ChainSpec, ) -> Result<()> { let mut verifier = Self::new(state, get_pubkey, decompressor, spec); - verifier.include_all_signatures(block, block_root, verified_proposer_index)?; + verifier.include_all_signatures(block, ctxt)?; verifier.verify() } @@ -135,11 +141,14 @@ where pub fn include_all_signatures>( &mut self, block: &'a SignedBeaconBlock, - block_root: Option, - verified_proposer_index: Option, + ctxt: &mut ConsensusContext, ) -> Result<()> { + let block_root = Some(ctxt.get_current_block_root(block)?); + let verified_proposer_index = + Some(ctxt.get_proposer_index_from_epoch_state(self.state, self.spec)?); + self.include_block_proposal(block, block_root, verified_proposer_index)?; - self.include_all_signatures_except_proposal(block, verified_proposer_index)?; + self.include_all_signatures_except_proposal(block, ctxt)?; Ok(()) } @@ -149,12 +158,14 @@ where pub fn include_all_signatures_except_proposal>( &mut self, block: &'a SignedBeaconBlock, - verified_proposer_index: Option, + ctxt: &mut ConsensusContext, ) -> Result<()> { + let verified_proposer_index = + Some(ctxt.get_proposer_index_from_epoch_state(self.state, self.spec)?); self.include_randao_reveal(block, verified_proposer_index)?; self.include_proposer_slashings(block)?; self.include_attester_slashings(block)?; - self.include_attestations(block)?; + self.include_attestations(block, ctxt)?; // Deposits are not included because they can legally have invalid signatures. 
self.include_exits(block)?; self.include_sync_aggregate(block)?; @@ -260,7 +271,8 @@ where pub fn include_attestations>( &mut self, block: &'a SignedBeaconBlock, - ) -> Result>> { + ctxt: &mut ConsensusContext, + ) -> Result<()> { self.sets .sets .reserve(block.message().body().attestations().len()); @@ -270,28 +282,18 @@ where .body() .attestations() .iter() - .try_fold( - Vec::with_capacity(block.message().body().attestations().len()), - |mut vec, attestation| { - let committee = self - .state - .get_beacon_committee(attestation.data.slot, attestation.data.index)?; - let indexed_attestation = - get_indexed_attestation(committee.committee, attestation)?; + .try_for_each(|attestation| { + let indexed_attestation = ctxt.get_indexed_attestation(self.state, attestation)?; - self.sets.push(indexed_attestation_signature_set( - self.state, - self.get_pubkey.clone(), - &attestation.signature, - &indexed_attestation, - self.spec, - )?); - - vec.push(indexed_attestation); - - Ok(vec) - }, - ) + self.sets.push(indexed_attestation_signature_set( + self.state, + self.get_pubkey.clone(), + &attestation.signature, + indexed_attestation, + self.spec, + )?); + Ok(()) + }) .map_err(Error::into) } diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 1000586e66..9f27c4c9a1 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -57,8 +57,14 @@ pub mod base { // Verify and apply each attestation. 
for (i, attestation) in attestations.iter().enumerate() { - verify_attestation_for_block_inclusion(state, attestation, verify_signatures, spec) - .map_err(|e| e.into_with_index(i))?; + verify_attestation_for_block_inclusion( + state, + attestation, + ctxt, + verify_signatures, + spec, + ) + .map_err(|e| e.into_with_index(i))?; let pending_attestation = PendingAttestation { aggregation_bits: attestation.aggregation_bits.clone(), @@ -94,19 +100,11 @@ pub mod altair { ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - let proposer_index = ctxt.get_proposer_index(state, spec)?; attestations .iter() .enumerate() .try_for_each(|(i, attestation)| { - process_attestation( - state, - attestation, - i, - proposer_index, - verify_signatures, - spec, - ) + process_attestation(state, attestation, i, ctxt, verify_signatures, spec) }) } @@ -114,16 +112,24 @@ pub mod altair { state: &mut BeaconState, attestation: &Attestation, att_index: usize, - proposer_index: u64, + ctxt: &mut ConsensusContext, verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { state.build_committee_cache(RelativeEpoch::Previous, spec)?; state.build_committee_cache(RelativeEpoch::Current, spec)?; - let indexed_attestation = - verify_attestation_for_block_inclusion(state, attestation, verify_signatures, spec) - .map_err(|e| e.into_with_index(att_index))?; + let proposer_index = ctxt.get_proposer_index(state, spec)?; + + let attesting_indices = &verify_attestation_for_block_inclusion( + state, + attestation, + ctxt, + verify_signatures, + spec, + ) + .map_err(|e| e.into_with_index(att_index))? 
+ .attesting_indices; // Matching roots, participation flag indices let data = &attestation.data; @@ -135,7 +141,7 @@ pub mod altair { let total_active_balance = state.get_total_active_balance()?; let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?; let mut proposer_reward_numerator = 0; - for index in &indexed_attestation.attesting_indices { + for index in attesting_indices { let index = *index as usize; for (flag_index, &weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() { diff --git a/consensus/state_processing/src/per_block_processing/verify_attestation.rs b/consensus/state_processing/src/per_block_processing/verify_attestation.rs index 5d8113af4f..303a6e3913 100644 --- a/consensus/state_processing/src/per_block_processing/verify_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attestation.rs @@ -1,7 +1,7 @@ use super::errors::{AttestationInvalid as Invalid, BlockOperationError}; use super::VerifySignatures; -use crate::common::get_indexed_attestation; use crate::per_block_processing::is_valid_indexed_attestation; +use crate::ConsensusContext; use safe_arith::SafeArith; use types::*; @@ -15,12 +15,13 @@ fn error(reason: Invalid) -> BlockOperationError { /// to `state`. Otherwise, returns a descriptive `Err`. /// /// Optionally verifies the aggregate signature, depending on `verify_signatures`. 
-pub fn verify_attestation_for_block_inclusion( +pub fn verify_attestation_for_block_inclusion<'ctxt, T: EthSpec>( state: &BeaconState, attestation: &Attestation, + ctxt: &'ctxt mut ConsensusContext, verify_signatures: VerifySignatures, spec: &ChainSpec, -) -> Result> { +) -> Result<&'ctxt IndexedAttestation> { let data = &attestation.data; verify!( @@ -39,7 +40,7 @@ pub fn verify_attestation_for_block_inclusion( } ); - verify_attestation_for_state(state, attestation, verify_signatures, spec) + verify_attestation_for_state(state, attestation, ctxt, verify_signatures, spec) } /// Returns `Ok(())` if `attestation` is a valid attestation to the chain that precedes the given @@ -49,12 +50,13 @@ pub fn verify_attestation_for_block_inclusion( /// prior blocks in `state`. /// /// Spec v0.12.1 -pub fn verify_attestation_for_state( +pub fn verify_attestation_for_state<'ctxt, T: EthSpec>( state: &BeaconState, attestation: &Attestation, + ctxt: &'ctxt mut ConsensusContext, verify_signatures: VerifySignatures, spec: &ChainSpec, -) -> Result> { +) -> Result<&'ctxt IndexedAttestation> { let data = &attestation.data; verify!( @@ -66,9 +68,8 @@ pub fn verify_attestation_for_state( verify_casper_ffg_vote(attestation, state)?; // Check signature and bitfields - let committee = state.get_beacon_committee(attestation.data.slot, attestation.data.index)?; - let indexed_attestation = get_indexed_attestation(committee.committee, attestation)?; - is_valid_indexed_attestation(state, &indexed_attestation, verify_signatures, spec)?; + let indexed_attestation = ctxt.get_indexed_attestation(state, attestation)?; + is_valid_indexed_attestation(state, indexed_attestation, verify_signatures, spec)?; Ok(indexed_attestation) } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 2b28e30778..19c945c73e 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -780,7 +780,7 @@ impl ChainSpec { domain_sync_committee_selection_proof: 8, 
domain_contribution_and_proof: 9, altair_fork_version: [0x01, 0x00, 0x00, 0x64], - altair_fork_epoch: Some(Epoch::new(256)), + altair_fork_epoch: Some(Epoch::new(512)), /* * Merge hard fork params @@ -791,14 +791,11 @@ impl ChainSpec { .expect("pow does not overflow"), proportional_slashing_multiplier_bellatrix: 3, bellatrix_fork_version: [0x02, 0x00, 0x00, 0x64], - bellatrix_fork_epoch: None, - terminal_total_difficulty: Uint256::MAX - .checked_sub(Uint256::from(2u64.pow(10))) - .expect("subtraction does not overflow") - // Add 1 since the spec declares `2**256 - 2**10` and we use - // `Uint256::MAX` which is `2*256- 1`. - .checked_add(Uint256::one()) - .expect("addition does not overflow"), + bellatrix_fork_epoch: Some(Epoch::new(385536)), + terminal_total_difficulty: Uint256::from_dec_str( + "8626000000000000000000058750000000000000000000", + ) + .expect("terminal_total_difficulty is a valid integer"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), safe_slots_to_import_optimistically: 128u64, diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index c0023f3505..33accfc057 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -98,10 +98,9 @@ fn parse_client_config( cli_args: &ArgMatches, _env: &Environment, ) -> Result { - let mut client_config = ClientConfig { - data_dir: get_data_dir(cli_args), - ..Default::default() - }; + let mut client_config = ClientConfig::default(); + + client_config.set_data_dir(get_data_dir(cli_args)); if let Some(freezer_dir) = clap_utils::parse_optional(cli_args, "freezer-dir")? { client_config.freezer_db_path = Some(freezer_dir); @@ -289,7 +288,7 @@ pub fn prune_payloads( } /// Run the database manager, returning an error string if the operation did not succeed. 
-pub fn run(cli_args: &ArgMatches<'_>, mut env: Environment) -> Result<(), String> { +pub fn run(cli_args: &ArgMatches<'_>, env: Environment) -> Result<(), String> { let client_config = parse_client_config(cli_args, &env)?; let context = env.core_context(); let log = context.log().clone(); diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index b4f630ae15..638ab46bfb 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "3.2.1" +version = "3.3.0" authors = ["Paul Hauner "] edition = "2021" diff --git a/lcli/src/block_root.rs b/lcli/src/block_root.rs index 7631872c5c..a47b48a30a 100644 --- a/lcli/src/block_root.rs +++ b/lcli/src/block_root.rs @@ -37,7 +37,7 @@ use types::{EthSpec, FullPayload, SignedBeaconBlock}; const HTTP_TIMEOUT: Duration = Duration::from_secs(5); -pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { +pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), String> { let spec = &T::default_spec(); let executor = env.core_context().executor; diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs index 80bcff9094..34144cd86d 100644 --- a/lcli/src/eth1_genesis.rs +++ b/lcli/src/eth1_genesis.rs @@ -13,7 +13,7 @@ use types::EthSpec; pub const ETH1_GENESIS_UPDATE_INTERVAL: Duration = Duration::from_millis(7_000); pub fn run( - mut env: Environment, + env: Environment, testnet_dir: PathBuf, matches: &ArgMatches<'_>, ) -> Result<(), String> { diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 8b233d847b..9d548b0499 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -789,6 +789,7 @@ fn run( max_log_size: 0, max_log_number: 0, compression: false, + is_restricted: true, }) .map_err(|e| format!("should start logger: {:?}", e))? 
.build() diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index 8bd9af99ad..49d1dd424d 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -59,7 +59,7 @@ use types::{BeaconState, CloneConfig, EthSpec, Hash256}; const HTTP_TIMEOUT: Duration = Duration::from_secs(10); -pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { +pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), String> { let spec = &T::default_spec(); let executor = env.core_context().executor; diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index b25cec81b5..44a1772ccd 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -94,7 +94,7 @@ struct Config { exclude_post_block_thc: bool, } -pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { +pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), String> { let spec = &T::default_spec(); let executor = env.core_context().executor; @@ -339,6 +339,10 @@ fn do_transition( .map_err(|e| format!("Unable to build caches: {:?}", e))?; debug!("Build all caches (again): {:?}", t.elapsed()); + let mut ctxt = ConsensusContext::new(pre_state.slot()) + .set_current_block_root(block_root) + .set_proposer_index(block.message().proposer_index()); + if !config.no_signature_verification { let get_pubkey = move |validator_index| { validator_pubkey_cache @@ -359,18 +363,20 @@ fn do_transition( get_pubkey, decompressor, &block, - Some(block_root), - Some(block.message().proposer_index()), + &mut ctxt, spec, ) .map_err(|e| format!("Invalid block signature: {:?}", e))?; debug!("Batch verify block signatures: {:?}", t.elapsed()); + + // Signature verification should prime the indexed attestation cache. 
+ assert_eq!( + ctxt.num_cached_indexed_attestations(), + block.message().body().attestations().len() + ); } let t = Instant::now(); - let mut ctxt = ConsensusContext::new(pre_state.slot()) - .set_current_block_root(block_root) - .set_proposer_index(block.message().proposer_index()); per_block_processing( &mut pre_state, &block, diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 1b745a9b1e..43b06f01df 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "3.2.1" +version = "3.3.0" authors = ["Sigma Prime "] edition = "2021" autotests = false diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 49163b96f4..fad7edeb19 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -55,6 +55,7 @@ pub struct LoggerConfig { pub max_log_size: u64, pub max_log_number: usize, pub compression: bool, + pub is_restricted: bool, } impl Default for LoggerConfig { fn default() -> Self { @@ -68,6 +69,7 @@ impl Default for LoggerConfig { max_log_size: 200, max_log_number: 5, compression: false, + is_restricted: true, } } } @@ -257,7 +259,7 @@ impl EnvironmentBuilder { .rotate_size(config.max_log_size) .rotate_keep(config.max_log_number) .rotate_compress(config.compression) - .restrict_permissions(true) + .restrict_permissions(config.is_restricted) .build() .map_err(|e| format!("Unable to build file logger: {}", e))?; @@ -380,7 +382,7 @@ impl Environment { } /// Returns a `Context` where no "service" has been added to the logger output. - pub fn core_context(&mut self) -> RuntimeContext { + pub fn core_context(&self) -> RuntimeContext { RuntimeContext { executor: TaskExecutor::new( Arc::downgrade(self.runtime()), @@ -395,7 +397,7 @@ impl Environment { } /// Returns a `Context` where the `service_name` is added to the logger output. 
- pub fn service_context(&mut self, service_name: String) -> RuntimeContext { + pub fn service_context(&self, service_name: String) -> RuntimeContext { RuntimeContext { executor: TaskExecutor::new( Arc::downgrade(self.runtime()), diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 57e413a25a..54dcc2291f 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -129,6 +129,15 @@ fn main() { to store old logs.") .global(true), ) + .arg( + Arg::with_name("logfile-no-restricted-perms") + .long("logfile-no-restricted-perms") + .help( + "If present, log files will be generated as world-readable meaning they can be read by \ + any user on the machine. Note that logs can often contain sensitive information \ + about your validator and so this flag should be used with caution.") + .global(true), + ) .arg( Arg::with_name("log-format") .long("log-format") @@ -408,6 +417,8 @@ fn run( let logfile_compress = matches.is_present("logfile-compress"); + let logfile_restricted = !matches.is_present("logfile-no-restricted-perms"); + // Construct the path to the log file. 
let mut log_path: Option = clap_utils::parse_optional(matches, "logfile")?; if log_path.is_none() { @@ -447,6 +458,7 @@ fn run( max_log_size: logfile_max_size * 1_024 * 1_024, max_log_number: logfile_max_number, compression: logfile_compress, + is_restricted: logfile_restricted, }; let builder = environment_builder.initialize_logger(logger_config.clone())?; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index d69361a3a4..d39235cb13 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -56,7 +56,9 @@ impl CommandLineTestExec for CommandLineTest { fn datadir_flag() { CommandLineTest::new() .run_with_zero_port() - .with_config_and_dir(|config, dir| assert_eq!(config.data_dir, dir.path().join("beacon"))); + .with_config_and_dir(|config, dir| { + assert_eq!(*config.data_dir(), dir.path().join("beacon")) + }); } #[test] @@ -1546,6 +1548,23 @@ fn enabled_disable_log_timestamp_flag() { assert!(config.logger_config.disable_log_timestamp); }); } +#[test] +fn logfile_restricted_perms_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert!(config.logger_config.is_restricted); + }); +} +#[test] +fn logfile_no_restricted_perms_flag() { + CommandLineTest::new() + .flag("logfile-no-restricted-perms", None) + .run_with_zero_port() + .with_config(|config| { + assert!(config.logger_config.is_restricted == false); + }); +} #[test] fn sync_eth1_chain_default() { @@ -1585,7 +1604,7 @@ fn sync_eth1_chain_disable_deposit_contract_sync_flag() { fn light_client_server_default() { CommandLineTest::new() .run_with_zero_port() - .with_config(|config| assert_eq!(config.chain.enable_light_client_server, false)); + .with_config(|config| assert_eq!(config.network.enable_light_client_server, false)); } #[test] @@ -1593,5 +1612,16 @@ fn light_client_server_enabled() { CommandLineTest::new() .flag("light-client-server", None) .run_with_zero_port() - .with_config(|config| 
assert_eq!(config.chain.enable_light_client_server, true)); + .with_config(|config| assert_eq!(config.network.enable_light_client_server, true)); +} + +#[test] +fn gui_flag() { + CommandLineTest::new() + .flag("gui", None) + .run_with_zero_port() + .with_config(|config| { + assert!(config.http_api.enabled); + assert!(config.validator_monitor_auto); + }); } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 8faf4db821..039efb3684 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -7,7 +7,7 @@ use beacon_chain::{ obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, }, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainTypes, CachedHead, CountUnrealized, + BeaconChainTypes, CachedHead, CountUnrealized, NotifyExecutionLayer, }; use execution_layer::{json_structures::JsonPayloadStatusV1Status, PayloadStatusV1}; use serde::Deserialize; @@ -388,6 +388,7 @@ impl Tester { block_root, block.clone(), CountUnrealized::False, + NotifyExecutionLayer::Yes, ))?; if result.is_ok() != valid { return Err(Error::DidntFail(format!( diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index a351a597c0..aaa725f567 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -80,7 +80,6 @@ impl Operation for Attestation { _: &Operations, ) -> Result<(), BlockProcessingError> { let mut ctxt = ConsensusContext::new(state.slot()); - let proposer_index = ctxt.get_proposer_index(state, spec)?; match state { BeaconState::Base(_) => base::process_attestations( state, @@ -89,14 +88,9 @@ impl Operation for Attestation { &mut ctxt, spec, ), - BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_attestation( - state, - self, - 0, - proposer_index, - VerifySignatures::True, - spec, - ), + BeaconState::Altair(_) | BeaconState::Merge(_) => { + 
altair::process_attestation(state, self, 0, &mut ctxt, VerifySignatures::True, spec) + } } } } diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index 1fe7bf0f05..f643fbd5f2 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ b/testing/execution_engine_integration/src/nethermind.rs @@ -8,7 +8,10 @@ use std::process::{Child, Command, Output}; use tempfile::TempDir; use unused_port::unused_tcp_port; -const NETHERMIND_BRANCH: &str = "master"; +/// We've pinned the Nethermind version since our method of using the `master` branch to +/// find the latest tag isn't working. It appears Nethermind don't always tag on `master`. +/// We should fix this so we always pull the latest version of Nethermind. +const NETHERMIND_BRANCH: &str = "release/1.14.6"; const NETHERMIND_REPO_URL: &str = "https://github.com/NethermindEth/nethermind"; fn build_result(repo_dir: &Path) -> Output { diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 6e9f37ff1f..5455b48bce 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -18,7 +18,7 @@ use types::{ Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256, }; -const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(20); +const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(30); struct ExecutionPair { /// The Lighthouse `ExecutionLayer` struct, connected to the `execution_engine` via HTTP. 
diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index 0933bff4c6..d0a4ef9491 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -48,7 +48,7 @@ impl LocalBeaconNode { .tempdir() .expect("should create temp directory for client datadir"); - client_config.data_dir = datadir.path().into(); + client_config.set_data_dir(datadir.path().into()); client_config.network.network_dir = PathBuf::from(datadir.path()).join("network"); ProductionBeaconNode::new(context, client_config) diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 3d59013f2a..8284bff609 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -67,6 +67,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { max_log_size: 0, max_log_number: 0, compression: false, + is_restricted: true, })? .multi_threaded_tokio_runtime()? .build()?; diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index 06f9e9a4f3..53c4447da2 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -52,6 +52,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { max_log_size: 0, max_log_number: 0, compression: false, + is_restricted: true, })? .multi_threaded_tokio_runtime()? .build()?; diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index 00e439e4c9..1c8b41f057 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -56,6 +56,7 @@ fn syncing_sim( max_log_size: 0, max_log_number: 0, compression: false, + is_restricted: true, })? .multi_threaded_tokio_runtime()? 
.build()?; diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index edfc330ce9..b29d97d60d 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -58,4 +58,6 @@ task_executor = { path = "../common/task_executor" } reqwest = { version = "0.11.0", features = ["json","stream"] } url = "2.2.2" malloc_utils = { path = "../common/malloc_utils" } +sysinfo = "0.26.5" +system_health = { path = "../common/system_health" } logging = { path = "../common/logging" } diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index aba1b658a0..2104bdf73e 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -20,6 +20,7 @@ use eth2::lighthouse_vc::{ types::{self as api_types, GenericResponse, PublicKey, PublicKeyBytes}, }; use lighthouse_version::version_with_platform; +use parking_lot::RwLock; use serde::{Deserialize, Serialize}; use slog::{crit, info, warn, Logger}; use slot_clock::SlotClock; @@ -28,6 +29,8 @@ use std::marker::PhantomData; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; use std::sync::Arc; +use sysinfo::{System, SystemExt}; +use system_health::observe_system_health_vc; use task_executor::TaskExecutor; use types::{ChainSpec, ConfigAndPreset, EthSpec}; use validator_dir::{keystore_password_path, Builder as ValidatorDirBuilder}; @@ -205,6 +208,35 @@ pub fn serve( let api_token_path_inner = api_token_path.clone(); let api_token_path_filter = warp::any().map(move || api_token_path_inner.clone()); + // Create a `warp` filter that provides access to local system information. 
+ let system_info = Arc::new(RwLock::new(sysinfo::System::new())); + { + // grab write access for initialisation + let mut system_info = system_info.write(); + system_info.refresh_disks_list(); + system_info.refresh_networks_list(); + } // end lock + + let system_info_filter = + warp::any() + .map(move || system_info.clone()) + .map(|sysinfo: Arc>| { + { + // refresh stats + let mut sysinfo_lock = sysinfo.write(); + sysinfo_lock.refresh_memory(); + sysinfo_lock.refresh_cpu_specifics(sysinfo::CpuRefreshKind::everything()); + sysinfo_lock.refresh_cpu(); + sysinfo_lock.refresh_system(); + sysinfo_lock.refresh_networks(); + sysinfo_lock.refresh_disks(); + } // end lock + sysinfo + }); + + let app_start = std::time::Instant::now(); + let app_start_filter = warp::any().map(move || app_start); + // GET lighthouse/version let get_node_version = warp::path("lighthouse") .and(warp::path("version")) @@ -301,6 +333,24 @@ pub fn serve( }, ); + // GET lighthouse/ui/health + let get_lighthouse_ui_health = warp::path("lighthouse") + .and(warp::path("ui")) + .and(warp::path("health")) + .and(warp::path::end()) + .and(system_info_filter) + .and(app_start_filter) + .and(validator_dir_filter.clone()) + .and(signer.clone()) + .and_then(|sysinfo, app_start: std::time::Instant, val_dir, signer| { + blocking_signed_json_task(signer, move || { + let app_uptime = app_start.elapsed().as_secs() as u64; + Ok(api_types::GenericResponse::from(observe_system_health_vc( + sysinfo, val_dir, app_uptime, + ))) + }) + }); + // POST lighthouse/validators/ let post_validators = warp::path("lighthouse") .and(warp::path("validators")) @@ -977,6 +1027,7 @@ pub fn serve( .or(get_lighthouse_spec) .or(get_lighthouse_validators) .or(get_lighthouse_validators_pubkey) + .or(get_lighthouse_ui_health) .or(get_fee_recipient) .or(get_gas_limit) .or(get_std_keystores)