From 3b117f4bf68666747b20e39e4333073a7764b1e2 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 12 Apr 2023 01:48:19 +0000 Subject: [PATCH 01/16] Add a flag to disable peer scoring (#4135) ## Issue Addressed N/A ## Proposed Changes Adds a flag for disabling peer scoring. This is useful for local testing and testing small networks for new features. --- beacon_node/http_api/src/test_utils.rs | 1 + beacon_node/lighthouse_network/src/config.rs | 4 ++ .../lighthouse_network/src/discovery/mod.rs | 1 + .../src/peer_manager/peerdb.rs | 39 +++++++++++++++++-- .../lighthouse_network/src/service/mod.rs | 1 + .../lighthouse_network/src/types/globals.rs | 4 +- .../network/src/beacon_processor/tests.rs | 1 + beacon_node/src/cli.rs | 8 ++++ beacon_node/src/config.rs | 4 ++ lighthouse/tests/beacon_node.rs | 7 ++++ 10 files changed, 65 insertions(+), 5 deletions(-) diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index 6f918e1b9e..8dc9be7dd4 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -155,6 +155,7 @@ pub async fn create_api_server_on_port( None, meta_data, vec![], + false, &log, )); diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 79041f6d90..d8efa20209 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -101,6 +101,9 @@ pub struct Config { /// List of trusted libp2p nodes which are not scored. pub trusted_peers: Vec, + /// Disables peer scoring altogether. 
+ pub disable_peer_scoring: bool, + /// Client version pub client_version: String, @@ -309,6 +312,7 @@ impl Default for Config { boot_nodes_multiaddr: vec![], libp2p_nodes: vec![], trusted_peers: vec![], + disable_peer_scoring: false, client_version: lighthouse_version::version_with_platform(), disable_discovery: false, upnp_enabled: true, diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 2966644a89..13fdf8ed57 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1162,6 +1162,7 @@ mod tests { syncnets: Default::default(), }), vec![], + false, &log, ); Discovery::new(&keypair, &config, Arc::new(globals), &log) diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 70d3399d6a..2087065688 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -41,12 +41,14 @@ pub struct PeerDB { disconnected_peers: usize, /// Counts banned peers in total and per ip banned_peers_count: BannedPeersCount, + /// Specifies if peer scoring is disabled. 
+ disable_peer_scoring: bool, /// PeerDB's logger log: slog::Logger, } impl PeerDB { - pub fn new(trusted_peers: Vec, log: &slog::Logger) -> Self { + pub fn new(trusted_peers: Vec, disable_peer_scoring: bool, log: &slog::Logger) -> Self { // Initialize the peers hashmap with trusted peers let peers = trusted_peers .into_iter() @@ -56,6 +58,7 @@ impl PeerDB { log: log.clone(), disconnected_peers: 0, banned_peers_count: BannedPeersCount::default(), + disable_peer_scoring, peers, } } @@ -704,7 +707,11 @@ impl PeerDB { warn!(log_ref, "Updating state of unknown peer"; "peer_id" => %peer_id, "new_state" => ?new_state); } - PeerInfo::default() + if self.disable_peer_scoring { + PeerInfo::trusted_peer_info() + } else { + PeerInfo::default() + } }); // Ban the peer if the score is not already low enough. @@ -1300,7 +1307,7 @@ mod tests { fn get_db() -> PeerDB { let log = build_log(slog::Level::Debug, false); - PeerDB::new(vec![], &log) + PeerDB::new(vec![], false, &log) } #[test] @@ -1999,7 +2006,7 @@ mod tests { fn test_trusted_peers_score() { let trusted_peer = PeerId::random(); let log = build_log(slog::Level::Debug, false); - let mut pdb: PeerDB = PeerDB::new(vec![trusted_peer], &log); + let mut pdb: PeerDB = PeerDB::new(vec![trusted_peer], false, &log); pdb.connect_ingoing(&trusted_peer, "/ip4/0.0.0.0".parse().unwrap(), None); @@ -2018,4 +2025,28 @@ mod tests { Score::max_score().score() ); } + + #[test] + fn test_disable_peer_scoring() { + let peer = PeerId::random(); + let log = build_log(slog::Level::Debug, false); + let mut pdb: PeerDB = PeerDB::new(vec![], true, &log); + + pdb.connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap(), None); + + // Check trusted status and score + assert!(pdb.peer_info(&peer).unwrap().is_trusted()); + assert_eq!( + pdb.peer_info(&peer).unwrap().score().score(), + Score::max_score().score() + ); + + // Adding/Subtracting score should have no effect on a trusted peer + add_score(&mut pdb, &peer, -50.0); + + assert_eq!( + 
pdb.peer_info(&peer).unwrap().score().score(), + Score::max_score().score() + ); + } } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index dc9b44849f..f815e3bd36 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -171,6 +171,7 @@ impl Network { .iter() .map(|x| PeerId::from(x.clone())) .collect(), + config.disable_peer_scoring, &log, ); Arc::new(globals) diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index ee2b300e20..43e8ebd76a 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -39,6 +39,7 @@ impl NetworkGlobals { listen_port_tcp6: Option, local_metadata: MetaData, trusted_peers: Vec, + disable_peer_scoring: bool, log: &slog::Logger, ) -> Self { NetworkGlobals { @@ -48,7 +49,7 @@ impl NetworkGlobals { listen_port_tcp4, listen_port_tcp6, local_metadata: RwLock::new(local_metadata), - peers: RwLock::new(PeerDB::new(trusted_peers, log)), + peers: RwLock::new(PeerDB::new(trusted_peers, disable_peer_scoring, log)), gossipsub_subscriptions: RwLock::new(HashSet::new()), sync_state: RwLock::new(SyncState::Stalled), backfill_state: RwLock::new(BackFillState::NotRequired), @@ -144,6 +145,7 @@ impl NetworkGlobals { syncnets: Default::default(), }), vec![], + false, log, ) } diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index b7c102ae11..4b0a159eb4 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -185,6 +185,7 @@ impl TestRig { None, meta_data, vec![], + false, &log, )); diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 71d7d68c45..72a5dda952 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -233,6 +233,14 @@ pub fn 
cli_app<'a, 'b>() -> App<'a, 'b> { .help("Disables the discv5 discovery protocol. The node will not search for new peers or participate in the discovery protocol.") .takes_value(false), ) + .arg( + Arg::with_name("disable-peer-scoring") + .long("disable-peer-scoring") + .help("Disables peer scoring in lighthouse. WARNING: This is a dev only flag is only meant to be used in local testing scenarios \ + Using this flag on a real network may cause your node to become eclipsed and see a different view of the network") + .takes_value(false) + .hidden(true), + ) .arg( Arg::with_name("trusted-peers") .long("trusted-peers") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 8799bdeeec..6ad1fea3b2 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1008,6 +1008,10 @@ pub fn set_network_config( .collect::, _>>()?; } + if cli_args.is_present("disable-peer-scoring") { + config.disable_peer_scoring = true; + } + if let Some(trusted_peers_str) = cli_args.value_of("trusted-peers") { config.trusted_peers = trusted_peers_str .split(',') diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index a61d9cbf74..64646a6c57 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1045,6 +1045,13 @@ fn disable_discovery_flag() { .with_config(|config| assert!(config.network.disable_discovery)); } #[test] +fn disable_peer_scoring_flag() { + CommandLineTest::new() + .flag("disable-peer-scoring", None) + .run_with_zero_port() + .with_config(|config| assert!(config.network.disable_peer_scoring)); +} +#[test] fn disable_upnp_flag() { CommandLineTest::new() .flag("disable-upnp", None) From 0e2e23e08859e0d241a76331c8bd4ba5aacd12bc Mon Sep 17 00:00:00 2001 From: Mac L Date: Wed, 12 Apr 2023 01:48:21 +0000 Subject: [PATCH 02/16] Remove the unused `ExecutionOptimisticForkVersionedResponse` type (#4160) ## Issue Addressed #4146 ## Proposed Changes Removes the `ExecutionOptimisticForkVersionedResponse` 
type and the associated Beacon API endpoint which is now deprecated. Also removes the test associated with the endpoint. --- beacon_node/http_api/tests/tests.rs | 15 -------- common/eth2/src/lib.rs | 17 --------- .../types/src/fork_versioned_response.rs | 37 ------------------- consensus/types/src/lib.rs | 4 +- 4 files changed, 1 insertion(+), 72 deletions(-) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 0ef27febea..a54f17e96f 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1875,21 +1875,6 @@ impl ApiTester { .unwrap(); assert_eq!(result_ssz, expected, "{:?}", state_id); - // Check legacy v1 API. - let result_v1 = self - .client - .get_debug_beacon_states_v1(state_id.0) - .await - .unwrap(); - - if let (Some(json), Some(expected)) = (&result_v1, &expected) { - assert_eq!(json.version, None); - assert_eq!(json.data, *expected, "{:?}", state_id); - } else { - assert_eq!(result_v1, None); - assert_eq!(expected, None); - } - // Check that version headers are provided. let url = self .client diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 1a7cf29790..e03cc2e9b0 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1280,23 +1280,6 @@ impl BeaconNodeHttpClient { self.get_opt(path).await } - /// `GET v1/debug/beacon/states/{state_id}` (LEGACY) - pub async fn get_debug_beacon_states_v1( - &self, - state_id: StateId, - ) -> Result>>, Error> { - let mut path = self.eth_path(V1)?; - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
- .push("debug") - .push("beacon") - .push("states") - .push(&state_id.to_string()); - - self.get_opt(path).await - } - /// `GET debug/beacon/states/{state_id}` /// `-H "accept: application/octet-stream"` pub async fn get_debug_beacon_states_ssz( diff --git a/consensus/types/src/fork_versioned_response.rs b/consensus/types/src/fork_versioned_response.rs index 45df151eb4..2d97dc1219 100644 --- a/consensus/types/src/fork_versioned_response.rs +++ b/consensus/types/src/fork_versioned_response.rs @@ -45,43 +45,6 @@ where } } -#[derive(Debug, PartialEq, Clone, Serialize)] -pub struct ExecutionOptimisticForkVersionedResponse { - #[serde(skip_serializing_if = "Option::is_none")] - pub version: Option, - pub execution_optimistic: Option, - pub data: T, -} - -impl<'de, F> serde::Deserialize<'de> for ExecutionOptimisticForkVersionedResponse -where - F: ForkVersionDeserialize, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - #[derive(Deserialize)] - struct Helper { - version: Option, - execution_optimistic: Option, - data: serde_json::Value, - } - - let helper = Helper::deserialize(deserializer)?; - let data = match helper.version { - Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?, - None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?, - }; - - Ok(ExecutionOptimisticForkVersionedResponse { - version: helper.version, - execution_optimistic: helper.execution_optimistic, - data, - }) - } -} - pub trait ForkVersionDeserialize: Sized + DeserializeOwned { fn deserialize_by_fork<'de, D: Deserializer<'de>>( value: Value, diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 8240742441..aefb45490a 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -143,9 +143,7 @@ pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; pub use crate::fork_name::{ForkName, InconsistentFork}; -pub use 
crate::fork_versioned_response::{ - ExecutionOptimisticForkVersionedResponse, ForkVersionDeserialize, ForkVersionedResponse, -}; +pub use crate::fork_versioned_response::{ForkVersionDeserialize, ForkVersionedResponse}; pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::IndexedAttestation; From 00cf5fc1848181ca69ed39e1447916b0a4387aba Mon Sep 17 00:00:00 2001 From: ethDreamer Date: Wed, 12 Apr 2023 01:48:22 +0000 Subject: [PATCH 03/16] Remove Redundant Trait Bound (#4169) I realized this is redundant while reasoning about how the `store` is implemented given the [definition of `ItemStore`](https://github.com/sigp/lighthouse/blob/v4.0.1/beacon_node/store/src/lib.rs#L107) ```rust pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'static { ... } ``` --- beacon_node/store/src/reconstruct.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index c939fd3f51..c399f1b457 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -1,6 +1,6 @@ //! Implementation of historic state reconstruction (given complete block history). 
use crate::hot_cold_store::{HotColdDB, HotColdDBError}; -use crate::{Error, ItemStore, KeyValueStore}; +use crate::{Error, ItemStore}; use itertools::{process_results, Itertools}; use slog::info; use state_processing::{ @@ -13,8 +13,8 @@ use types::{EthSpec, Hash256}; impl HotColdDB where E: EthSpec, - Hot: KeyValueStore + ItemStore, - Cold: KeyValueStore + ItemStore, + Hot: ItemStore, + Cold: ItemStore, { pub fn reconstruct_historic_states(self: &Arc) -> Result<(), Error> { let mut anchor = if let Some(anchor) = self.get_anchor_info() { From b90c0c3fb1badd86247b8a338b44e7e17e134a85 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 13 Apr 2023 07:05:01 +0000 Subject: [PATCH 04/16] Make re-org strat more cautious and add more config (#4151) ## Proposed Changes This change attempts to prevent failed re-orgs by: 1. Lowering the re-org cutoff from 2s to 1s. This is informed by a failed re-org attempted by @yorickdowne's node. The failed block was requested in the 1.5-2s window due to a Vouch failure, and failed to propagate to the majority of the network before the attestation deadline at 4s. 2. Allow users to adjust their re-org cutoff depending on observed network conditions and their risk profile. The static 2 second cutoff was too rigid. 3. Add a `--proposer-reorg-disallowed-offsets` flag which can be used to prohibit reorgs at certain slots. This is intended to help workaround an issue whereby reorging blocks at slot 1 are currently taking ~1.6s to propagate on gossip rather than ~500ms. This is suspected to be due to a cache miss in current versions of Prysm, which should be fixed in their next release. ## Additional Info I'm of two minds about removing the `shuffling_stable` check which checks for blocks at slot 0 in the epoch. If we removed it users would be able to configure Lighthouse to try reorging at slot 0, which likely wouldn't work very well due to interactions with the proposer index cache. I think we could leave it for now and revisit it later. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 15 ++---- beacon_node/beacon_chain/src/builder.rs | 11 ++++- beacon_node/beacon_chain/src/chain_config.rs | 24 +++++++++- .../http_api/tests/interactive_tests.rs | 35 +++++++++++++- beacon_node/src/cli.rs | 22 +++++++++ beacon_node/src/config.rs | 19 +++++++- book/src/late-block-re-orgs.md | 9 ++++ consensus/fork_choice/src/fork_choice.rs | 8 +++- consensus/proto_array/src/error.rs | 1 + consensus/proto_array/src/lib.rs | 4 +- .../src/proto_array_fork_choice.rs | 40 ++++++++++++++++ lighthouse/tests/beacon_node.rs | 48 +++++++++++++++++++ 12 files changed, 218 insertions(+), 18 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d3c391e4ed..283dcf96c0 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -106,7 +106,6 @@ use task_executor::{ShutdownReason, TaskExecutor}; use tokio_stream::Stream; use tree_hash::TreeHash; use types::beacon_state::CloneConfig; -use types::consts::merge::INTERVALS_PER_SLOT; use types::*; pub type ForkChoiceError = fork_choice::Error; @@ -128,12 +127,6 @@ pub const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1) /// The timeout for the eth1 finalization cache pub const ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_millis(200); -/// The latest delay from the start of the slot at which to attempt a 1-slot re-org. -fn max_re_org_slot_delay(seconds_per_slot: u64) -> Duration { - // Allow at least half of the attestation deadline for the block to propagate. - Duration::from_secs(seconds_per_slot) / INTERVALS_PER_SLOT as u32 / 2 -} - // These keys are all zero because they get stored in different columns, see `DBColumn` type. pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero(); pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero(); @@ -3761,7 +3754,7 @@ impl BeaconChain { // 1. 
It seems we have time to propagate and still receive the proposer boost. // 2. The current head block was seen late. // 3. The `get_proposer_head` conditions from fork choice pass. - let proposing_on_time = slot_delay < max_re_org_slot_delay(self.spec.seconds_per_slot); + let proposing_on_time = slot_delay < self.config.re_org_cutoff(self.spec.seconds_per_slot); if !proposing_on_time { debug!( self.log, @@ -3791,6 +3784,7 @@ impl BeaconChain { slot, canonical_head, re_org_threshold, + &self.config.re_org_disallowed_offsets, self.config.re_org_max_epochs_since_finalization, ) .map_err(|e| match e { @@ -4069,6 +4063,7 @@ impl BeaconChain { .get_preliminary_proposer_head( head_block_root, re_org_threshold, + &self.config.re_org_disallowed_offsets, self.config.re_org_max_epochs_since_finalization, ) .map_err(|e| e.map_inner_error(Error::ProposerHeadForkChoiceError))?; @@ -4079,7 +4074,7 @@ impl BeaconChain { let re_org_block_slot = head_slot + 1; let fork_choice_slot = info.current_slot; - // If a re-orging proposal isn't made by the `max_re_org_slot_delay` then we give up + // If a re-orging proposal isn't made by the `re_org_cutoff` then we give up // and allow the fork choice update for the canonical head through so that we may attest // correctly. 
let current_slot_ok = if head_slot == fork_choice_slot { @@ -4090,7 +4085,7 @@ impl BeaconChain { .and_then(|slot_start| { let now = self.slot_clock.now_duration()?; let slot_delay = now.saturating_sub(slot_start); - Some(slot_delay <= max_re_org_slot_delay(self.spec.seconds_per_slot)) + Some(slot_delay <= self.config.re_org_cutoff(self.spec.seconds_per_slot)) }) .unwrap_or(false) } else { diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 8ad874ea09..6ee97a95c1 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -22,7 +22,7 @@ use fork_choice::{CountUnrealized, ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::RwLock; -use proto_array::ReOrgThreshold; +use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use slasher::Slasher; use slog::{crit, error, info, Logger}; use slot_clock::{SlotClock, TestingSlotClock}; @@ -175,6 +175,15 @@ where self } + /// Sets the proposer re-org disallowed offsets list. + pub fn proposer_re_org_disallowed_offsets( + mut self, + disallowed_offsets: DisallowedReOrgOffsets, + ) -> Self { + self.chain_config.re_org_disallowed_offsets = disallowed_offsets; + self + } + /// Sets the store (database). /// /// Should generally be called early in the build chain. 
diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index c72c3d2cd4..9921435313 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -1,10 +1,12 @@ -pub use proto_array::ReOrgThreshold; +pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use serde_derive::{Deserialize, Serialize}; use std::time::Duration; use types::{Checkpoint, Epoch}; pub const DEFAULT_RE_ORG_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20); pub const DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = Epoch::new(2); +/// Default to 1/12th of the slot, which is 1 second on mainnet. +pub const DEFAULT_RE_ORG_CUTOFF_DENOMINATOR: u32 = 12; pub const DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT: u64 = 250; /// Default fraction of a slot lookahead for payload preparation (12/3 = 4 seconds on mainnet). @@ -34,6 +36,13 @@ pub struct ChainConfig { pub re_org_threshold: Option, /// Maximum number of epochs since finalization for attempting a proposer re-org. pub re_org_max_epochs_since_finalization: Epoch, + /// Maximum delay after the start of the slot at which to propose a reorging block. + pub re_org_cutoff_millis: Option, + /// Additional epoch offsets at which re-orging block proposals are not permitted. + /// + /// By default this list is empty, but it can be useful for reacting to network conditions, e.g. + /// slow gossip of re-org blocks at slot 1 in the epoch. + pub re_org_disallowed_offsets: DisallowedReOrgOffsets, /// Number of milliseconds to wait for fork choice before proposing a block. /// /// If set to 0 then block proposal will not wait for fork choice at all. 
@@ -82,6 +91,8 @@ impl Default for ChainConfig { max_network_size: 10 * 1_048_576, // 10M re_org_threshold: Some(DEFAULT_RE_ORG_THRESHOLD), re_org_max_epochs_since_finalization: DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, + re_org_cutoff_millis: None, + re_org_disallowed_offsets: DisallowedReOrgOffsets::default(), fork_choice_before_proposal_timeout_ms: DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT, // Builder fallback configs that are set in `clap` will override these. builder_fallback_skips: 3, @@ -100,3 +111,14 @@ impl Default for ChainConfig { } } } + +impl ChainConfig { + /// The latest delay from the start of the slot at which to attempt a 1-slot re-org. + pub fn re_org_cutoff(&self, seconds_per_slot: u64) -> Duration { + self.re_org_cutoff_millis + .map(Duration::from_millis) + .unwrap_or_else(|| { + Duration::from_secs(seconds_per_slot) / DEFAULT_RE_ORG_CUTOFF_DENOMINATOR + }) + } +} diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 9763b8037b..da92419744 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1,6 +1,6 @@ //! Generic tests that make use of the (newer) `InteractiveApiTester` use beacon_chain::{ - chain_config::ReOrgThreshold, + chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, }; use eth2::types::DepositContractData; @@ -110,6 +110,8 @@ pub struct ReOrgTest { misprediction: bool, /// Whether to expect withdrawals to change on epoch boundaries. expect_withdrawals_change_on_epoch: bool, + /// Epoch offsets to avoid proposing reorg blocks at. 
+ disallowed_offsets: Vec, } impl Default for ReOrgTest { @@ -127,6 +129,7 @@ impl Default for ReOrgTest { should_re_org: true, misprediction: false, expect_withdrawals_change_on_epoch: false, + disallowed_offsets: vec![], } } } @@ -238,6 +241,32 @@ pub async fn proposer_boost_re_org_head_distance() { .await; } +// Check that a re-org at a disallowed offset fails. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_disallowed_offset() { + let offset = 4; + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(E::slots_per_epoch() + offset - 1), + disallowed_offsets: vec![offset], + should_re_org: false, + ..Default::default() + }) + .await; +} + +// Check that a re-org at the *only* allowed offset succeeds. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_disallowed_offset_exact() { + let offset = 4; + let disallowed_offsets = (0..E::slots_per_epoch()).filter(|o| *o != offset).collect(); + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(E::slots_per_epoch() + offset - 1), + disallowed_offsets, + ..Default::default() + }) + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn proposer_boost_re_org_very_unhealthy() { proposer_boost_re_org_test(ReOrgTest { @@ -286,6 +315,7 @@ pub async fn proposer_boost_re_org_test( should_re_org, misprediction, expect_withdrawals_change_on_epoch, + disallowed_offsets, }: ReOrgTest, ) { assert!(head_slot > 0); @@ -320,6 +350,9 @@ pub async fn proposer_boost_re_org_test( .proposer_re_org_max_epochs_since_finalization(Epoch::new( max_epochs_since_finalization, )) + .proposer_re_org_disallowed_offsets( + DisallowedReOrgOffsets::new::(disallowed_offsets).unwrap(), + ) })), ) .await; diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 72a5dda952..8a5c33ac0f 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -885,6 +885,28 @@ pub fn cli_app<'a, 'b>() -> App<'a, 
'b> { allowed. Default: 2") .conflicts_with("disable-proposer-reorgs") ) + .arg( + Arg::with_name("proposer-reorg-cutoff") + .long("proposer-reorg-cutoff") + .value_name("MILLISECONDS") + .help("Maximum delay after the start of the slot at which to propose a reorging \ + block. Lower values can prevent failed reorgs by ensuring the block has \ + ample time to propagate and be processed by the network. The default is \ + 1/12th of a slot (1 second on mainnet)") + .conflicts_with("disable-proposer-reorgs") + ) + .arg( + Arg::with_name("proposer-reorg-disallowed-offsets") + .long("proposer-reorg-disallowed-offsets") + .value_name("N1,N2,...") + .help("Comma-separated list of integer offsets which can be used to avoid \ + proposing reorging blocks at certain slots. An offset of N means that \ + reorging proposals will not be attempted at any slot such that \ + `slot % SLOTS_PER_EPOCH == N`. By default only re-orgs at offset 0 will be \ + avoided. Any offsets supplied with this flag will impose additional \ + restrictions.") + .conflicts_with("disable-proposer-reorgs") + ) .arg( Arg::with_name("prepare-payload-lookahead") .long("prepare-payload-lookahead") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 6ad1fea3b2..55664897e8 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,5 +1,5 @@ use beacon_chain::chain_config::{ - ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, + DisallowedReOrgOffsets, ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, }; use clap::ArgMatches; @@ -686,6 +686,23 @@ pub fn get_config( client_config.chain.re_org_max_epochs_since_finalization = clap_utils::parse_optional(cli_args, "proposer-reorg-epochs-since-finalization")? 
.unwrap_or(DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION); + client_config.chain.re_org_cutoff_millis = + clap_utils::parse_optional(cli_args, "proposer-reorg-cutoff")?; + + if let Some(disallowed_offsets_str) = + clap_utils::parse_optional::(cli_args, "proposer-reorg-disallowed-offsets")? + { + let disallowed_offsets = disallowed_offsets_str + .split(',') + .map(|s| { + s.parse() + .map_err(|e| format!("invalid disallowed-offsets: {e:?}")) + }) + .collect::, _>>()?; + client_config.chain.re_org_disallowed_offsets = + DisallowedReOrgOffsets::new::(disallowed_offsets) + .map_err(|e| format!("invalid disallowed-offsets: {e:?}"))?; + } } // Note: This overrides any previous flags that enable this option. diff --git a/book/src/late-block-re-orgs.md b/book/src/late-block-re-orgs.md index 0014af8f15..fc4530589d 100644 --- a/book/src/late-block-re-orgs.md +++ b/book/src/late-block-re-orgs.md @@ -14,6 +14,15 @@ There are three flags which control the re-orging behaviour: * `--proposer-reorg-threshold N`: attempt to orphan blocks with less than N% of the committee vote. If this parameter isn't set then N defaults to 20% when the feature is enabled. * `--proposer-reorg-epochs-since-finalization N`: only attempt to re-org late blocks when the number of epochs since finalization is less than or equal to N. The default is 2 epochs, meaning re-orgs will only be attempted when the chain is finalizing optimally. +* `--proposer-reorg-cutoff T`: only attempt to re-org late blocks when the proposal is being made + before T milliseconds into the slot. Delays between the validator client and the beacon node can + cause some blocks to be requested later than the start of the slot, which makes them more likely + to fail. The default cutoff is 1000ms on mainnet, which gives blocks 3000ms to be signed and + propagated before the attestation deadline at 4000ms. 
+* `--proposer-reorg-disallowed-offsets N1,N2,N3...`: Prohibit Lighthouse from attempting to reorg at + specific offsets in each epoch. A disallowed offset `N` prevents reorging blocks from being + proposed at any `slot` such that `slot % SLOTS_PER_EPOCH == N`. The value to this flag is a + comma-separated list of integer offsets. All flags should be applied to `lighthouse bn`. The default configuration is recommended as it balances the chance of the re-org succeeding against the chance of failure due to attestations diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index b9d2046761..e6c46e83e7 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,7 +1,7 @@ use crate::{ForkChoiceStore, InvalidationOperation}; use proto_array::{ - Block as ProtoBlock, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, - ProtoArrayForkChoice, ReOrgThreshold, + Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, ProposerHeadError, + ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; use slog::{crit, debug, warn, Logger}; use ssz_derive::{Decode, Encode}; @@ -533,6 +533,7 @@ where current_slot: Slot, canonical_head: Hash256, re_org_threshold: ReOrgThreshold, + disallowed_offsets: &DisallowedReOrgOffsets, max_epochs_since_finalization: Epoch, ) -> Result>> { // Ensure that fork choice has already been updated for the current slot. 
This prevents @@ -564,6 +565,7 @@ where canonical_head, self.fc_store.justified_balances(), re_org_threshold, + disallowed_offsets, max_epochs_since_finalization, ) .map_err(ProposerHeadError::convert_inner_error) @@ -573,6 +575,7 @@ where &self, canonical_head: Hash256, re_org_threshold: ReOrgThreshold, + disallowed_offsets: &DisallowedReOrgOffsets, max_epochs_since_finalization: Epoch, ) -> Result>> { let current_slot = self.fc_store.get_current_slot(); @@ -582,6 +585,7 @@ where canonical_head, self.fc_store.justified_balances(), re_org_threshold, + disallowed_offsets, max_epochs_since_finalization, ) .map_err(ProposerHeadError::convert_inner_error) diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index c55739da79..1fe45fd0f1 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -50,6 +50,7 @@ pub enum Error { block_root: Hash256, parent_root: Hash256, }, + InvalidEpochOffset(u64), Arith(ArithError), } diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index e84139345a..481daba47e 100644 --- a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -8,8 +8,8 @@ mod ssz_container; pub use crate::justified_balances::JustifiedBalances; pub use crate::proto_array::{calculate_committee_fraction, InvalidationOperation}; pub use crate::proto_array_fork_choice::{ - Block, DoNotReOrg, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, - ReOrgThreshold, + Block, DisallowedReOrgOffsets, DoNotReOrg, ExecutionStatus, ProposerHeadError, + ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; pub use error::Error; diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 6db1ac132f..d376e62e8f 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -250,6 +250,9 @@ pub enum DoNotReOrg { 
ParentDistance, HeadDistance, ShufflingUnstable, + DisallowedOffset { + offset: u64, + }, JustificationAndFinalizationNotCompetitive, ChainNotFinalizing { epochs_since_finalization: u64, @@ -271,6 +274,9 @@ impl std::fmt::Display for DoNotReOrg { Self::ParentDistance => write!(f, "parent too far from head"), Self::HeadDistance => write!(f, "head too far from current slot"), Self::ShufflingUnstable => write!(f, "shuffling unstable at epoch boundary"), + Self::DisallowedOffset { offset } => { + write!(f, "re-orgs disabled at offset {offset}") + } Self::JustificationAndFinalizationNotCompetitive => { write!(f, "justification or finalization not competitive") } @@ -304,6 +310,31 @@ impl std::fmt::Display for DoNotReOrg { #[serde(transparent)] pub struct ReOrgThreshold(pub u64); +/// New-type for disallowed re-org slots. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(transparent)] +pub struct DisallowedReOrgOffsets { + // Vecs are faster than hashmaps for small numbers of items. 
+ offsets: Vec, +} + +impl Default for DisallowedReOrgOffsets { + fn default() -> Self { + DisallowedReOrgOffsets { offsets: vec![0] } + } +} + +impl DisallowedReOrgOffsets { + pub fn new(offsets: Vec) -> Result { + for &offset in &offsets { + if offset >= E::slots_per_epoch() { + return Err(Error::InvalidEpochOffset(offset)); + } + } + Ok(Self { offsets }) + } +} + #[derive(PartialEq)] pub struct ProtoArrayForkChoice { pub(crate) proto_array: ProtoArray, @@ -460,6 +491,7 @@ impl ProtoArrayForkChoice { canonical_head: Hash256, justified_balances: &JustifiedBalances, re_org_threshold: ReOrgThreshold, + disallowed_offsets: &DisallowedReOrgOffsets, max_epochs_since_finalization: Epoch, ) -> Result> { let info = self.get_proposer_head_info::( @@ -467,6 +499,7 @@ impl ProtoArrayForkChoice { canonical_head, justified_balances, re_org_threshold, + disallowed_offsets, max_epochs_since_finalization, )?; @@ -501,6 +534,7 @@ impl ProtoArrayForkChoice { canonical_head: Hash256, justified_balances: &JustifiedBalances, re_org_threshold: ReOrgThreshold, + disallowed_offsets: &DisallowedReOrgOffsets, max_epochs_since_finalization: Epoch, ) -> Result> { let mut nodes = self @@ -545,6 +579,12 @@ impl ProtoArrayForkChoice { return Err(DoNotReOrg::ShufflingUnstable.into()); } + // Check allowed slot offsets. + let offset = (re_org_block_slot % E::slots_per_epoch()).as_u64(); + if disallowed_offsets.offsets.contains(&offset) { + return Err(DoNotReOrg::DisallowedOffset { offset }.into()); + } + // Check FFG. 
let ffg_competitive = parent_node.unrealized_justified_checkpoint == head_node.unrealized_justified_checkpoint diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 64646a6c57..c116979366 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -2,6 +2,7 @@ use beacon_node::ClientConfig as Config; use crate::exec::{CommandLineTestExec, CompletedTest}; use beacon_node::beacon_chain::chain_config::{ + DisallowedReOrgOffsets, DEFAULT_RE_ORG_CUTOFF_DENOMINATOR, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, }; use eth1::Eth1Endpoint; @@ -1888,6 +1889,10 @@ fn enable_proposer_re_orgs_default() { config.chain.re_org_max_epochs_since_finalization, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, ); + assert_eq!( + config.chain.re_org_cutoff(12), + Duration::from_secs(12) / DEFAULT_RE_ORG_CUTOFF_DENOMINATOR + ); }); } @@ -1920,6 +1925,49 @@ fn proposer_re_org_max_epochs_since_finalization() { }); } +#[test] +fn proposer_re_org_cutoff() { + CommandLineTest::new() + .flag("proposer-reorg-cutoff", Some("500")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.chain.re_org_cutoff(12), Duration::from_millis(500)) + }); +} + +#[test] +fn proposer_re_org_disallowed_offsets_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.re_org_disallowed_offsets, + DisallowedReOrgOffsets::new::(vec![0]).unwrap() + ) + }); +} + +#[test] +fn proposer_re_org_disallowed_offsets_override() { + CommandLineTest::new() + .flag("--proposer-reorg-disallowed-offsets", Some("1,2,3")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.re_org_disallowed_offsets, + DisallowedReOrgOffsets::new::(vec![1, 2, 3]).unwrap() + ) + }); +} + +#[test] +#[should_panic] +fn proposer_re_org_disallowed_offsets_invalid() { + CommandLineTest::new() + .flag("--proposer-reorg-disallowed-offsets", Some("32,33,34")) + 
.run_with_zero_port(); +} + #[test] fn monitoring_endpoint() { CommandLineTest::new() From a3669abac593e6233709d8a0ae7566d04c0b4cd5 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 13 Apr 2023 07:05:02 +0000 Subject: [PATCH 05/16] Avoid processing redundant RPC blocks (#4179) ## Proposed Changes We already make some attempts to avoid processing RPC blocks when a block from the same proposer is already being processed through gossip. This PR strengthens that guarantee by using the existing cache for `observed_block_producers` to inform whether an RPC block's processing should be delayed. --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- .../work_reprocessing_queue.rs | 4 +- .../beacon_processor/worker/sync_methods.rs | 46 +++++++++++++++++++ 3 files changed, 49 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 283dcf96c0..08a88f5db9 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -352,7 +352,7 @@ pub struct BeaconChain { /// in recent epochs. pub(crate) observed_sync_aggregators: RwLock>, /// Maintains a record of which validators have proposed blocks for each slot. - pub(crate) observed_block_producers: RwLock>, + pub observed_block_producers: RwLock>, /// Maintains a record of which validators have submitted voluntary exits. pub(crate) observed_voluntary_exits: Mutex>, /// Maintains a record of which validators we've seen proposer slashings for. 
diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index 9f04d99725..427be6d513 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -56,7 +56,7 @@ pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); pub const QUEUED_LIGHT_CLIENT_UPDATE_DELAY: Duration = Duration::from_secs(12); /// For how long to queue rpc blocks before sending them back for reprocessing. -pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(3); +pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(4); /// Set an arbitrary upper-bound on the number of queued blocks to avoid DoS attacks. The fact that /// we signature-verify blocks before putting them in the queue *should* protect against this, but @@ -521,7 +521,7 @@ impl ReprocessQueue { return; } - // Queue the block for 1/4th of a slot + // Queue the block for 1/3rd of a slot self.rpc_block_delay_queue .insert(rpc_block, QUEUED_RPC_BLOCK_DELAY); } diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index e8182a1d5a..61ecc30d41 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -83,6 +83,52 @@ impl Worker { return; } }; + // Check if a block from this proposer is already known. If so, defer processing until later + // to avoid wasting time processing duplicates. 
+ let proposal_already_known = self + .chain + .observed_block_producers + .read() + .proposer_has_been_observed(block.message()) + .map_err(|e| { + error!( + self.log, + "Failed to check observed proposers"; + "error" => ?e, + "source" => "rpc", + "block_root" => %block_root + ); + }) + .unwrap_or(true); + if proposal_already_known { + debug!( + self.log, + "Delaying processing of duplicate RPC block"; + "block_root" => ?block_root, + "proposer" => block.message().proposer_index(), + "slot" => block.slot() + ); + + // Send message to work reprocess queue to retry the block + let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { + block_root, + block: block.clone(), + process_type, + seen_timestamp, + should_process: true, + }); + + if reprocess_tx.try_send(reprocess_msg).is_err() { + error!( + self.log, + "Failed to inform block import"; + "source" => "rpc", + "block_root" => %block_root + ); + } + return; + } + let slot = block.slot(); let parent_root = block.message().parent_root(); let result = self From 56dba963195b9a0d0831a94013ba4e31aaa7a57b Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Fri, 14 Apr 2023 01:11:45 +0000 Subject: [PATCH 06/16] Update Lighthouse book and some FAQs (#4178) ## Issue Addressed Updated Lighthouse book on Section 2 and added some FAQs ## Proposed Changes All changes are made in the book/src .md files. ## Additional Info Please provide any additional information. For example, future considerations or information useful for reviewers. 
Co-authored-by: chonghe Co-authored-by: Michael Sproul --- book/src/SUMMARY.md | 1 - book/src/docker.md | 41 +++++++++++++------------ book/src/faq.md | 50 ++++++++++++++++++++++++++++++- book/src/installation-binaries.md | 21 +++++++------ book/src/installation-source.md | 45 +++++++++++++++++++++++----- book/src/installation.md | 29 ++++++++++-------- book/src/pi.md | 27 +++++++++-------- book/src/system-requirements.md | 23 -------------- 8 files changed, 149 insertions(+), 88 deletions(-) delete mode 100644 book/src/system-requirements.md diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 7def1821dd..ff5c1e9805 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -2,7 +2,6 @@ * [Introduction](./intro.md) * [Installation](./installation.md) - * [System Requirements](./system-requirements.md) * [Pre-Built Binaries](./installation-binaries.md) * [Docker](./docker.md) * [Build from Source](./installation-source.md) diff --git a/book/src/docker.md b/book/src/docker.md index 7484f9f525..d67b084da6 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -16,21 +16,18 @@ way to run Lighthouse without building the image yourself. Obtain the latest image with: ```bash -$ docker pull sigp/lighthouse +docker pull sigp/lighthouse ``` Download and test the image with: ```bash -$ docker run sigp/lighthouse lighthouse --version +docker run sigp/lighthouse lighthouse --version ``` If you can see the latest [Lighthouse release](https://github.com/sigp/lighthouse/releases) version (see example below), then you've successfully installed Lighthouse via Docker. -> Pro tip: try the `latest-modern` image for a 20-30% speed-up! See [Available Docker -> Images](#available-docker-images) below. - ### Example Version Output ``` @@ -38,6 +35,9 @@ Lighthouse vx.x.xx-xxxxxxxxx BLS Library: xxxx-xxxxxxx ``` +> Pro tip: try the `latest-modern` image for a 20-30% speed-up! See [Available Docker +> Images](#available-docker-images) below. 
+ ### Available Docker Images There are several images available on Docker Hub. @@ -47,11 +47,10 @@ Lighthouse with optimizations enabled. If you are running on older hardware then `latest` image bundles a _portable_ version of Lighthouse which is slower but with better hardware compatibility (see [Portability](./installation-binaries.md#portability)). -To install a specific tag (in this case `latest-modern`) add the tag name to your `docker` commands -like so: +To install a specific tag (in this case `latest-modern`), add the tag name to your `docker` commands: ``` -$ docker pull sigp/lighthouse:latest-modern +docker pull sigp/lighthouse:latest-modern ``` Image tags follow this format: @@ -65,17 +64,17 @@ The `version` is: * `vX.Y.Z` for a tagged Lighthouse release, e.g. `v2.1.1` * `latest` for the `stable` branch (latest release) or `unstable` branch -The `stability` is: - -* `-unstable` for the `unstable` branch -* empty for a tagged release or the `stable` branch - The `arch` is: * `-amd64` for x86_64, e.g. Intel, AMD * `-arm64` for aarch64, e.g. Raspberry Pi 4 * empty for a multi-arch image (works on either `amd64` or `arm64` platforms) +The `stability` is: + +* `-unstable` for the `unstable` branch +* empty for a tagged release or the `stable` branch + The `modernity` is: * `-modern` for optimized builds @@ -99,13 +98,13 @@ To build the image from source, navigate to the root of the repository and run: ```bash -$ docker build . -t lighthouse:local +docker build . -t lighthouse:local ``` The build will likely take several minutes. 
Once it's built, test it with: ```bash -$ docker run lighthouse:local lighthouse --help +docker run lighthouse:local lighthouse --help ``` ## Using the Docker image @@ -113,12 +112,12 @@ $ docker run lighthouse:local lighthouse --help You can run a Docker beacon node with the following command: ```bash -$ docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse --network mainnet beacon --http --http-address 0.0.0.0 +docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse --network mainnet beacon --http --http-address 0.0.0.0 ``` -> To join the Prater testnet, use `--network prater` instead. +> To join the Goerli testnet, use `--network goerli` instead. -> The `-p` and `-v` and values are described below. +> The `-v` (Volumes) and `-p` (Ports) and values are described below. ### Volumes @@ -131,7 +130,7 @@ The following example runs a beacon node with the data directory mapped to the users home directory: ```bash -$ docker run -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse beacon +docker run -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse beacon ``` ### Ports @@ -140,14 +139,14 @@ In order to be a good peer and serve other peers you should expose port `9000` f Use the `-p` flag to do this: ```bash -$ docker run -p 9000:9000/tcp -p 9000:9000/udp sigp/lighthouse lighthouse beacon +docker run -p 9000:9000/tcp -p 9000:9000/udp sigp/lighthouse lighthouse beacon ``` If you use the `--http` flag you may also want to expose the HTTP port with `-p 127.0.0.1:5052:5052`. 
```bash -$ docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 sigp/lighthouse lighthouse beacon --http --http-address 0.0.0.0 +docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 sigp/lighthouse lighthouse beacon --http --http-address 0.0.0.0 ``` [docker_hub]: https://hub.docker.com/repository/docker/sigp/lighthouse/ diff --git a/book/src/faq.md b/book/src/faq.md index 43de40eee3..b42e197a00 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -9,6 +9,11 @@ - [What is "Syncing deposit contract block cache"?](#what-is-syncing-deposit-contract-block-cache) - [Can I use redundancy in my staking setup?](#can-i-use-redundancy-in-my-staking-setup) - [How can I monitor my validators?](#how-can-i-monitor-my-validators) +- [I see beacon logs showing `WARN: Execution engine called failed`, what should I do?](#i-see-beacon-logs-showing-warn-execution-engine-called-failed-what-should-i-do) +- [How do I check or update my withdrawal credentials?](#how-do-i-check-or-update-my-withdrawal-credentials) +- [I am missing attestations. Why?](#i-am-missing-attestations-why) +- [Sometimes I miss the attestation head vote, resulting in penalty. Is this normal?](#sometimes-i-miss-the-attestation-head-vote-resulting-in-penalty-is-this-normal) +- [My beacon node is stuck at downloading historical block using checkpoing sync. What can I do?](#my-beacon-node-is-stuck-at-downloading-historical-block-using-checkpoing-sync-what-can-i-do) ### Why does it take so long for a validator to be activated? @@ -185,4 +190,47 @@ However, there are some components which can be configured with redundancy. See Apart from using block explorers, you may use the "Validator Monitor" built into Lighthouse which provides logging and Prometheus/Grafana metrics for individual validators. See [Validator -Monitoring](./validator-monitoring.md) for more information. +Monitoring](./validator-monitoring.md) for more information. 
Lighthouse has also developed Lighthouse UI (Siren) to monitor performance, see [Lighthouse UI (Siren)](./lighthouse-ui.md).
+
+### I see beacon logs showing `WARN: Execution engine called failed`, what should I do?
+
+The `WARN Execution engine called failed` log is shown when the beacon node cannot reach the execution engine. When this warning occurs, it will be followed by a detailed message. A frequently encountered example of the error message is:
+
+`error: Reqwest(reqwest::Error { kind: Request, url: Url { scheme: "http", cannot_be_a_base: false, username: "", password: None, host: Some(Ipv4(127.0.0.1)), port: Some(8551), path: "/", query: None, fragment: None }, source: TimedOut }), service: exec`
+
+which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. There are a few reasons why this can occur:
+1. The execution engine is not synced. Check the log of the execution engine to make sure that it is synced. If it is syncing, wait until it is synced and the error will disappear. You will see the beacon node logs `INFO Execution engine online` when it is synced.
+1. The computer is overloaded. Check the CPU and RAM usage to see if it is overloaded. You can use `htop` to check for CPU and RAM usage.
+1. Your SSD is slow. Check if your SSD is in "The Bad" list [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). If your SSD is in "The Bad" list, it means it cannot keep in sync with the network and you may want to consider upgrading to a better SSD.
+
+If the reason for the error message is caused by no. 1 above, you may want to look further. If the execution engine is out of sync suddenly, it is usually caused by ungraceful shutdown. The common causes for ungraceful shutdown are:
+- Power outage. If power outages are an issue at your place, consider getting a UPS to avoid ungraceful shutdown of services.
+- The service file is not stopped properly. 
To overcome this, make sure that the process is stopped properly, e.g., during client updates.
+- Out of memory (oom) error. This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. When this occurs, the log file will show `Main process exited, code=killed, status=9/KILL`. You can also run `sudo journalctl -a --since "18 hours ago" | grep -i "killed process"` to confirm that the execution client has been killed due to oom. If you are using geth as the execution client, a short term solution is to reduce the resources used, for example: (1) reduce the cache by adding the flag `--cache 2048` (2) connect to fewer peers using the flag `--maxpeers 10`. If the oom occurs rather frequently, a long term solution is to increase the memory capacity of the computer.
+
+
+### How do I check or update my withdrawal credentials?
+Withdrawals will be available after the Capella/Shanghai upgrades on 12th April 2023. To check if you are eligible for withdrawals, go to [Staking launchpad](https://launchpad.ethereum.org/en/withdrawals), enter your validator index and click `verify on mainnet`:
+- `withdrawals enabled` means you will automatically receive withdrawals to the withdrawal address that you set.
+- `withdrawals not enabled` means you will need to update your withdrawal credentials from `0x00` type to `0x01` type. The common way to do this is using `Staking deposit CLI` or `ethdo`, with the instructions available [here](https://launchpad.ethereum.org/en/withdrawals#update-your-keys).
+
+For the case of `withdrawals not enabled`, you can update your withdrawal credentials **anytime**, and there is no deadline for that. The catch is that as long as you do not update your withdrawal credentials, your rewards in the beacon chain will continue to be locked in the beacon chain. Only after you update the withdrawal credentials, will the rewards be withdrawn to the withdrawal address.
+
+
+### I am missing attestations. Why? 
+The first thing is to ensure both consensus and execution clients are synced with the network. If they are synced, there may still be some issues with the node setup itself that is causing the missed attestations. Check the setup to ensure that:
+- the clock is synced
+- the computer has sufficient resources and is not overloaded
+- the internet is working well
+- you have sufficient peers
+
+You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). Once the above points are good, missing attestation should be a rare occurrence.
+
+### Sometimes I miss the attestation head vote, resulting in penalty. Is this normal?
+
+In general it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch and if the proposer of that slot releases the block late, then you will get penalised for missing the target and head votes. Your attestation performance does not only depend on your own setup, but also on everyone else's performance.
+
+
+### My beacon node is stuck at downloading historical block using checkpoing sync. What can I do?
+
+Check the number of peers you are connected to. If you have low peers (fewer than 50), try to do port forwarding on the port 9000 TCP/UDP to increase peer count.
\ No newline at end of file
diff --git a/book/src/installation-binaries.md b/book/src/installation-binaries.md
index 2365ea7ed7..30bf03e14e 100644
--- a/book/src/installation-binaries.md
+++ b/book/src/installation-binaries.md
@@ -23,21 +23,24 @@ For details, see [Portability](#portability).
 ## Usage
 
 Each binary is contained in a `.tar.gz` archive. For this example, lets assume the user needs
-a portable `x86_64` binary.
+an `x86_64` binary.
 
 ### Steps
 
 1. Go to the [Releases](https://github.com/sigp/lighthouse/releases) page and select the latest release.
-1. 
Download the `lighthouse-${VERSION}-x86_64-unknown-linux-gnu-portable.tar.gz` binary. -1. Extract the archive: - 1. `cd Downloads` - 1. `tar -xvf lighthouse-${VERSION}-x86_64-unknown-linux-gnu.tar.gz` +1. Download the `lighthouse-${VERSION}-x86_64-unknown-linux-gnu.tar.gz` binary. For example, to obtain the binary file for v4.0.1 (the latest version at the time of writing), a user can run the following commands in a linux terminal: + ```bash + cd ~ + curl -LO https://github.com/sigp/lighthouse/releases/download/v4.0.1/lighthouse-v4.0.1-x86_64-unknown-linux-gnu.tar.gz + tar -xvf lighthouse-v4.0.1-x86_64-unknown-linux-gnu.tar.gz + ``` 1. Test the binary with `./lighthouse --version` (it should print the version). -1. (Optional) Move the `lighthouse` binary to a location in your `PATH`, so the `lighthouse` command can be called from anywhere. - - E.g., `cp lighthouse /usr/bin` +1. (Optional) Move the `lighthouse` binary to a location in your `PATH`, so the `lighthouse` command can be called from anywhere. For example, to copy `lighthouse` from the current directory to `usr/bin`, run `sudo cp lighthouse /usr/bin`. -> Windows users will need to execute the commands in Step 3 from PowerShell. + + +> Windows users will need to execute the commands in Step 2 from PowerShell. ## Portability @@ -64,4 +67,4 @@ WARN CPU seems incompatible with optimized Lighthouse build, advice: If you get On some VPS providers, the virtualization can make it appear as if CPU features are not available, even when they are. In this case you might see the warning above, but so long as the client -continues to function it's nothing to worry about. +continues to function, it's nothing to worry about. 
diff --git a/book/src/installation-source.md b/book/src/installation-source.md index c89dd1add4..b9c9df163d 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -5,8 +5,20 @@ the instructions below, and then proceed to [Building Lighthouse](#build-lightho ## Dependencies -First, **install Rust** using [rustup](https://rustup.rs/). The rustup installer provides an easy way -to update the Rust compiler, and works on all platforms. +First, **install Rust** using [rustup](https://rustup.rs/): + +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` + +The rustup installer provides an easy way to update the Rust compiler, and works on all platforms. + +> Tips: +> +> - During installation, when prompted, enter `1` for the default installation. +> - After Rust installation completes, try running `cargo version` . If it cannot +> be found, run `source $HOME/.cargo/env`. After that, running `cargo version` should return the version, for example `cargo 1.68.2`. +> - It's generally advisable to append `source $HOME/.cargo/env` to `~/.bashrc`. With Rust installed, follow the instructions below to install dependencies relevant to your operating system. @@ -19,10 +31,17 @@ Install the following packages: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler ``` +> Tips: +> +> - If there are difficulties, try updating the package manager with `sudo apt +> update`. + > Note: Lighthouse requires CMake v3.12 or newer, which isn't available in the package repositories > of Ubuntu 18.04 or earlier. On these distributions CMake can still be installed via PPA: > [https://apt.kitware.com/](https://apt.kitware.com) +After this, you are ready to [build Lighthouse](#build-lighthouse). + #### macOS 1. Install the [Homebrew][] package manager. @@ -39,10 +58,19 @@ brew install protobuf [Homebrew]: https://brew.sh/ +After this, you are ready to [build Lighthouse](#build-lighthouse). 
+ #### Windows -1. Install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git). +1. Install [Git](https://git-scm.com/download/win). 1. Install the [Chocolatey](https://chocolatey.org/install) package manager for Windows. + > Tips: + > - Use PowerShell to install. In Windows, search for PowerShell and run as administrator. + > - You must ensure `Get-ExecutionPolicy` is not Restricted. To test this, run `Get-ExecutionPolicy` in PowerShell. If it returns `restricted`, then run `Set-ExecutionPolicy AllSigned`, and then run + ```bash + Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) + ``` + > - To verify that Chocolatey is ready, run `choco` and it should return the version. 1. Install Make, CMake, LLVM and protoc using Chocolatey: ``` @@ -67,6 +95,8 @@ should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)]( [WSL]: https://docs.microsoft.com/en-us/windows/wsl/about +After this, you are ready to [build Lighthouse](#build-lighthouse). + ## Build Lighthouse Once you have Rust and the build dependencies you're ready to build Lighthouse: @@ -136,7 +166,7 @@ Commonly used features include: * `spec-minimal`: support for the minimal preset (useful for testing). Default features (e.g. `slasher-mdbx`) may be opted out of using the `--no-default-features` -argument for `cargo`, which can plumbed in via the `CARGO_INSTALL_EXTRA_FLAGS` environment variable. +argument for `cargo`, which can be plumbed in via the `CARGO_INSTALL_EXTRA_FLAGS` environment variable. E.g. ``` @@ -171,12 +201,11 @@ PROFILE=maxperf make Lighthouse will be installed to `CARGO_HOME` or `$HOME/.cargo`. This directory needs to be on your `PATH` before you can run `$ lighthouse`. 
-See ["Configuring the `PATH` environment variable" -(rust-lang.org)](https://www.rust-lang.org/tools/install) for more information. +See ["Configuring the `PATH` environment variable"](https://www.rust-lang.org/tools/install) for more information. ### Compilation error -Make sure you are running the latest version of Rust. If you have installed Rust using rustup, simply type `rustup update`. +Make sure you are running the latest version of Rust. If you have installed Rust using rustup, simply run `rustup update`. If you can't install the latest version of Rust you can instead compile using the Minimum Supported Rust Version (MSRV) which is listed under the `rust-version` key in Lighthouse's @@ -185,7 +214,7 @@ Rust Version (MSRV) which is listed under the `rust-version` key in Lighthouse's If compilation fails with `(signal: 9, SIGKILL: kill)`, this could mean your machine ran out of memory during compilation. If you are on a resource-constrained device you can look into [cross compilation](./cross-compiling.md), or use a [pre-built -binary](./installation-binaries.md). +binary](https://github.com/sigp/lighthouse/releases). If compilation fails with `error: linking with cc failed: exit code: 1`, try running `cargo clean`. diff --git a/book/src/installation.md b/book/src/installation.md index bc546e0987..627326d2a4 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -8,24 +8,27 @@ There are three core methods to obtain the Lighthouse application: - [Docker images](./docker.md). - [Building from source](./installation-source.md). -Community-maintained additional installation methods: - -- [Homebrew package](./homebrew.md). -- Arch Linux AUR packages: [source](https://aur.archlinux.org/packages/lighthouse-ethereum), - [binary](https://aur.archlinux.org/packages/lighthouse-ethereum-bin). - Additionally, there are two extra guides for specific uses: - [Raspberry Pi 4 guide](./pi.md). - [Cross-compiling guide for developers](./cross-compiling.md). 
-## Minimum System Requirements +There are also community-maintained installation methods: -* Dual-core CPU, 2015 or newer -* 8 GB RAM -* 128 GB solid state storage -* 10 Mb/s download, 5 Mb/s upload broadband connection +- [Homebrew package](./homebrew.md). +- Arch Linux AUR packages: [source](https://aur.archlinux.org/packages/lighthouse-ethereum), + [binary](https://aur.archlinux.org/packages/lighthouse-ethereum-bin). -For more information see [System Requirements](./system-requirements.md). -[WSL]: https://docs.microsoft.com/en-us/windows/wsl/about + +## Recommended System Requirements + +Before [The Merge](https://ethereum.org/en/roadmap/merge/), Lighthouse was able to run on its own with low to mid-range consumer hardware, but would perform best when provided with ample system resources. + +After [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, it is necessary to run Lighthouse together with an execution client ([Nethermind](https://nethermind.io/), [Besu](https://www.hyperledger.org/use/besu), [Erigon](https://github.com/ledgerwatch/erigon), [Geth](https://geth.ethereum.org/)). The following system requirements listed are therefore for running a Lighthouse beacon node combined with an execution client , and a validator client with a modest number of validator keys (less than 100): + + +* CPU: Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer +* Memory: 16 GB RAM or more +* Storage: 2 TB solid state storage +* Network: 100 Mb/s download, 20 Mb/s upload broadband connection diff --git a/book/src/pi.md b/book/src/pi.md index 24796d394e..d8d154d765 100644 --- a/book/src/pi.md +++ b/book/src/pi.md @@ -12,18 +12,18 @@ desktop) may be convenient.* ### 1. Install Ubuntu -Follow the [Ubuntu Raspberry Pi installation instructions](https://ubuntu.com/download/raspberry-pi). - -**A 64-bit version is required** and latest version is recommended (Ubuntu -20.04 LTS was the latest at the time of writing). 
+Follow the [Ubuntu Raspberry Pi installation instructions](https://ubuntu.com/download/raspberry-pi). **A 64-bit version is required** A graphical environment is not required in order to use Lighthouse. Only the terminal and an Internet connection are necessary. ### 2. Install Packages -Install the [Ubuntu Dependencies](installation-source.md#ubuntu). -(I.e., run the `sudo apt install ...` command at that link). +Install the Ubuntu dependencies: + +```bash +sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler +``` > Tips: > @@ -32,15 +32,18 @@ Install the [Ubuntu Dependencies](installation-source.md#ubuntu). ### 3. Install Rust -Install Rust as per [rustup](https://rustup.rs/). (I.e., run the `curl ... ` -command). +Install Rust as per [rustup](https://rustup.rs/): + +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` > Tips: > -> - When prompted, enter `1` for the default installation. -> - Try running `cargo version` after Rust installation completes. If it cannot -> be found, run `source $HOME/.cargo/env`. -> - It's generally advised to append `source $HOME/.cargo/env` to `~/.bashrc`. +> - During installation, when prompted, enter `1` for the default installation. +> - After Rust installation completes, try running `cargo version` . If it cannot +> be found, run `source $HOME/.cargo/env`. After that, running `cargo version` should return the version, for example `cargo 1.68.2`. +> - It's generally advisable to append `source $HOME/.cargo/env` to `~/.bashrc`. ### 4. Install Lighthouse diff --git a/book/src/system-requirements.md b/book/src/system-requirements.md deleted file mode 100644 index 0c51d07cce..0000000000 --- a/book/src/system-requirements.md +++ /dev/null @@ -1,23 +0,0 @@ -# System Requirements - -Lighthouse is able to run on most low to mid-range consumer hardware, but will perform best when -provided with ample system resources. 
The following system requirements are for running a beacon -node and a validator client with a modest number of validator keys (less than 100). - -## Minimum - -* Dual-core CPU, 2015 or newer -* 8 GB RAM -* 128 GB solid state storage -* 10 Mb/s download, 5 Mb/s upload broadband connection - -During smooth network conditions, Lighthouse's database will fit within 15 GB, but in case of a long -period of non-finality, it is **strongly recommended** that at least 128 GB is available. - -## Recommended - -* Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer -* 16 GB RAM -* 256 GB solid state storage -* 100 Mb/s download, 20 Mb/s upload broadband connection - From 2b3084f5785dfb48c809d4838757341db4142eef Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 14 Apr 2023 01:11:46 +0000 Subject: [PATCH 07/16] Use head state for exit verification (#4183) ## Issue Addressed NA ## Proposed Changes Similar to #4181 but without the version bump and a more nuanced fix. Patches the high CPU usage seen after the Capella fork which was caused by processing exits when there are skip slots. ## Additional Info ~~This is an imperfect solution that will cause us to drop some exits at the fork boundary. 
This is tracked at #4184.~~ --- beacon_node/beacon_chain/src/beacon_chain.rs | 8 ++-- .../beacon_chain/src/observed_operations.rs | 37 ++++++++++++++++--- beacon_node/operation_pool/src/lib.rs | 3 +- consensus/state_processing/src/lib.rs | 2 +- .../process_operations.rs | 3 +- .../src/per_block_processing/tests.rs | 30 ++++++++++++--- .../src/per_block_processing/verify_exit.rs | 12 +++--- .../state_processing/src/verify_operation.rs | 34 ++++++++++++++++- 8 files changed, 106 insertions(+), 23 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 08a88f5db9..6b6379d62d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2206,12 +2206,14 @@ impl BeaconChain { &self, exit: SignedVoluntaryExit, ) -> Result, Error> { - // NOTE: this could be more efficient if it avoided cloning the head state - let wall_clock_state = self.wall_clock_state()?; + let head_snapshot = self.head().snapshot; + let head_state = &head_snapshot.beacon_state; + let wall_clock_epoch = self.epoch()?; + Ok(self .observed_voluntary_exits .lock() - .verify_and_observe(exit, &wall_clock_state, &self.spec) + .verify_and_observe_at(exit, wall_clock_epoch, head_state, &self.spec) .map(|exit| { // this method is called for both API and gossip exits, so this covers all exit events if let Some(event_handler) = self.event_handler.as_ref() { diff --git a/beacon_node/beacon_chain/src/observed_operations.rs b/beacon_node/beacon_chain/src/observed_operations.rs index 6e53373939..4121111b3e 100644 --- a/beacon_node/beacon_chain/src/observed_operations.rs +++ b/beacon_node/beacon_chain/src/observed_operations.rs @@ -1,11 +1,11 @@ use derivative::Derivative; use smallvec::{smallvec, SmallVec}; use ssz::{Decode, Encode}; -use state_processing::{SigVerifiedOp, VerifyOperation}; +use state_processing::{SigVerifiedOp, VerifyOperation, VerifyOperationAt}; use std::collections::HashSet; use 
std::marker::PhantomData; use types::{ - AttesterSlashing, BeaconState, ChainSpec, EthSpec, ForkName, ProposerSlashing, + AttesterSlashing, BeaconState, ChainSpec, Epoch, EthSpec, ForkName, ProposerSlashing, SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, }; @@ -87,12 +87,16 @@ impl ObservableOperation for SignedBlsToExecutionChange { } impl, E: EthSpec> ObservedOperations { - pub fn verify_and_observe( + pub fn verify_and_observe_parametric( &mut self, op: T, + validate: F, head_state: &BeaconState, spec: &ChainSpec, - ) -> Result, T::Error> { + ) -> Result, T::Error> + where + F: Fn(T) -> Result, T::Error>, + { self.reset_at_fork_boundary(head_state.slot(), spec); let observed_validator_indices = &mut self.observed_validator_indices; @@ -112,7 +116,7 @@ impl, E: EthSpec> ObservedOperations { } // Validate the op using operation-specific logic (`verify_attester_slashing`, etc). - let verified_op = op.validate(head_state, spec)?; + let verified_op = validate(op)?; // Add the relevant indices to the set of known indices to prevent processing of duplicates // in the future. @@ -121,6 +125,16 @@ impl, E: EthSpec> ObservedOperations { Ok(ObservationOutcome::New(verified_op)) } + pub fn verify_and_observe( + &mut self, + op: T, + head_state: &BeaconState, + spec: &ChainSpec, + ) -> Result, T::Error> { + let validate = |op: T| op.validate(head_state, spec); + self.verify_and_observe_parametric(op, validate, head_state, spec) + } + /// Reset the cache when crossing a fork boundary. 
/// /// This prevents an attacker from crafting a self-slashing which is only valid before the fork @@ -140,3 +154,16 @@ impl, E: EthSpec> ObservedOperations { } } } + +impl + VerifyOperationAt, E: EthSpec> ObservedOperations { + pub fn verify_and_observe_at( + &mut self, + op: T, + verify_at_epoch: Epoch, + head_state: &BeaconState, + spec: &ChainSpec, + ) -> Result, T::Error> { + let validate = |op: T| op.validate_at(head_state, verify_at_epoch, spec); + self.verify_and_observe_parametric(op, validate, head_state, spec) + } +} diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index c5be4f0a61..24c0623f5c 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -497,7 +497,8 @@ impl OperationPool { |exit| { filter(exit.as_inner()) && exit.signature_is_still_valid(&state.fork()) - && verify_exit(state, exit.as_inner(), VerifySignatures::False, spec).is_ok() + && verify_exit(state, None, exit.as_inner(), VerifySignatures::False, spec) + .is_ok() }, |exit| exit.as_inner().clone(), T::MaxVoluntaryExits::to_usize(), diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index e4f36bedd8..9641e8f96e 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -41,4 +41,4 @@ pub use per_epoch_processing::{ errors::EpochProcessingError, process_epoch as per_epoch_processing, }; pub use per_slot_processing::{per_slot_processing, Error as SlotProcessingError}; -pub use verify_operation::{SigVerifiedOp, VerifyOperation}; +pub use verify_operation::{SigVerifiedOp, VerifyOperation, VerifyOperationAt}; diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 7d04cad90b..4bee596615 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ 
b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -282,7 +282,8 @@ pub fn process_exits( // Verify and apply each exit in series. We iterate in series because higher-index exits may // become invalid due to the application of lower-index ones. for (i, exit) in voluntary_exits.iter().enumerate() { - verify_exit(state, exit, verify_signatures, spec).map_err(|e| e.into_with_index(i))?; + verify_exit(state, None, exit, verify_signatures, spec) + .map_err(|e| e.into_with_index(i))?; initiate_validator_exit(state, exit.message.validator_index as usize, spec)?; } diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index b7d28832db..6eabbf0d44 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -978,8 +978,14 @@ async fn fork_spanning_exit() { let head = harness.chain.canonical_head.cached_head(); let head_state = &head.snapshot.beacon_state; assert!(head_state.current_epoch() < spec.altair_fork_epoch.unwrap()); - verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) - .expect("phase0 exit verifies against phase0 state"); + verify_exit( + head_state, + None, + &signed_exit, + VerifySignatures::True, + &spec, + ) + .expect("phase0 exit verifies against phase0 state"); /* * Ensure the exit verifies after Altair. 
@@ -992,8 +998,14 @@ async fn fork_spanning_exit() { let head_state = &head.snapshot.beacon_state; assert!(head_state.current_epoch() >= spec.altair_fork_epoch.unwrap()); assert!(head_state.current_epoch() < spec.bellatrix_fork_epoch.unwrap()); - verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) - .expect("phase0 exit verifies against altair state"); + verify_exit( + head_state, + None, + &signed_exit, + VerifySignatures::True, + &spec, + ) + .expect("phase0 exit verifies against altair state"); /* * Ensure the exit no longer verifies after Bellatrix. @@ -1009,6 +1021,12 @@ async fn fork_spanning_exit() { let head = harness.chain.canonical_head.cached_head(); let head_state = &head.snapshot.beacon_state; assert!(head_state.current_epoch() >= spec.bellatrix_fork_epoch.unwrap()); - verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) - .expect_err("phase0 exit does not verify against bellatrix state"); + verify_exit( + head_state, + None, + &signed_exit, + VerifySignatures::True, + &spec, + ) + .expect_err("phase0 exit does not verify against bellatrix state"); } diff --git a/consensus/state_processing/src/per_block_processing/verify_exit.rs b/consensus/state_processing/src/per_block_processing/verify_exit.rs index f17e5fcd23..9e9282912d 100644 --- a/consensus/state_processing/src/per_block_processing/verify_exit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_exit.rs @@ -20,10 +20,12 @@ fn error(reason: ExitInvalid) -> BlockOperationError { /// Spec v0.12.1 pub fn verify_exit( state: &BeaconState, + current_epoch: Option, signed_exit: &SignedVoluntaryExit, verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<()> { + let current_epoch = current_epoch.unwrap_or(state.current_epoch()); let exit = &signed_exit.message; let validator = state @@ -33,7 +35,7 @@ pub fn verify_exit( // Verify the validator is active. 
verify!( - validator.is_active_at(state.current_epoch()), + validator.is_active_at(current_epoch), ExitInvalid::NotActive(exit.validator_index) ); @@ -45,9 +47,9 @@ pub fn verify_exit( // Exits must specify an epoch when they become valid; they are not valid before then. verify!( - state.current_epoch() >= exit.epoch, + current_epoch >= exit.epoch, ExitInvalid::FutureEpoch { - state: state.current_epoch(), + state: current_epoch, exit: exit.epoch } ); @@ -57,9 +59,9 @@ pub fn verify_exit( .activation_epoch .safe_add(spec.shard_committee_period)?; verify!( - state.current_epoch() >= earliest_exit_epoch, + current_epoch >= earliest_exit_epoch, ExitInvalid::TooYoungToExit { - current_epoch: state.current_epoch(), + current_epoch, earliest_exit_epoch, } ); diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index 50ac2ff3de..864844080f 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -134,7 +134,7 @@ impl VerifyOperation for SignedVoluntaryExit { state: &BeaconState, spec: &ChainSpec, ) -> Result, Self::Error> { - verify_exit(state, &self, VerifySignatures::True, spec)?; + verify_exit(state, None, &self, VerifySignatures::True, spec)?; Ok(SigVerifiedOp::new(self, state)) } @@ -205,3 +205,35 @@ impl VerifyOperation for SignedBlsToExecutionChange { smallvec![] } } + +/// Trait for operations that can be verified and transformed into a +/// `SigVerifiedOp`. +/// +/// The `At` suffix indicates that we can specify a particular epoch at which to +/// verify the operation. 
+pub trait VerifyOperationAt: VerifyOperation + Sized { + fn validate_at( + self, + state: &BeaconState, + validate_at_epoch: Epoch, + spec: &ChainSpec, + ) -> Result, Self::Error>; +} + +impl VerifyOperationAt for SignedVoluntaryExit { + fn validate_at( + self, + state: &BeaconState, + validate_at_epoch: Epoch, + spec: &ChainSpec, + ) -> Result, Self::Error> { + verify_exit( + state, + Some(validate_at_epoch), + &self, + VerifySignatures::True, + spec, + )?; + Ok(SigVerifiedOp::new(self, state)) + } +} From dd124b2d6804d02e4e221f29387a56775acccd08 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 14 Apr 2023 06:37:16 +0000 Subject: [PATCH 08/16] Address observed proposers behaviour (#4192) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Issue Addressed NA ## Proposed Changes Apply two changes to code introduced in #4179: 1. Remove the `ERRO` log for when we error on `proposer_has_been_observed()`. We were seeing a lot of this in our logs for finalized blocks and it's a bit noisy. 1. Use `false` rather than `true` for `proposal_already_known` when there is an error. If a block raises an error in `proposer_has_been_observed()` then the block must be invalid, so we should process (and reject) it now rather than queuing it. 
For reference, here is one of the offending `ERRO` logs: ``` ERRO Failed to check observed proposers block_root: 0x5845…878e, source: rpc, error: FinalizedBlock { slot: Slot(5410983), finalized_slot: Slot(5411232) } ``` ## Additional Info NA --- beacon_node/beacon_chain/src/lib.rs | 2 +- .../beacon_processor/worker/sync_methods.rs | 23 ++++++++----------- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index af4780e46e..be1522a3b8 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -32,7 +32,7 @@ pub mod migrate; mod naive_aggregation_pool; mod observed_aggregates; mod observed_attesters; -mod observed_block_producers; +pub mod observed_block_producers; pub mod observed_operations; pub mod otb_verification_service; mod persisted_beacon_chain; diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 61ecc30d41..6faf7ebd37 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -9,8 +9,8 @@ use crate::sync::manager::{BlockProcessType, SyncMessage}; use crate::sync::{BatchProcessResult, ChainId}; use beacon_chain::CountUnrealized; use beacon_chain::{ - BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, - NotifyExecutionLayer, + observed_block_producers::Error as ObserveError, BeaconChainError, BeaconChainTypes, + BlockError, ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, }; use lighthouse_network::PeerAction; use slog::{debug, error, info, warn}; @@ -85,21 +85,18 @@ impl Worker { }; // Check if a block from this proposer is already known. If so, defer processing until later // to avoid wasting time processing duplicates. 
- let proposal_already_known = self + let proposal_already_known = match self .chain .observed_block_producers .read() .proposer_has_been_observed(block.message()) - .map_err(|e| { - error!( - self.log, - "Failed to check observed proposers"; - "error" => ?e, - "source" => "rpc", - "block_root" => %block_root - ); - }) - .unwrap_or(true); + { + Ok(is_observed) => is_observed, + // Both of these blocks will be rejected, so reject them now rather + // than re-queuing them. + Err(ObserveError::FinalizedBlock { .. }) + | Err(ObserveError::ValidatorIndexTooHigh { .. }) => false, + }; if proposal_already_known { debug!( self.log, From 1d92e3f77c5900ed96bac4fae23cad02d885c74a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 18 Apr 2023 02:47:35 +0000 Subject: [PATCH 09/16] Use efficient payload reconstruction for HTTP API (#4102) ## Proposed Changes Builds on #4028 to use the new payload bodies methods in the HTTP API as well. ## Caveats The payloads by range method only works for the finalized chain, so it can't be used in the execution engine integration tests because we try to reconstruct unfinalized payloads there. --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- beacon_node/execution_layer/src/lib.rs | 55 +++++++++++++++++-- .../src/test_rig.rs | 3 +- 3 files changed, 54 insertions(+), 6 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6b6379d62d..0165c54dc3 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1043,7 +1043,7 @@ impl BeaconChain { .execution_layer .as_ref() .ok_or(Error::ExecutionLayerMissing)? 
- .get_payload_by_block_hash(exec_block_hash, fork) + .get_payload_for_header(&execution_payload_header, fork) .await .map_err(|e| { Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, Box::new(e)) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 2c2d8c7dce..dd956d1d75 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -103,6 +103,8 @@ pub enum Error { transactions_root: Hash256, }, InvalidJWTSecret(String), + InvalidForkForPayload, + InvalidPayloadBody(String), BeaconStateError(BeaconStateError), } @@ -1602,14 +1604,59 @@ impl ExecutionLayer { .map_err(Error::EngineError) } - pub async fn get_payload_by_block_hash( + /// Fetch a full payload from the execution node. + /// + /// This will fail if the payload is not from the finalized portion of the chain. + pub async fn get_payload_for_header( + &self, + header: &ExecutionPayloadHeader, + fork: ForkName, + ) -> Result>, Error> { + let hash = header.block_hash(); + let block_number = header.block_number(); + + // Handle default payload body. + if header.block_hash() == ExecutionBlockHash::zero() { + let payload = match fork { + ForkName::Merge => ExecutionPayloadMerge::default().into(), + ForkName::Capella => ExecutionPayloadCapella::default().into(), + ForkName::Base | ForkName::Altair => { + return Err(Error::InvalidForkForPayload); + } + }; + return Ok(Some(payload)); + } + + // Use efficient payload bodies by range method if supported. 
+ let capabilities = self.get_engine_capabilities(None).await?; + if capabilities.get_payload_bodies_by_range_v1 { + let mut payload_bodies = self.get_payload_bodies_by_range(block_number, 1).await?; + + if payload_bodies.len() != 1 { + return Ok(None); + } + + let opt_payload_body = payload_bodies.pop().flatten(); + opt_payload_body + .map(|body| { + body.to_payload(header.clone()) + .map_err(Error::InvalidPayloadBody) + }) + .transpose() + } else { + // Fall back to eth_blockByHash. + self.get_payload_by_hash_legacy(hash, fork).await + } + } + + pub async fn get_payload_by_hash_legacy( &self, hash: ExecutionBlockHash, fork: ForkName, ) -> Result>, Error> { self.engine() .request(|engine| async move { - self.get_payload_by_block_hash_from_engine(engine, hash, fork) + self.get_payload_by_hash_from_engine(engine, hash, fork) .await }) .await @@ -1617,7 +1664,7 @@ impl ExecutionLayer { .map_err(Error::EngineError) } - async fn get_payload_by_block_hash_from_engine( + async fn get_payload_by_hash_from_engine( &self, engine: &Engine, hash: ExecutionBlockHash, @@ -1630,7 +1677,7 @@ impl ExecutionLayer { ForkName::Merge => Ok(Some(ExecutionPayloadMerge::default().into())), ForkName::Capella => Ok(Some(ExecutionPayloadCapella::default().into())), ForkName::Base | ForkName::Altair => Err(ApiError::UnsupportedForkVariant( - format!("called get_payload_by_block_hash_from_engine with {}", fork), + format!("called get_payload_by_hash_from_engine with {}", fork), )), }; } diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index ff333332ba..726019a848 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -626,9 +626,10 @@ async fn check_payload_reconstruction( ee: &ExecutionPair, payload: &ExecutionPayload, ) { + // check via legacy eth_getBlockByHash let reconstructed = ee .execution_layer - 
.get_payload_by_block_hash(payload.block_hash(), payload.fork_name()) + .get_payload_by_hash_legacy(payload.block_hash(), payload.fork_name()) .await .unwrap() .unwrap(); From e9a7316f1de113d118f1aa16856ae97e181ceb82 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 18 Apr 2023 02:47:36 +0000 Subject: [PATCH 10/16] Set user agent on requests to builder (#4199) ## Issue Addressed Closes #4185 ## Proposed Changes - Set user agent to `Lighthouse/vX.Y.Z-` by default - Allow tweaking user agent via `--builder-user-agent "agent"` --- Cargo.lock | 1 + beacon_node/builder_client/Cargo.toml | 1 + beacon_node/builder_client/src/lib.rs | 19 ++++++++------ beacon_node/execution_layer/src/lib.rs | 16 +++++++++--- beacon_node/src/cli.rs | 9 +++++++ beacon_node/src/config.rs | 3 +++ lighthouse/tests/beacon_node.rs | 34 ++++++++++++++++++++++++++ 7 files changed, 71 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 08b7e2a808..47fab8c0f6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -863,6 +863,7 @@ name = "builder_client" version = "0.1.0" dependencies = [ "eth2", + "lighthouse_version", "reqwest", "sensitive_url", "serde", diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index 48ac0300c9..b79fc5e407 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -10,3 +10,4 @@ sensitive_url = { path = "../../common/sensitive_url" } eth2 = { path = "../../common/eth2" } serde = { version = "1.0.116", features = ["derive"] } serde_json = "1.0.58" +lighthouse_version = { path = "../../common/lighthouse_version" } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index fecf6512ac..255c2fdd19 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -17,6 +17,9 @@ pub const DEFAULT_TIMEOUT_MILLIS: u64 = 15000; /// This timeout is in accordance with v0.2.0 of the [builder 
specs](https://github.com/flashbots/mev-boost/pull/20). pub const DEFAULT_GET_HEADER_TIMEOUT_MILLIS: u64 = 1000; +/// Default user agent for HTTP requests. +pub const DEFAULT_USER_AGENT: &str = lighthouse_version::VERSION; + #[derive(Clone)] pub struct Timeouts { get_header: Duration, @@ -41,23 +44,23 @@ pub struct BuilderHttpClient { client: reqwest::Client, server: SensitiveUrl, timeouts: Timeouts, + user_agent: String, } impl BuilderHttpClient { - pub fn new(server: SensitiveUrl) -> Result { + pub fn new(server: SensitiveUrl, user_agent: Option) -> Result { + let user_agent = user_agent.unwrap_or(DEFAULT_USER_AGENT.to_string()); + let client = reqwest::Client::builder().user_agent(&user_agent).build()?; Ok(Self { - client: reqwest::Client::new(), + client, server, timeouts: Timeouts::default(), + user_agent, }) } - pub fn new_with_timeouts(server: SensitiveUrl, timeouts: Timeouts) -> Result { - Ok(Self { - client: reqwest::Client::new(), - server, - timeouts, - }) + pub fn get_user_agent(&self) -> &str { + &self.user_agent } async fn get_with_timeout( diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index dd956d1d75..09be379d24 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -230,6 +230,8 @@ pub struct Config { pub execution_endpoints: Vec, /// Endpoint urls for services providing the builder api. pub builder_url: Option, + /// User agent to send with requests to the builder API. + pub builder_user_agent: Option, /// JWT secrets for the above endpoints running the engine api. 
pub secret_files: Vec, /// The default fee recipient to use on the beacon node if none if provided from @@ -260,6 +262,7 @@ impl ExecutionLayer { let Config { execution_endpoints: urls, builder_url, + builder_user_agent, secret_files, suggested_fee_recipient, jwt_id, @@ -320,12 +323,17 @@ impl ExecutionLayer { let builder = builder_url .map(|url| { - let builder_client = BuilderHttpClient::new(url.clone()).map_err(Error::Builder); - info!(log, + let builder_client = BuilderHttpClient::new(url.clone(), builder_user_agent) + .map_err(Error::Builder)?; + + info!( + log, "Connected to external block builder"; "builder_url" => ?url, - "builder_profit_threshold" => builder_profit_threshold); - builder_client + "builder_profit_threshold" => builder_profit_threshold, + "local_user_agent" => builder_client.get_user_agent(), + ); + Ok::<_, Error>(builder_client) }) .transpose()?; diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 8a5c33ac0f..25521ec242 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1000,6 +1000,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("0") .takes_value(true) ) + .arg( + Arg::with_name("builder-user-agent") + .long("builder-user-agent") + .value_name("STRING") + .help("The HTTP user agent to send alongside requests to the builder URL. The \ + default is Lighthouse's version string.") + .requires("builder") + .takes_value(true) + ) .arg( Arg::with_name("count-unrealized") .long("count-unrealized") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 55664897e8..8cc38a534b 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -329,6 +329,9 @@ pub fn get_config( let payload_builder = parse_only_one_value(endpoint, SensitiveUrl::parse, "--builder", log)?; el_config.builder_url = Some(payload_builder); + + el_config.builder_user_agent = + clap_utils::parse_optional(cli_args, "builder-user-agent")?; } // Set config values from parse values. 
diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index c116979366..9dd67eadc6 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -716,6 +716,40 @@ fn builder_fallback_flags() { ); } +#[test] +fn builder_user_agent() { + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + None, + None, + |config| { + assert_eq!( + config.execution_layer.as_ref().unwrap().builder_user_agent, + None + ); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-user-agent"), + Some("anon"), + |config| { + assert_eq!( + config + .execution_layer + .as_ref() + .unwrap() + .builder_user_agent + .as_ref() + .unwrap(), + "anon" + ); + }, + ); +} + fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_flag: &str) { use sensitive_url::SensitiveUrl; From 434386774eb99df3c4c479b484001290c28f89cb Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 18 Apr 2023 02:47:37 +0000 Subject: [PATCH 11/16] Bump Rust version (MSRV) (#4204) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Issue Addressed There was a [`VecDeque` bug](https://github.com/rust-lang/rust/issues/108453) in some recent versions of the Rust standard library (1.67.0 & 1.67.1) that could cause Lighthouse to panic (reported by `@Sea Monkey` on discord). See full logs below. The issue was likely introduced in Rust 1.67.0 and [fixed](https://github.com/rust-lang/rust/pull/108475) in 1.68, and we were able to reproduce the panic ourselves using [@michaelsproul's fuzz tests](https://github.com/michaelsproul/lighthouse/blob/fuzz-lru-time-cache/beacon_node/lighthouse_network/src/peer_manager/fuzz.rs#L111) on both Rust 1.67.0 and 1.67.1. Users that uses our Docker images or binaries are unlikely affected, as our Docker images were built with `1.66`, and latest binaries were built with latest stable (`1.68.2`). 
It likely impacts user that builds from source using Rust versions 1.67.x. ## Proposed Changes Bump Rust version (MSRV) to latest stable `1.68.2`. ## Additional Info From `@Sea Monkey` on Lighthouse Discord: > Crash on goerli using `unstable` `dd124b2d6804d02e4e221f29387a56775acccd08` ``` thread 'tokio-runtime-worker' panicked at 'Key must exist', /mnt/goerli/goerli/lighthouse/common/lru_cache/src/time.rs:68:28 stack backtrace: Apr 15 09:37:36.993 WARN Peer sent invalid block in single block lookup, peer_id: 16Uiu2HAm6ZuyJpVpR6y51X4Enbp8EhRBqGycQsDMPX7e5XfPYznG, error: WouldRevertFinalizedSlot { block_slot: Slot(5420212), finalized_slot: Slot(5420224) }, root: 0x10f6…3165, service: sync 0: rust_begin_unwind at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:575:5 1: core::panicking::panic_fmt at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/core/src/panicking.rs:64:14 2: core::panicking::panic_display at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/core/src/panicking.rs:135:5 3: core::panicking::panic_str at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/core/src/panicking.rs:119:5 4: core::option::expect_failed at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/core/src/option.rs:1879:5 5: lru_cache::time::LRUTimeCache::raw_remove 6: lighthouse_network::peer_manager::PeerManager::handle_ban_operation 7: lighthouse_network::peer_manager::PeerManager::handle_score_action 8: lighthouse_network::peer_manager::PeerManager::report_peer 9: network::service::NetworkService::spawn_service::{{closure}} 10: as core::future::future::Future>::poll 11: as core::future::future::Future>::poll 12: ::Output> as core::future::future::Future>::poll 13: tokio::loom::std::unsafe_cell::UnsafeCell::with_mut 14: tokio::runtime::task::core::Core::poll 15: tokio::runtime::task::harness::Harness::poll 16: tokio::runtime::scheduler::multi_thread::worker::Context::run_task 17: 
tokio::runtime::scheduler::multi_thread::worker::Context::run 18: tokio::macros::scoped_tls::ScopedKey::set 19: tokio::runtime::scheduler::multi_thread::worker::run 20: tokio::loom::std::unsafe_cell::UnsafeCell::with_mut 21: tokio::runtime::task::core::Core::poll 22: tokio::runtime::task::harness::Harness::poll 23: tokio::runtime::blocking::pool::Inner::run note: Some details are omitted, run with `RUST_BACKTRACE=full` for a verbose backtrace. Apr 15 09:37:37.069 INFO Saved DHT state service: network Apr 15 09:37:37.070 INFO Network service shutdown service: network Apr 15 09:37:37.132 CRIT Task panic. This is a bug! advice: Please check above for a backtrace and notify the developers, message: , task_name: network Apr 15 09:37:37.132 INFO Internal shutdown received reason: Panic (fatal error) Apr 15 09:37:37.133 INFO Shutting down.. reason: Failure("Panic (fatal error)") Apr 15 09:37:37.135 WARN Unable to free worker error: channel closed, msg: did not free worker, shutdown may be underway Apr 15 09:37:39.350 INFO Saved beacon chain to disk service: beacon Panic (fatal error) ``` --- .github/workflows/test-suite.yml | 2 +- Dockerfile | 2 +- lcli/Dockerfile | 2 +- lighthouse/Cargo.toml | 2 +- testing/antithesis/Dockerfile.libvoidstar | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 445f71fa09..b7321df784 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -13,7 +13,7 @@ env: # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005) RUSTFLAGS: "-D warnings -C debuginfo=0" # The Nightly version used for cargo-udeps, might need updating from time to time. - PINNED_NIGHTLY: nightly-2022-12-15 + PINNED_NIGHTLY: nightly-2023-04-16 # Prevent Github API rate limiting. 
LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} jobs: diff --git a/Dockerfile b/Dockerfile index 25ca075387..0d268c7e1a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.66.0-bullseye AS builder +FROM rust:1.68.2-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse ARG FEATURES diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 079e5c779b..98f33f2153 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.66.0-bullseye AS builder +FROM rust:1.68.2-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse ARG PORTABLE diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 2b7727ec11..97289bf459 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.1" authors = ["Sigma Prime "] edition = "2021" autotests = false -rust-version = "1.66" +rust-version = "1.68.2" [features] default = ["slasher-mdbx"] diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar index bae1807329..ddc49e13cd 100644 --- a/testing/antithesis/Dockerfile.libvoidstar +++ b/testing/antithesis/Dockerfile.libvoidstar @@ -1,4 +1,4 @@ -FROM rust:1.66.1-bullseye AS builder +FROM rust:1.68.2-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . 
lighthouse From 48843ba19843378da51a12d326ef989124162d92 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 19 Apr 2023 04:23:20 +0000 Subject: [PATCH 12/16] Check lateness of block before requeuing it (#4208) ## Issue Addressed NA ## Proposed Changes Avoids reprocessing loops introduced in #4179. (Also somewhat related to #4192). Breaks the re-queue loop by only re-queuing when an RPC block is received before the attestation creation deadline. I've put `proposal_is_known` behind a closure to avoid interacting with the `observed_proposers` lock unnecessarily. ## Additional Info NA --- .../beacon_processor/worker/sync_methods.rs | 52 +++++++++++++------ 1 file changed, 36 insertions(+), 16 deletions(-) diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 6faf7ebd37..ca2095348a 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -9,12 +9,15 @@ use crate::sync::manager::{BlockProcessType, SyncMessage}; use crate::sync::{BatchProcessResult, ChainId}; use beacon_chain::CountUnrealized; use beacon_chain::{ - observed_block_producers::Error as ObserveError, BeaconChainError, BeaconChainTypes, - BlockError, ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, + observed_block_producers::Error as ObserveError, validator_monitor::get_block_delay_ms, + BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, + NotifyExecutionLayer, }; use lighthouse_network::PeerAction; use slog::{debug, error, info, warn}; +use slot_clock::SlotClock; use std::sync::Arc; +use std::time::{SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use types::{Epoch, Hash256, SignedBeaconBlock}; @@ -83,21 +86,38 @@ impl Worker { return; } }; - // Check if a block from this proposer is already known. 
If so, defer processing until later - // to avoid wasting time processing duplicates. - let proposal_already_known = match self - .chain - .observed_block_producers - .read() - .proposer_has_been_observed(block.message()) - { - Ok(is_observed) => is_observed, - // Both of these blocks will be rejected, so reject them now rather - // than re-queuing them. - Err(ObserveError::FinalizedBlock { .. }) - | Err(ObserveError::ValidatorIndexTooHigh { .. }) => false, + + // Returns `true` if the time now is after the 4s attestation deadline. + let block_is_late = SystemTime::now() + .duration_since(UNIX_EPOCH) + // If we can't read the system time clock then indicate that the + // block is late (and therefore should *not* be requeued). This + // avoids infinite loops. + .map_or(true, |now| { + get_block_delay_ms(now, block.message(), &self.chain.slot_clock) + > self.chain.slot_clock.unagg_attestation_production_delay() + }); + + // Checks if a block from this proposer is already known. + let proposal_already_known = || { + match self + .chain + .observed_block_producers + .read() + .proposer_has_been_observed(block.message()) + { + Ok(is_observed) => is_observed, + // Both of these blocks will be rejected, so reject them now rather + // than re-queuing them. + Err(ObserveError::FinalizedBlock { .. }) + | Err(ObserveError::ValidatorIndexTooHigh { .. }) => false, + } }; - if proposal_already_known { + + // If we've already seen a block from this proposer *and* the block + // arrived before the attestation deadline, requeue it to ensure it is + // imported late enough that it won't receive a proposer boost. + if !block_is_late && proposal_already_known() { debug!( self.log, "Delaying processing of duplicate RPC block"; From 693886b94176faa4cb450f024696cb69cda2fe58 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 20 Apr 2023 00:51:38 +0000 Subject: [PATCH 13/16] Release v4.1.0 (#4191) ## Issue Addressed NA ## Proposed Changes Bump versions. 
## Additional Info NA --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 47fab8c0f6..a0f9fc7491 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -639,7 +639,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "4.0.1" +version = "4.1.0" dependencies = [ "beacon_chain", "clap", @@ -818,7 +818,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "4.0.1" +version = "4.1.0" dependencies = [ "beacon_node", "clap", @@ -3841,7 +3841,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "4.0.1" +version = "4.1.0" dependencies = [ "account_utils", "beacon_chain", @@ -4492,7 +4492,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "4.0.1" +version = "4.1.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index d8aa42a67e..95f145a557 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "4.0.1" +version = "4.1.0" authors = ["Paul Hauner ", "Age Manning "] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index c1b6333a37..d30f45ca29 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v4.0.1-", - fallback = "Lighthouse/v4.0.1" + prefix = "Lighthouse/v4.1.0-", + fallback = "Lighthouse/v4.1.0" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 22c4ce305f..cee62aa5e1 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "4.0.1" +version = "4.1.0" authors = ["Paul Hauner "] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 97289bf459..14efb72a3a 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "4.0.1" +version = "4.1.0" authors = ["Sigma Prime "] edition = "2021" autotests = false From ed7824869cfd0720daada8ac66e3bea73379e2d5 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 21 Apr 2023 18:29:27 +0000 Subject: [PATCH 14/16] Update LLVM version to 15.0 in CI workflows (#4220) ## Issue Addressed The latest stable version (1.69.0) of Rust was released on 20 April and contains this change: - [Update the minimum external LLVM to 14.](https://github.com/rust-lang/rust/pull/107573/) This impacts some of our CI workflows (build and release-test-windows) that uses LLVM 13.0. This PR updates the workflows to install LLVM 15.0. 
**UPDATE**: Also updated `h2` to address [this issue](https://github.com/advisories/GHSA-f8vr-r385-rh5r) --- .github/workflows/release.yml | 2 +- .github/workflows/test-suite.yml | 2 +- Cargo.lock | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2e63b4d6c2..e6d79bd5ef 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -73,7 +73,7 @@ jobs: - uses: KyleMayes/install-llvm-action@v1 if: startsWith(matrix.arch, 'x86_64-windows') with: - version: "13.0" + version: "15.0" directory: ${{ runner.temp }}/llvm - name: Set LIBCLANG_PATH if: startsWith(matrix.arch, 'x86_64-windows') diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index b7321df784..b18d7ee959 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -83,7 +83,7 @@ jobs: run: choco install -y make - uses: KyleMayes/install-llvm-action@v1 with: - version: "13.0" + version: "15.0" directory: ${{ runner.temp }}/llvm - name: Set LIBCLANG_PATH run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV diff --git a/Cargo.lock b/Cargo.lock index a0f9fc7491..50446be153 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3068,9 +3068,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.16" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" +checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" dependencies = [ "bytes", "fnv", From a78285db5e09c097791ebfc4f67c60b7eb8e5a11 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Fri, 21 Apr 2023 18:29:28 +0000 Subject: [PATCH 15/16] Fix Rust 1.69 lints (#4222) ## Issue Addressed N/A ## Proposed Changes Fixes lints mostly `extra-unused-type-parameters` 
https://rust-lang.github.io/rust-clippy/master/index.html#extra_unused_type_parameters --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 +-- beacon_node/beacon_chain/src/eth1_chain.rs | 34 ++++++++----------- .../execution_layer/src/engine_api/http.rs | 2 +- .../types/src/beacon_state/tree_hash_cache.rs | 4 +-- lcli/src/check_deposit_data.rs | 3 +- lcli/src/main.rs | 2 +- 6 files changed, 22 insertions(+), 27 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0165c54dc3..ca0c5ce15b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3619,7 +3619,7 @@ impl BeaconChain { let (state, state_root_opt) = self .task_executor .spawn_blocking_handle( - move || chain.load_state_for_block_production::(slot), + move || chain.load_state_for_block_production(slot), "produce_partial_beacon_block", ) .ok_or(BlockProductionError::ShuttingDown)? @@ -3642,7 +3642,7 @@ impl BeaconChain { /// Load a beacon state from the database for block production. This is a long-running process /// that should not be performed in an `async` context. 
- fn load_state_for_block_production>( + fn load_state_for_block_production( self: &Arc, slot: Slot, ) -> Result<(BeaconState, Option), BlockProductionError> { diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 25971bf85b..f820622e57 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -88,7 +88,7 @@ fn get_sync_status( let period = T::SlotsPerEth1VotingPeriod::to_u64(); let voting_period_start_slot = (current_slot / period) * period; - let period_start = slot_start_seconds::( + let period_start = slot_start_seconds( genesis_time, spec.seconds_per_slot, voting_period_start_slot, @@ -470,7 +470,7 @@ impl Eth1ChainBackend for CachingEth1Backend { fn eth1_data(&self, state: &BeaconState, spec: &ChainSpec) -> Result { let period = T::SlotsPerEth1VotingPeriod::to_u64(); let voting_period_start_slot = (state.slot() / period) * period; - let voting_period_start_seconds = slot_start_seconds::( + let voting_period_start_seconds = slot_start_seconds( state.genesis_time(), spec.seconds_per_slot, voting_period_start_slot, @@ -658,11 +658,7 @@ fn find_winning_vote(valid_votes: Eth1DataVoteCount) -> Option { } /// Returns the unix-epoch seconds at the start of the given `slot`. 
-fn slot_start_seconds( - genesis_unix_seconds: u64, - seconds_per_slot: u64, - slot: Slot, -) -> u64 { +fn slot_start_seconds(genesis_unix_seconds: u64, seconds_per_slot: u64, slot: Slot) -> u64 { genesis_unix_seconds + slot.as_u64() * seconds_per_slot } @@ -698,7 +694,7 @@ mod test { fn get_voting_period_start_seconds(state: &BeaconState, spec: &ChainSpec) -> u64 { let period = ::SlotsPerEth1VotingPeriod::to_u64(); let voting_period_start_slot = (state.slot() / period) * period; - slot_start_seconds::( + slot_start_seconds( state.genesis_time(), spec.seconds_per_slot, voting_period_start_slot, @@ -708,23 +704,23 @@ mod test { #[test] fn slot_start_time() { let zero_sec = 0; - assert_eq!(slot_start_seconds::(100, zero_sec, Slot::new(2)), 100); + assert_eq!(slot_start_seconds(100, zero_sec, Slot::new(2)), 100); let one_sec = 1; - assert_eq!(slot_start_seconds::(100, one_sec, Slot::new(0)), 100); - assert_eq!(slot_start_seconds::(100, one_sec, Slot::new(1)), 101); - assert_eq!(slot_start_seconds::(100, one_sec, Slot::new(2)), 102); + assert_eq!(slot_start_seconds(100, one_sec, Slot::new(0)), 100); + assert_eq!(slot_start_seconds(100, one_sec, Slot::new(1)), 101); + assert_eq!(slot_start_seconds(100, one_sec, Slot::new(2)), 102); let three_sec = 3; - assert_eq!(slot_start_seconds::(100, three_sec, Slot::new(0)), 100); - assert_eq!(slot_start_seconds::(100, three_sec, Slot::new(1)), 103); - assert_eq!(slot_start_seconds::(100, three_sec, Slot::new(2)), 106); + assert_eq!(slot_start_seconds(100, three_sec, Slot::new(0)), 100); + assert_eq!(slot_start_seconds(100, three_sec, Slot::new(1)), 103); + assert_eq!(slot_start_seconds(100, three_sec, Slot::new(2)), 106); let five_sec = 5; - assert_eq!(slot_start_seconds::(100, five_sec, Slot::new(0)), 100); - assert_eq!(slot_start_seconds::(100, five_sec, Slot::new(1)), 105); - assert_eq!(slot_start_seconds::(100, five_sec, Slot::new(2)), 110); - assert_eq!(slot_start_seconds::(100, five_sec, Slot::new(3)), 115); + 
assert_eq!(slot_start_seconds(100, five_sec, Slot::new(0)), 100); + assert_eq!(slot_start_seconds(100, five_sec, Slot::new(1)), 105); + assert_eq!(slot_start_seconds(100, five_sec, Slot::new(2)), 110); + assert_eq!(slot_start_seconds(100, five_sec, Slot::new(3)), 115); } fn get_eth1_block(timestamp: u64, number: u64) -> Eth1Block { diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 993957450b..f542bf5b4a 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -1187,7 +1187,7 @@ mod test { transactions, ..<_>::default() }); - let json = serde_json::to_value(&ep)?; + let json = serde_json::to_value(ep)?; Ok(json.get("transactions").unwrap().clone()) } diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index efc6573d2b..d1d63e3c80 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -403,7 +403,7 @@ impl ValidatorsListTreeHashCache { validators.len(), ), list_arena, - values: ParallelValidatorTreeHash::new::(validators), + values: ParallelValidatorTreeHash::new(validators), } } @@ -468,7 +468,7 @@ impl ParallelValidatorTreeHash { /// /// Allocates the necessary memory to store all of the cached Merkle trees but does perform any /// hashing. 
- fn new(validators: &[Validator]) -> Self { + fn new(validators: &[Validator]) -> Self { let num_arenas = std::cmp::max( 1, (validators.len() + VALIDATORS_PER_ARENA - 1) / VALIDATORS_PER_ARENA, diff --git a/lcli/src/check_deposit_data.rs b/lcli/src/check_deposit_data.rs index 56f18f9988..47c2c7addf 100644 --- a/lcli/src/check_deposit_data.rs +++ b/lcli/src/check_deposit_data.rs @@ -2,9 +2,8 @@ use clap::ArgMatches; use clap_utils::{parse_required, parse_ssz_required}; use deposit_contract::{decode_eth1_tx_data, DEPOSIT_DATA_LEN}; use tree_hash::TreeHash; -use types::EthSpec; -pub fn run(matches: &ArgMatches) -> Result<(), String> { +pub fn run(matches: &ArgMatches) -> Result<(), String> { let rlp_bytes = parse_ssz_required::>(matches, "deposit-data")?; let amount = parse_required(matches, "deposit-amount")?; diff --git a/lcli/src/main.rs b/lcli/src/main.rs index cdf9cfa677..eeb098f04d 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -847,7 +847,7 @@ fn run( } ("new-testnet", Some(matches)) => new_testnet::run::(testnet_dir, matches) .map_err(|e| format!("Failed to run new_testnet command: {}", e)), - ("check-deposit-data", Some(matches)) => check_deposit_data::run::(matches) + ("check-deposit-data", Some(matches)) => check_deposit_data::run(matches) .map_err(|e| format!("Failed to run check-deposit-data command: {}", e)), ("generate-bootnode-enr", Some(matches)) => generate_bootnode_enr::run::(matches) .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)), From b2ccc822d8183068b6d2feee7fe81441841fbf80 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Fri, 21 Apr 2023 14:14:57 -0700 Subject: [PATCH 16/16] Fix compiler issues --- Dockerfile | 2 +- beacon_node/execution_layer/src/lib.rs | 1 + lcli/Dockerfile | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 0d268c7e1a..6f44ae1248 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ FROM rust:1.68.2-bullseye AS builder -RUN apt-get 
update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake clang libclang-dev protobuf-compiler COPY . lighthouse ARG FEATURES ENV FEATURES $FEATURES diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index c5f067addd..a318f7d012 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1701,6 +1701,7 @@ impl ExecutionLayer { let payload = match fork { ForkName::Merge => ExecutionPayloadMerge::default().into(), ForkName::Capella => ExecutionPayloadCapella::default().into(), + ForkName::Eip4844 => ExecutionPayloadEip4844::default().into(), ForkName::Base | ForkName::Altair => { return Err(Error::InvalidForkForPayload); } diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 98f33f2153..95b34cf343 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -2,7 +2,7 @@ # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` FROM rust:1.68.2-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake clang libclang-dev protobuf-compiler COPY . lighthouse ARG PORTABLE ENV PORTABLE $PORTABLE