From 3128b5b43010d460eadbdbe3c31bd9d67bc74dd0 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 22 Sep 2022 06:08:52 +0000 Subject: [PATCH 01/27] v3.1.1 (#3585) ## Issue Addressed NA ## Proposed Changes Bump versions ## Additional Info - ~~Requires additional testing~~ - ~~Blocked on:~~ - ~~#3589~~ - ~~#3540~~ - ~~#3587~~ --- Cargo.lock | 12 ++++++------ beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- crypto/eth2_hashing/Cargo.toml | 2 +- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 7 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9cd7ff2ff9..071cc0d797 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -439,7 +439,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "3.1.0" +version = "3.1.1" dependencies = [ "beacon_chain", "clap", @@ -597,7 +597,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "3.1.0" +version = "3.1.1" dependencies = [ "beacon_node", "clap", @@ -956,9 +956,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.2" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" dependencies = [ "libc", ] @@ -3110,7 +3110,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "3.1.0" +version = "3.1.1" dependencies = [ "account_utils", "beacon_chain", @@ -3610,7 +3610,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "3.1.0" +version = "3.1.1" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 7245258bb2..1832778c56 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "3.1.0" +version = "3.1.1" authors = ["Paul 
Hauner ", "Age Manning "] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 85baa47fbb..be34f23d70 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v3.1.0-", - fallback = "Lighthouse/v3.1.0" + prefix = "Lighthouse/v3.1.1-", + fallback = "Lighthouse/v3.1.1" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/crypto/eth2_hashing/Cargo.toml b/crypto/eth2_hashing/Cargo.toml index eb92d252d1..28f106fcbb 100644 --- a/crypto/eth2_hashing/Cargo.toml +++ b/crypto/eth2_hashing/Cargo.toml @@ -8,7 +8,7 @@ description = "Hashing primitives used in Ethereum 2.0" [dependencies] lazy_static = { version = "1.4.0", optional = true } -cpufeatures = { version = "0.2.2", optional = true } +cpufeatures = { version = "0.2.5", optional = true } ring = "0.16.19" sha2 = "0.10.2" diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index a39abb3f78..3cc7e7adde 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "3.1.0" +version = "3.1.1" authors = ["Paul Hauner "] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index b74e1516f4..1ae1c9b0b0 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "3.1.0" +version = "3.1.1" authors = ["Sigma Prime "] edition = "2021" autotests = false From 76ba0a1aaf0c945b510bac6539e5fba738c44dc6 Mon Sep 17 00:00:00 2001 From: Ramana Kumar Date: Fri, 23 Sep 2022 03:52:41 +0000 Subject: [PATCH 02/27] Add disable-log-timestamp flag (#3101) (#3586) ## Issues Addressed Closes https://github.com/sigp/lighthouse/issues/3101 ## Proposed Changes Add global 
flag to suppress timestamps in the terminal logger. --- lcli/src/main.rs | 1 + lighthouse/environment/src/lib.rs | 15 ++++++++++++++- lighthouse/src/main.rs | 9 +++++++++ testing/simulator/src/eth1_sim.rs | 1 + testing/simulator/src/no_eth1_sim.rs | 1 + testing/simulator/src/sync_sim.rs | 1 + 6 files changed, 27 insertions(+), 1 deletion(-) diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 84b951e3f1..11a23fe0b4 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -785,6 +785,7 @@ fn run( logfile_debug_level: "trace", log_format: None, log_color: false, + disable_log_timestamp: false, max_log_size: 0, max_log_number: 0, compression: false, diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 679964c0de..46348e63ba 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -15,6 +15,7 @@ use futures::{future, StreamExt}; use slog::{error, info, o, warn, Drain, Duplicate, Level, Logger}; use sloggers::{file::FileLoggerBuilder, types::Format, types::Severity, Build}; use std::fs::create_dir_all; +use std::io::{Result as IOResult, Write}; use std::path::PathBuf; use std::sync::Arc; use task_executor::{ShutdownReason, TaskExecutor}; @@ -48,6 +49,7 @@ pub struct LoggerConfig<'a> { pub logfile_debug_level: &'a str, pub log_format: Option<&'a str>, pub log_color: bool, + pub disable_log_timestamp: bool, pub max_log_size: u64, pub max_log_number: usize, pub compression: bool, @@ -121,6 +123,10 @@ impl EnvironmentBuilder { Ok(self) } + fn log_nothing(_: &mut dyn Write) -> IOResult<()> { + Ok(()) + } + /// Initializes the logger using the specified configuration. /// The logger is "async" because it has a dedicated thread that accepts logs and then /// asynchronously flushes them to stdout/files/etc. 
This means the thread that raised the log @@ -149,7 +155,14 @@ impl EnvironmentBuilder { .build(); let stdout_decorator = logging::AlignedTermDecorator::new(stdout_decorator, logging::MAX_MESSAGE_WIDTH); - let stdout_drain = slog_term::FullFormat::new(stdout_decorator).build().fuse(); + let stdout_drain = slog_term::FullFormat::new(stdout_decorator); + let stdout_drain = if config.disable_log_timestamp { + stdout_drain.use_custom_timestamp(Self::log_nothing) + } else { + stdout_drain + } + .build() + .fuse(); slog_async::Async::new(stdout_drain) .chan_size(LOG_CHANNEL_SIZE) .build() diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 7897494cc4..341e1a91d5 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -145,6 +145,12 @@ fn main() { .help("Force outputting colors when emitting logs to the terminal.") .global(true), ) + .arg( + Arg::with_name("disable-log-timestamp") + .long("disable-log-timestamp") + .help("If present, do not include timestamps in logging output.") + .global(true), + ) .arg( Arg::with_name("debug-level") .long("debug-level") @@ -381,6 +387,8 @@ fn run( let log_color = matches.is_present("log-color"); + let disable_log_timestamp = matches.is_present("disable-log-timestamp"); + let logfile_debug_level = matches .value_of("logfile-debug-level") .ok_or("Expected --logfile-debug-level flag")?; @@ -434,6 +442,7 @@ fn run( logfile_debug_level, log_format, log_color, + disable_log_timestamp, max_log_size: logfile_max_size * 1_024 * 1_024, max_log_number: logfile_max_number, compression: logfile_compress, diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 613573cd0d..5e346d5466 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -66,6 +66,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { logfile_debug_level: "debug", log_format, log_color: false, + disable_log_timestamp: false, max_log_size: 0, max_log_number: 0, compression: 
false, diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index 28b8719843..57e2e01eb6 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -51,6 +51,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { logfile_debug_level: "debug", log_format, log_color: false, + disable_log_timestamp: false, max_log_size: 0, max_log_number: 0, compression: false, diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index 07d774b8d4..af5ba95e01 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -52,6 +52,7 @@ fn syncing_sim( logfile_debug_level: "debug", log_format, log_color: false, + disable_log_timestamp: false, max_log_size: 0, max_log_number: 0, compression: false, From fa6ad1a11ad8345492d820ef22093e349a5c1abb Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 23 Sep 2022 03:52:42 +0000 Subject: [PATCH 03/27] Deduplicate block root computation (#3590) ## Issue Addressed NA ## Proposed Changes This PR removes duplicated block root computation. Computing the `SignedBeaconBlock::canonical_root` has become more expensive since the merge as we need to compute the merkle root of each transaction inside an `ExecutionPayload`. Computing the root for [a mainnet block](https://beaconcha.in/slot/4704236) is taking ~10ms on my i7-8700K CPU @ 3.70GHz (no sha extensions). Given that our median seen-to-imported time for blocks is presently 300-400ms, removing a few duplicated block roots (~30ms) could represent an easy 10% improvement. When we consider that the seen-to-imported times include operations *after* the block has been placed in the early attester cache, we could expect the 30ms to be more significant WRT our seen-to-attestable times. 
## Additional Info NA --- beacon_node/beacon_chain/src/beacon_chain.rs | 12 +++-- .../beacon_chain/src/block_verification.rs | 45 ++++++++++--------- beacon_node/beacon_chain/src/lib.rs | 4 +- beacon_node/beacon_chain/src/test_utils.rs | 13 ++++-- .../beacon_chain/tests/block_verification.rs | 35 ++++++++++++--- .../tests/payload_invalidation.rs | 30 ++++++++++--- beacon_node/beacon_chain/tests/store_tests.rs | 26 ++++++++--- beacon_node/beacon_chain/tests/tests.rs | 1 + beacon_node/execution_layer/src/lib.rs | 3 +- beacon_node/http_api/src/lib.rs | 2 +- beacon_node/http_api/src/publish_blocks.rs | 35 ++++++++++----- .../http_api/tests/interactive_tests.rs | 7 ++- .../network/src/beacon_processor/mod.rs | 7 +++ .../network/src/beacon_processor/tests.rs | 2 + .../work_reprocessing_queue.rs | 1 + .../beacon_processor/worker/gossip_methods.rs | 27 ++++++++--- .../beacon_processor/worker/sync_methods.rs | 14 ++++-- .../network/src/sync/block_lookups/mod.rs | 21 +++++---- .../src/sync/block_lookups/parent_lookup.rs | 20 ++++++--- .../sync/block_lookups/single_block_lookup.rs | 11 +++-- .../network/src/sync/block_lookups/tests.rs | 26 ++++++----- beacon_node/network/src/sync/manager.rs | 6 +-- testing/ef_tests/src/cases/fork_choice.rs | 10 ++--- 23 files changed, 252 insertions(+), 106 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 96439f4908..b0fac779a4 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2220,7 +2220,7 @@ impl BeaconChain { } } - match check_block_relevancy(&block, Some(block_root), self) { + match check_block_relevancy(&block, block_root, self) { // If the block is relevant, add it to the filtered chain segment. Ok(_) => filtered_chain_segment.push((block_root, block)), // If the block is already known, simply ignore this block. @@ -2344,7 +2344,11 @@ impl BeaconChain { // Import the blocks into the chain. 
for signature_verified_block in signature_verified_blocks { match self - .process_block(signature_verified_block, count_unrealized) + .process_block( + signature_verified_block.block_root(), + signature_verified_block, + count_unrealized, + ) .await { Ok(_) => imported_blocks += 1, @@ -2429,6 +2433,7 @@ impl BeaconChain { /// verification. pub async fn process_block>( self: &Arc, + block_root: Hash256, unverified_block: B, count_unrealized: CountUnrealized, ) -> Result> { @@ -2444,7 +2449,8 @@ impl BeaconChain { // A small closure to group the verification and import errors. let chain = self.clone(); let import_block = async move { - let execution_pending = unverified_block.into_execution_pending_block(&chain)?; + let execution_pending = + unverified_block.into_execution_pending_block(block_root, &chain)?; chain .import_execution_pending_block(execution_pending, count_unrealized) .await diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index cdcbf3f68e..f83bc535d9 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -529,7 +529,7 @@ pub fn signature_verify_chain_segment( } let (first_root, first_block) = chain_segment.remove(0); - let (mut parent, first_block) = load_parent(first_block, chain)?; + let (mut parent, first_block) = load_parent(first_root, first_block, chain)?; let slot = first_block.slot(); chain_segment.insert(0, (first_root, first_block)); @@ -622,9 +622,10 @@ pub struct ExecutionPendingBlock { pub trait IntoExecutionPendingBlock: Sized { fn into_execution_pending_block( self, + block_root: Hash256, chain: &Arc>, ) -> Result, BlockError> { - self.into_execution_pending_block_slashable(chain) + self.into_execution_pending_block_slashable(block_root, chain) .map(|execution_pending| { // Supply valid block to slasher. 
if let Some(slasher) = chain.slasher.as_ref() { @@ -638,6 +639,7 @@ pub trait IntoExecutionPendingBlock: Sized { /// Convert the block to fully-verified form while producing data to aid checking slashability. fn into_execution_pending_block_slashable( self, + block_root: Hash256, chain: &Arc>, ) -> Result, BlockSlashInfo>>; @@ -781,7 +783,7 @@ impl GossipVerifiedBlock { } else { // The proposer index was *not* cached and we must load the parent in order to determine // the proposer index. - let (mut parent, block) = load_parent(block, chain)?; + let (mut parent, block) = load_parent(block_root, block, chain)?; debug!( chain.log, @@ -877,11 +879,12 @@ impl IntoExecutionPendingBlock for GossipVerifiedBlock>, ) -> Result, BlockSlashInfo>> { let execution_pending = SignatureVerifiedBlock::from_gossip_verified_block_check_slashable(self, chain)?; - execution_pending.into_execution_pending_block_slashable(chain) + execution_pending.into_execution_pending_block_slashable(block_root, chain) } fn block(&self) -> &SignedBeaconBlock { @@ -907,7 +910,7 @@ impl SignatureVerifiedBlock { // Check the anchor slot before loading the parent, to avoid spurious lookups. check_block_against_anchor_slot(block.message(), chain)?; - let (mut parent, block) = load_parent(block, chain)?; + let (mut parent, block) = load_parent(block_root, block, chain)?; // Reject any block that exceeds our limit on skipped slots. check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?; @@ -955,7 +958,7 @@ impl SignatureVerifiedBlock { let (mut parent, block) = if let Some(parent) = from.parent { (parent, from.block) } else { - load_parent(from.block, chain)? + load_parent(from.block_root, from.block, chain)? 
}; let state = cheap_state_advance_to_obtain_committees( @@ -991,29 +994,29 @@ impl SignatureVerifiedBlock { Self::from_gossip_verified_block(from, chain) .map_err(|e| BlockSlashInfo::from_early_error(header, e)) } + + pub fn block_root(&self) -> Hash256 { + self.block_root + } } impl IntoExecutionPendingBlock for SignatureVerifiedBlock { /// Completes verification of the wrapped `block`. fn into_execution_pending_block_slashable( self, + block_root: Hash256, chain: &Arc>, ) -> Result, BlockSlashInfo>> { let header = self.block.signed_block_header(); let (parent, block) = if let Some(parent) = self.parent { (parent, self.block) } else { - load_parent(self.block, chain) + load_parent(self.block_root, self.block, chain) .map_err(|e| BlockSlashInfo::SignatureValid(header.clone(), e))? }; - ExecutionPendingBlock::from_signature_verified_components( - block, - self.block_root, - parent, - chain, - ) - .map_err(|e| BlockSlashInfo::SignatureValid(header, e)) + ExecutionPendingBlock::from_signature_verified_components(block, block_root, parent, chain) + .map_err(|e| BlockSlashInfo::SignatureValid(header, e)) } fn block(&self) -> &SignedBeaconBlock { @@ -1026,14 +1029,15 @@ impl IntoExecutionPendingBlock for Arc>, ) -> Result, BlockSlashInfo>> { // Perform an early check to prevent wasting time on irrelevant blocks. - let block_root = check_block_relevancy(&self, None, chain) + let block_root = check_block_relevancy(&self, block_root, chain) .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; SignatureVerifiedBlock::check_slashable(self, block_root, chain)? - .into_execution_pending_block_slashable(chain) + .into_execution_pending_block_slashable(block_root, chain) } fn block(&self) -> &SignedBeaconBlock { @@ -1088,7 +1092,7 @@ impl ExecutionPendingBlock { * Perform cursory checks to see if the block is even worth processing. 
*/ - check_block_relevancy(&block, Some(block_root), chain)?; + check_block_relevancy(&block, block_root, chain)?; /* * Advance the given `parent.beacon_state` to the slot of the given `block`. @@ -1502,7 +1506,7 @@ pub fn check_block_is_finalized_descendant( /// experienced whilst attempting to verify. pub fn check_block_relevancy( signed_block: &SignedBeaconBlock, - block_root: Option, + block_root: Hash256, chain: &BeaconChain, ) -> Result> { let block = signed_block.message(); @@ -1526,8 +1530,6 @@ pub fn check_block_relevancy( return Err(BlockError::BlockSlotLimitReached); } - let block_root = block_root.unwrap_or_else(|| get_block_root(signed_block)); - // Do not process a block from a finalized slot. check_block_against_finalized_slot(block, block_root, chain)?; @@ -1581,6 +1583,7 @@ fn verify_parent_block_is_known( /// whilst attempting the operation. #[allow(clippy::type_complexity)] fn load_parent( + block_root: Hash256, block: Arc>, chain: &BeaconChain, ) -> Result< @@ -1614,7 +1617,7 @@ fn load_parent( .block_times_cache .read() .get_block_delays( - block.canonical_root(), + block_root, chain .slot_clock .start_of(block.slot()) diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 1e704deba5..fbcd8f7fb7 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -55,7 +55,9 @@ pub use self::errors::{BeaconChainError, BlockProductionError}; pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; -pub use block_verification::{BlockError, ExecutionPayloadError, GossipVerifiedBlock}; +pub use block_verification::{ + get_block_root, BlockError, ExecutionPayloadError, GossipVerifiedBlock, +}; pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use 
events::ServerSentEventHandler; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index a62608202e..f49563b149 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1453,12 +1453,13 @@ where pub async fn process_block( &self, slot: Slot, + block_root: Hash256, block: SignedBeaconBlock, ) -> Result> { self.set_current_slot(slot); let block_hash: SignedBeaconBlockHash = self .chain - .process_block(Arc::new(block), CountUnrealized::True) + .process_block(block_root, Arc::new(block), CountUnrealized::True) .await? .into(); self.chain.recompute_head_at_current_slot().await; @@ -1471,7 +1472,11 @@ where ) -> Result> { let block_hash: SignedBeaconBlockHash = self .chain - .process_block(Arc::new(block), CountUnrealized::True) + .process_block( + block.canonical_root(), + Arc::new(block), + CountUnrealized::True, + ) .await? .into(); self.chain.recompute_head_at_current_slot().await; @@ -1536,7 +1541,9 @@ where ) -> Result<(SignedBeaconBlockHash, SignedBeaconBlock, BeaconState), BlockError> { self.set_current_slot(slot); let (block, new_state) = self.make_block(state, slot).await; - let block_hash = self.process_block(slot, block.clone()).await?; + let block_hash = self + .process_block(slot, block.canonical_root(), block.clone()) + .await?; Ok((block_hash, block, new_state)) } diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 17c84bd697..0ff4e57a8a 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -346,6 +346,7 @@ async fn assert_invalid_signature( let process_res = harness .chain .process_block( + snapshots[block_index].beacon_block.canonical_root(), snapshots[block_index].beacon_block.clone(), CountUnrealized::True, ) @@ -403,12 +404,14 @@ async fn invalid_signature_gossip_block() { .await .into_block_error() 
.expect("should import all blocks prior to the one being tested"); + let signed_block = SignedBeaconBlock::from_block(block, junk_signature()); assert!( matches!( harness .chain .process_block( - Arc::new(SignedBeaconBlock::from_block(block, junk_signature())), + signed_block.canonical_root(), + Arc::new(signed_block), CountUnrealized::True ) .await, @@ -718,7 +721,11 @@ async fn block_gossip_verification() { harness .chain - .process_block(gossip_verified, CountUnrealized::True) + .process_block( + gossip_verified.block_root, + gossip_verified, + CountUnrealized::True, + ) .await .expect("should import valid gossip verified block"); } @@ -985,7 +992,11 @@ async fn verify_block_for_gossip_slashing_detection() { .unwrap(); harness .chain - .process_block(verified_block, CountUnrealized::True) + .process_block( + verified_block.block_root, + verified_block, + CountUnrealized::True, + ) .await .unwrap(); unwrap_err( @@ -1020,7 +1031,11 @@ async fn verify_block_for_gossip_doppelganger_detection() { let attestations = verified_block.block.message().body().attestations().clone(); harness .chain - .process_block(verified_block, CountUnrealized::True) + .process_block( + verified_block.block_root, + verified_block, + CountUnrealized::True, + ) .await .unwrap(); @@ -1161,7 +1176,11 @@ async fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .process_block(Arc::new(base_block.clone()), CountUnrealized::True) + .process_block( + base_block.canonical_root(), + Arc::new(base_block.clone()), + CountUnrealized::True + ) .await .err() .expect("should error when processing base block"), @@ -1289,7 +1308,11 @@ async fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .process_block(Arc::new(altair_block.clone()), CountUnrealized::True) + .process_block( + altair_block.canonical_root(), + Arc::new(altair_block.clone()), + CountUnrealized::True + ) .await .err() .expect("should error when processing altair block"), diff --git 
a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 027a708cfa..2336c3ba99 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -281,7 +281,7 @@ impl InvalidPayloadRig { } let root = self .harness - .process_block(slot, block.clone()) + .process_block(slot, block.canonical_root(), block.clone()) .await .unwrap(); @@ -320,7 +320,11 @@ impl InvalidPayloadRig { set_new_payload(new_payload_response); set_forkchoice_updated(forkchoice_response); - match self.harness.process_block(slot, block).await { + match self + .harness + .process_block(slot, block.canonical_root(), block) + .await + { Err(error) if evaluate_error(&error) => (), Err(other) => { panic!("evaluate_error returned false with {:?}", other) @@ -685,7 +689,11 @@ async fn invalidates_all_descendants() { let fork_block_root = rig .harness .chain - .process_block(Arc::new(fork_block), CountUnrealized::True) + .process_block( + fork_block.canonical_root(), + Arc::new(fork_block), + CountUnrealized::True, + ) .await .unwrap(); rig.recompute_head().await; @@ -777,7 +785,11 @@ async fn switches_heads() { let fork_block_root = rig .harness .chain - .process_block(Arc::new(fork_block), CountUnrealized::True) + .process_block( + fork_block.canonical_root(), + Arc::new(fork_block), + CountUnrealized::True, + ) .await .unwrap(); rig.recompute_head().await; @@ -1023,7 +1035,7 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for import. 
assert!(matches!( - rig.harness.chain.process_block(block.clone(), CountUnrealized::True).await, + rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); @@ -1305,7 +1317,7 @@ async fn build_optimistic_chain( for block in blocks { rig.harness .chain - .process_block(block, CountUnrealized::True) + .process_block(block.canonical_root(), block, CountUnrealized::True) .await .unwrap(); } @@ -1863,7 +1875,11 @@ async fn recover_from_invalid_head_by_importing_blocks() { // Import the fork block, it should become the head. rig.harness .chain - .process_block(fork_block.clone(), CountUnrealized::True) + .process_block( + fork_block.canonical_root(), + fork_block.clone(), + CountUnrealized::True, + ) .await .unwrap(); rig.recompute_head().await; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index b85ff50efb..2fcd74be4b 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2125,7 +2125,11 @@ async fn weak_subjectivity_sync() { beacon_chain.slot_clock.set_slot(slot.as_u64()); beacon_chain - .process_block(Arc::new(full_block), CountUnrealized::True) + .process_block( + full_block.canonical_root(), + Arc::new(full_block), + CountUnrealized::True, + ) .await .unwrap(); beacon_chain.recompute_head_at_current_slot().await; @@ -2382,8 +2386,14 @@ async fn revert_minority_fork_on_resume() { let (block, new_state) = harness1.make_block(state, slot).await; - harness1.process_block(slot, block.clone()).await.unwrap(); - harness2.process_block(slot, block.clone()).await.unwrap(); + harness1 + .process_block(slot, block.canonical_root(), block.clone()) + .await + .unwrap(); + harness2 + .process_block(slot, block.canonical_root(), block.clone()) + .await + .unwrap(); state = new_state; block_root = 
block.canonical_root(); @@ -2416,12 +2426,18 @@ async fn revert_minority_fork_on_resume() { // Minority chain block (no attesters). let (block1, new_state1) = harness1.make_block(state1, slot).await; - harness1.process_block(slot, block1).await.unwrap(); + harness1 + .process_block(slot, block1.canonical_root(), block1) + .await + .unwrap(); state1 = new_state1; // Majority chain block (all attesters). let (block2, new_state2) = harness2.make_block(state2, slot).await; - harness2.process_block(slot, block2.clone()).await.unwrap(); + harness2 + .process_block(slot, block2.canonical_root(), block2.clone()) + .await + .unwrap(); state2 = new_state2; block_root = block2.canonical_root(); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index f7d443748d..a13946bf2b 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -685,6 +685,7 @@ async fn run_skip_slot_test(skip_slots: u64) { harness_b .chain .process_block( + harness_a.chain.head_snapshot().beacon_block_root, harness_a.chain.head_snapshot().beacon_block.clone(), CountUnrealized::True ) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 89dc3f68e9..68071ee9b1 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1393,12 +1393,13 @@ impl ExecutionLayer { pub async fn propose_blinded_beacon_block( &self, + block_root: Hash256, block: &SignedBeaconBlock>, ) -> Result, Error> { debug!( self.log(), "Sending block to builder"; - "root" => ?block.canonical_root(), + "root" => ?block_root, ); if let Some(builder) = self.builder() { builder diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5c2660b303..312f2a29e2 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1046,7 +1046,7 @@ pub fn serve( chain: Arc>, network_tx: UnboundedSender>, log: Logger| async move { - 
publish_blocks::publish_block(block, chain, &network_tx, log) + publish_blocks::publish_block(None, block, chain, &network_tx, log) .await .map(|()| warp::reply()) }, diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 60ca8f2328..3c50fb95a2 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -9,13 +9,14 @@ use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ - BlindedPayload, ExecPayload, ExecutionBlockHash, ExecutionPayload, FullPayload, + BlindedPayload, ExecPayload, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, SignedBeaconBlock, }; use warp::Rejection; /// Handles a request from the HTTP API for full blocks. pub async fn publish_block( + block_root: Option, block: Arc>, chain: Arc>, network_tx: &UnboundedSender>, @@ -31,8 +32,10 @@ pub async fn publish_block( let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); + let block_root = block_root.unwrap_or_else(|| block.canonical_root()); + match chain - .process_block(block.clone(), CountUnrealized::True) + .process_block(block_root, block.clone(), CountUnrealized::True) .await { Ok(root) => { @@ -127,8 +130,16 @@ pub async fn publish_blinded_block( network_tx: &UnboundedSender>, log: Logger, ) -> Result<(), Rejection> { - let full_block = reconstruct_block(chain.clone(), block, log.clone()).await?; - publish_block::(Arc::new(full_block), chain, network_tx, log).await + let block_root = block.canonical_root(); + let full_block = reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; + publish_block::( + Some(block_root), + Arc::new(full_block), + chain, + network_tx, + log, + ) + .await } /// Deconstruct the given blinded block, and construct a full block. 
This attempts to use the @@ -136,6 +147,7 @@ pub async fn publish_blinded_block( /// the full payload. async fn reconstruct_block( chain: Arc>, + block_root: Hash256, block: SignedBeaconBlock>, log: Logger, ) -> Result>, Rejection> { @@ -155,12 +167,15 @@ async fn reconstruct_block( cached_payload // Otherwise, this means we are attempting a blind block proposal. } else { - let full_payload = el.propose_blinded_beacon_block(&block).await.map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "Blind block proposal failed: {:?}", - e - )) - })?; + let full_payload = el + .propose_blinded_beacon_block(block_root, &block) + .await + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Blind block proposal failed: {:?}", + e + )) + })?; info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash); full_payload }; diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 3327093d09..b3227d7723 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -67,7 +67,10 @@ pub async fn fork_choice_before_proposal() { let state_a = harness.get_current_state(); let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b).await; - let block_root_b = harness.process_block(slot_b, block_b).await.unwrap(); + let block_root_b = harness + .process_block(slot_b, block_b.canonical_root(), block_b) + .await + .unwrap(); // Create attestations to B but keep them in reserve until after C has been processed. 
let attestations_b = harness.make_attestations( @@ -80,7 +83,7 @@ pub async fn fork_choice_before_proposal() { let (block_c, state_c) = harness.make_block(state_a, slot_c).await; let block_root_c = harness - .process_block(slot_c, block_c.clone()) + .process_block(slot_c, block_c.canonical_root(), block_c.clone()) .await .unwrap(); diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index e9a115904d..f477878ac0 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -489,6 +489,7 @@ impl WorkEvent { /// Create a new `Work` event for some block, where the result from computation (if any) is /// sent to the other side of `result_tx`. pub fn rpc_beacon_block( + block_root: Hash256, block: Arc>, seen_timestamp: Duration, process_type: BlockProcessType, @@ -496,6 +497,7 @@ impl WorkEvent { Self { drop_during_sync: false, work: Work::RpcBlock { + block_root, block, seen_timestamp, process_type, @@ -577,6 +579,7 @@ impl std::convert::From> for WorkEvent { }, }, ReadyWork::RpcBlock(QueuedRpcBlock { + block_root, block, seen_timestamp, process_type, @@ -584,6 +587,7 @@ impl std::convert::From> for WorkEvent { }) => Self { drop_during_sync: false, work: Work::RpcBlock { + block_root, block, seen_timestamp, process_type, @@ -705,6 +709,7 @@ pub enum Work { seen_timestamp: Duration, }, RpcBlock { + block_root: Hash256, block: Arc>, seen_timestamp: Duration, process_type: BlockProcessType, @@ -1532,11 +1537,13 @@ impl BeaconProcessor { * Verification for beacon blocks received during syncing via RPC. 
*/ Work::RpcBlock { + block_root, block, seen_timestamp, process_type, should_process, } => task_spawner.spawn_async(worker.process_rpc_block( + block_root, block, seen_timestamp, process_type, diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index 05854ac1e2..ea1a59e0d0 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -242,6 +242,7 @@ impl TestRig { pub fn enqueue_rpc_block(&self) { let event = WorkEvent::rpc_beacon_block( + self.next_block.canonical_root(), self.next_block.clone(), std::time::Duration::default(), BlockProcessType::ParentLookup { @@ -253,6 +254,7 @@ impl TestRig { pub fn enqueue_single_lookup_rpc_block(&self) { let event = WorkEvent::rpc_beacon_block( + self.next_block.canonical_root(), self.next_block.clone(), std::time::Duration::default(), BlockProcessType::SingleBlock { id: 1 }, diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index efe8d3bf12..2aeec11c32 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -109,6 +109,7 @@ pub struct QueuedGossipBlock { /// A block that arrived for processing when the same block was being imported over gossip. /// It is queued for later import. 
pub struct QueuedRpcBlock { + pub block_root: Hash256, pub block: Arc>, pub process_type: BlockProcessType, pub seen_timestamp: Duration, diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 307b569a91..eaf5cd005c 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -713,16 +713,28 @@ impl Worker { block_delay, ); + let verification_result = self + .chain + .clone() + .verify_block_for_gossip(block.clone()) + .await; + + let block_root = if let Ok(verified_block) = &verification_result { + verified_block.block_root + } else { + block.canonical_root() + }; + // Write the time the block was observed into delay cache. self.chain.block_times_cache.write().set_time_observed( - block.canonical_root(), + block_root, block.slot(), seen_duration, Some(peer_id.to_string()), Some(peer_client.to_string()), ); - let verified_block = match self.chain.clone().verify_block_for_gossip(block).await { + let verified_block = match verification_result { Ok(verified_block) => { if block_delay >= self.chain.slot_clock.unagg_attestation_production_delay() { metrics::inc_counter(&metrics::BEACON_BLOCK_GOSSIP_ARRIVED_LATE_TOTAL); @@ -762,9 +774,9 @@ impl Worker { debug!( self.log, "Unknown parent for gossip block"; - "root" => ?block.canonical_root() + "root" => ?block_root ); - self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block)); + self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block, block_root)); return None; } Err(e @ BlockError::BeaconChainError(_)) => { @@ -918,10 +930,11 @@ impl Worker { _seen_duration: Duration, ) { let block: Arc<_> = verified_block.block.clone(); + let block_root = verified_block.block_root; match self .chain - .process_block(verified_block, CountUnrealized::True) + .process_block(block_root, verified_block, CountUnrealized::True) .await { 
Ok(block_root) => { @@ -956,7 +969,7 @@ impl Worker { "Block with unknown parent attempted to be processed"; "peer_id" => %peer_id ); - self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block)); + self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block, block_root)); } Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { debug!( @@ -970,7 +983,7 @@ impl Worker { self.log, "Invalid gossip beacon block"; "outcome" => ?other, - "block root" => ?block.canonical_root(), + "block root" => ?block_root, "block slot" => block.slot() ); self.gossip_penalize_peer( diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 760896e0e9..5d97894fe4 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -38,8 +38,10 @@ struct ChainSegmentFailed { impl Worker { /// Attempt to process a block received from a direct RPC request. 
+ #[allow(clippy::too_many_arguments)] pub async fn process_rpc_block( self, + block_root: Hash256, block: Arc>, seen_timestamp: Duration, process_type: BlockProcessType, @@ -56,17 +58,18 @@ impl Worker { return; } // Check if the block is already being imported through another source - let handle = match duplicate_cache.check_and_insert(block.canonical_root()) { + let handle = match duplicate_cache.check_and_insert(block_root) { Some(handle) => handle, None => { debug!( self.log, "Gossip block is being processed"; "action" => "sending rpc block to reprocessing queue", - "block_root" => %block.canonical_root(), + "block_root" => %block_root, ); // Send message to work reprocess queue to retry the block let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { + block_root, block: block.clone(), process_type, seen_timestamp, @@ -74,13 +77,16 @@ impl Worker { }); if reprocess_tx.try_send(reprocess_msg).is_err() { - error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block.canonical_root()) + error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block_root) }; return; } }; let slot = block.slot(); - let result = self.chain.process_block(block, CountUnrealized::True).await; + let result = self + .chain + .process_block(block_root, block, CountUnrealized::True) + .await; metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 22d815121a..5c2bc65229 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -30,6 +30,8 @@ mod single_block_lookup; #[cfg(test)] mod tests; +pub type RootBlockTuple = (Hash256, Arc>); + const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; @@ -101,11 +103,11 @@ impl BlockLookups { /// called in order to find the block's 
parent. pub fn search_parent( &mut self, + block_root: Hash256, block: Arc>, peer_id: PeerId, cx: &mut SyncNetworkContext, ) { - let block_root = block.canonical_root(); let parent_root = block.parent_root(); // If this block or it's parent is part of a known failed chain, ignore it. if self.failed_chains.contains(&parent_root) || self.failed_chains.contains(&block_root) { @@ -125,7 +127,7 @@ impl BlockLookups { return; } - let parent_lookup = ParentLookup::new(block, peer_id); + let parent_lookup = ParentLookup::new(block_root, block, peer_id); self.request_parent(parent_lookup, cx); } @@ -153,10 +155,11 @@ impl BlockLookups { }; match request.get_mut().verify_block(block) { - Ok(Some(block)) => { + Ok(Some((block_root, block))) => { // This is the correct block, send it for processing if self .send_block_for_processing( + block_root, block, seen_timestamp, BlockProcessType::SingleBlock { id }, @@ -217,11 +220,12 @@ impl BlockLookups { }; match parent_lookup.verify_block(block, &mut self.failed_chains) { - Ok(Some(block)) => { + Ok(Some((block_root, block))) => { // Block is correct, send to the beacon processor. 
let chain_hash = parent_lookup.chain_hash(); if self .send_block_for_processing( + block_root, block, seen_timestamp, BlockProcessType::ParentLookup { chain_hash }, @@ -420,7 +424,7 @@ impl BlockLookups { error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e); } BlockError::ParentUnknown(block) => { - self.search_parent(block, peer_id, cx); + self.search_parent(root, block, peer_id, cx); } ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => { // These errors indicate that the execution layer is offline @@ -625,6 +629,7 @@ impl BlockLookups { fn send_block_for_processing( &mut self, + block_root: Hash256, block: Arc>, duration: Duration, process_type: BlockProcessType, @@ -632,8 +637,8 @@ impl BlockLookups { ) -> Result<(), ()> { match cx.processor_channel_if_enabled() { Some(beacon_processor_send) => { - trace!(self.log, "Sending block for processing"; "block" => %block.canonical_root(), "process" => ?process_type); - let event = WorkEvent::rpc_beacon_block(block, duration, process_type); + trace!(self.log, "Sending block for processing"; "block" => ?block_root, "process" => ?process_type); + let event = WorkEvent::rpc_beacon_block(block_root, block, duration, process_type); if let Err(e) = beacon_processor_send.try_send(event) { error!( self.log, @@ -646,7 +651,7 @@ impl BlockLookups { } } None => { - trace!(self.log, "Dropping block ready for processing. Beacon processor not available"; "block" => %block.canonical_root()); + trace!(self.log, "Dropping block ready for processing. 
Beacon processor not available"; "block" => %block_root); Err(()) } } diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index 295d9cc94b..38ad59ebc4 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -1,3 +1,4 @@ +use super::RootBlockTuple; use beacon_chain::BeaconChainTypes; use lighthouse_network::PeerId; use std::sync::Arc; @@ -58,11 +59,15 @@ impl ParentLookup { .any(|d_block| d_block.as_ref() == block) } - pub fn new(block: Arc>, peer_id: PeerId) -> Self { + pub fn new( + block_root: Hash256, + block: Arc>, + peer_id: PeerId, + ) -> Self { let current_parent_request = SingleBlockRequest::new(block.parent_root(), peer_id); Self { - chain_hash: block.canonical_root(), + chain_hash: block_root, downloaded_blocks: vec![block], current_parent_request, current_parent_request_id: None, @@ -130,12 +135,15 @@ impl ParentLookup { &mut self, block: Option>>, failed_chains: &mut lru_cache::LRUTimeCache, - ) -> Result>>, VerifyError> { - let block = self.current_parent_request.verify_block(block)?; + ) -> Result>, VerifyError> { + let root_and_block = self.current_parent_request.verify_block(block)?; // check if the parent of this block isn't in the failed cache. If it is, this chain should // be dropped and the peer downscored. 
- if let Some(parent_root) = block.as_ref().map(|block| block.parent_root()) { + if let Some(parent_root) = root_and_block + .as_ref() + .map(|(_, block)| block.parent_root()) + { if failed_chains.contains(&parent_root) { self.current_parent_request.register_failure_downloading(); self.current_parent_request_id = None; @@ -143,7 +151,7 @@ impl ParentLookup { } } - Ok(block) + Ok(root_and_block) } pub fn get_processing_peer(&self, chain_hash: Hash256) -> Option { diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 8ba5b17bfa..256a2b4297 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -1,6 +1,8 @@ use std::collections::HashSet; use std::sync::Arc; +use super::RootBlockTuple; +use beacon_chain::get_block_root; use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; use rand::seq::IteratorRandom; use ssz_types::VariableList; @@ -104,7 +106,7 @@ impl SingleBlockRequest { pub fn verify_block( &mut self, block: Option>>, - ) -> Result>>, VerifyError> { + ) -> Result>, VerifyError> { match self.state { State::AwaitingDownload => { self.register_failure_downloading(); @@ -112,7 +114,10 @@ impl SingleBlockRequest { } State::Downloading { peer_id } => match block { Some(block) => { - if block.canonical_root() != self.hash { + // Compute the block root using this specific function so that we can get timing + // metrics. + let block_root = get_block_root(&block); + if block_root != self.hash { // return an error and drop the block // NOTE: we take this is as a download failure to prevent counting the // attempt as a chain failure, but simply a peer failure. @@ -121,7 +126,7 @@ impl SingleBlockRequest { } else { // Return the block for processing. 
self.state = State::Processing { peer_id }; - Ok(Some(block)) + Ok(Some((block_root, block))) } } None => { diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index ead15e23a5..64a1a6e836 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -272,7 +272,7 @@ fn test_parent_lookup_happy_path() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Arc::new(block), peer_id, &mut cx); + bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); let id = rig.expect_parent_request(); // Peer sends the right block, it should be sent for processing. Peer should not be penalized. @@ -300,7 +300,7 @@ fn test_parent_lookup_wrong_response() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Arc::new(block), peer_id, &mut cx); + bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); let id1 = rig.expect_parent_request(); // Peer sends the wrong block, peer should be penalized and the block re-requested. @@ -337,7 +337,7 @@ fn test_parent_lookup_empty_response() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Arc::new(block), peer_id, &mut cx); + bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); let id1 = rig.expect_parent_request(); // Peer sends an empty response, peer should be penalized and the block re-requested. @@ -369,7 +369,7 @@ fn test_parent_lookup_rpc_failure() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Arc::new(block), peer_id, &mut cx); + bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); let id1 = rig.expect_parent_request(); // The request fails. It should be tried again. 
@@ -396,10 +396,11 @@ fn test_parent_lookup_too_many_attempts() { let parent = rig.rand_block(); let block = rig.block_with_parent(parent.canonical_root()); + let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Arc::new(block), peer_id, &mut cx); + bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { let id = rig.expect_parent_request(); match i % 2 { @@ -435,7 +436,7 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Arc::new(block), peer_id, &mut cx); + bl.search_parent(block_hash, Arc::new(block), peer_id, &mut cx); for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { assert!(!bl.failed_chains.contains(&block_hash)); let id = rig.expect_parent_request(); @@ -469,7 +470,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Arc::new(block), peer_id, &mut cx); + bl.search_parent(block_hash, Arc::new(block), peer_id, &mut cx); // Fail downloading the block for _ in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { @@ -510,7 +511,7 @@ fn test_parent_lookup_too_deep() { let peer_id = PeerId::random(); let trigger_block = blocks.pop().unwrap(); let chain_hash = trigger_block.canonical_root(); - bl.search_parent(Arc::new(trigger_block), peer_id, &mut cx); + bl.search_parent(chain_hash, Arc::new(trigger_block), peer_id, &mut cx); for block in blocks.into_iter().rev() { let id = rig.expect_parent_request(); @@ -537,7 +538,12 @@ fn test_parent_lookup_disconnection() { let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); let peer_id = PeerId::random(); let trigger_block = rig.rand_block(); - bl.search_parent(Arc::new(trigger_block), peer_id, &mut cx); + bl.search_parent( + trigger_block.canonical_root(), + Arc::new(trigger_block), + peer_id, + 
&mut cx, + ); bl.peer_disconnected(&peer_id, &mut cx); assert!(bl.parent_queue.is_empty()); } @@ -581,7 +587,7 @@ fn test_parent_lookup_ignored_response() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Arc::new(block), peer_id, &mut cx); + bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); let id = rig.expect_parent_request(); // Peer sends the right block, it should be sent for processing. Peer should not be penalized. diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 6230347977..cdef904715 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -94,7 +94,7 @@ pub enum SyncMessage { }, /// A block with an unknown parent has been received. - UnknownBlock(PeerId, Arc>), + UnknownBlock(PeerId, Arc>, Hash256), /// A peer has sent an object that references a block that is unknown. This triggers the /// manager to attempt to find the block matching the unknown hash. 
@@ -503,7 +503,7 @@ impl SyncManager { } => { self.rpc_block_received(request_id, peer_id, beacon_block, seen_timestamp); } - SyncMessage::UnknownBlock(peer_id, block) => { + SyncMessage::UnknownBlock(peer_id, block, block_root) => { // If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore if !self.network_globals.sync_state.read().is_synced() { let head_slot = self.chain.canonical_head.cached_head().head_slot(); @@ -523,7 +523,7 @@ impl SyncManager { && self.network.is_execution_engine_online() { self.block_lookups - .search_parent(block, peer_id, &mut self.network); + .search_parent(block_root, block, peer_id, &mut self.network); } } SyncMessage::UnknownBlockHash(peer_id, block_hash) => { diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 650452d783..0e1bb2aced 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -331,11 +331,11 @@ impl Tester { pub fn process_block(&self, block: SignedBeaconBlock, valid: bool) -> Result<(), Error> { let block_root = block.canonical_root(); let block = Arc::new(block); - let result = self.block_on_dangerous( - self.harness - .chain - .process_block(block.clone(), CountUnrealized::False), - )?; + let result = self.block_on_dangerous(self.harness.chain.process_block( + block_root, + block.clone(), + CountUnrealized::False, + ))?; if result.is_ok() != valid { return Err(Error::DidntFail(format!( "block with root {} was valid={} whilst test expects valid={}. result: {:?}", From 3a3dddc5fbe9a554eab937afd263c571271fdd1d Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Fri, 23 Sep 2022 03:52:43 +0000 Subject: [PATCH 04/27] Fix ee integration tests (#3592) ## Issue Addressed Resolves #3573 ## Proposed Changes Fix the bytecode for the deposit contract deployment transaction and value for deposit transaction in the execution engine integration tests. 
Also verify that all published transaction make it to the execution payload and have a valid status. --- .../src/test_rig.rs | 19 +++++++++++++--- .../src/transactions.rs | 22 ++++++++++++------- 2 files changed, 30 insertions(+), 11 deletions(-) diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 0aa960bc41..515e238e97 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -239,13 +239,16 @@ impl TestRig { // Submit transactions before getting payload let txs = transactions::(account1, account2); + let mut pending_txs = Vec::new(); for tx in txs.clone().into_iter() { - self.ee_a + let pending_tx = self + .ee_a .execution_engine .provider .send_transaction(tx, None) .await .unwrap(); + pending_txs.push(pending_tx); } /* @@ -328,8 +331,6 @@ impl TestRig { .unwrap() .execution_payload; - assert_eq!(valid_payload.transactions.len(), txs.len()); - /* * Execution Engine A: * @@ -393,6 +394,18 @@ impl TestRig { .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); + assert_eq!(valid_payload.transactions.len(), pending_txs.len()); + + // Verify that all submitted txs were successful + for pending_tx in pending_txs { + let tx_receipt = pending_tx.await.unwrap().unwrap(); + assert_eq!( + tx_receipt.status, + Some(1.into()), + "Tx index {} has invalid status ", + tx_receipt.transaction_index + ); + } /* * Execution Engine A: diff --git a/testing/execution_engine_integration/src/transactions.rs b/testing/execution_engine_integration/src/transactions.rs index 144946682b..a8c0ab3c15 100644 --- a/testing/execution_engine_integration/src/transactions.rs +++ b/testing/execution_engine_integration/src/transactions.rs @@ -1,7 +1,7 @@ use deposit_contract::{encode_eth1_tx_data, BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS}; use ethers_core::types::{ transaction::{eip2718::TypedTransaction, eip2930::AccessList}, - Address, Bytes, 
Eip1559TransactionRequest, TransactionRequest, + Address, Bytes, Eip1559TransactionRequest, TransactionRequest, U256, }; use types::{DepositData, EthSpec, Hash256, Keypair, Signature}; @@ -56,30 +56,36 @@ impl Transaction { .value(1) .with_access_list(AccessList::default()) .into(), - Self::DeployDepositContract(addr) => TransactionRequest::new() - .from(*addr) - .data(Bytes::from(BYTECODE.to_vec())) - .gas(CONTRACT_DEPLOY_GAS) - .into(), + Self::DeployDepositContract(addr) => { + let mut bytecode = String::from_utf8(BYTECODE.to_vec()).unwrap(); + bytecode.retain(|c| c.is_ascii_hexdigit()); + let bytecode = hex::decode(&bytecode[1..]).unwrap(); + TransactionRequest::new() + .from(*addr) + .data(Bytes::from(bytecode)) + .gas(CONTRACT_DEPLOY_GAS) + .into() + } Self::DepositDepositContract { sender, deposit_contract_address, } => { let keypair = Keypair::random(); + let amount: u64 = 32_000_000_000; let mut deposit = DepositData { pubkey: keypair.pk.into(), withdrawal_credentials: Hash256::zero(), - amount: 32_000_000_000, + amount, signature: Signature::empty().into(), }; - deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); TransactionRequest::new() .from(*sender) .to(*deposit_contract_address) .data(Bytes::from(encode_eth1_tx_data(&deposit).unwrap())) .gas(DEPOSIT_GAS) + .value(U256::from(amount) * U256::exp10(9)) .into() } } From 9246a92d76bb06c0b61adefc87f5e3467eb22c03 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 23 Sep 2022 03:52:44 +0000 Subject: [PATCH 05/27] Make garbage collection test less failure prone (#3599) ## Issue Addressed NA ## Proposed Changes This PR attempts to fix the following spurious CI failure: ``` ---- store_tests::garbage_collect_temp_states_from_failed_block stdout ---- thread 'store_tests::garbage_collect_temp_states_from_failed_block' panicked at 'disk store should initialize: DBError { message: "Error { message: \"IO error: lock /tmp/.tmp6DcBQ9/cold_db/LOCK: already held by process\" }" }', 
beacon_node/beacon_chain/tests/store_tests.rs:59:10 ``` I believe that some async task is taking a clone of the store and holding it in some other thread for a short time. This creates a race-condition when we try to open a new instance of the store. ## Additional Info NA --- beacon_node/beacon_chain/tests/store_tests.rs | 75 +++++++++++-------- 1 file changed, 43 insertions(+), 32 deletions(-) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 2fcd74be4b..883b871b1c 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -26,6 +26,7 @@ use store::{ HotColdDB, LevelDB, StoreConfig, }; use tempfile::{tempdir, TempDir}; +use tokio::time::sleep; use tree_hash::TreeHash; use types::test_utils::{SeedableRng, XorShiftRng}; use types::*; @@ -1985,45 +1986,55 @@ async fn pruning_test( check_no_blocks_exist(&harness, stray_blocks.values()); } -#[test] -fn garbage_collect_temp_states_from_failed_block() { +#[tokio::test] +async fn garbage_collect_temp_states_from_failed_block() { let db_path = tempdir().unwrap(); - let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - // Use a `block_on_dangerous` rather than an async test to stop spawned processes from holding - // a reference to the store. - harness.chain.task_executor.clone().block_on_dangerous( - async move { - let slots_per_epoch = E::slots_per_epoch(); + // Wrap these functions to ensure the variables are dropped before we try to open another + // instance of the store. 
+ let mut store = { + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - let genesis_state = harness.get_current_state(); - let block_slot = Slot::new(2 * slots_per_epoch); - let (signed_block, state) = harness.make_block(genesis_state, block_slot).await; + let slots_per_epoch = E::slots_per_epoch(); - let (mut block, _) = signed_block.deconstruct(); + let genesis_state = harness.get_current_state(); + let block_slot = Slot::new(2 * slots_per_epoch); + let (signed_block, state) = harness.make_block(genesis_state, block_slot).await; - // Mutate the block to make it invalid, and re-sign it. - *block.state_root_mut() = Hash256::repeat_byte(0xff); - let proposer_index = block.proposer_index() as usize; - let block = block.sign( - &harness.validator_keypairs[proposer_index].sk, - &state.fork(), - state.genesis_validators_root(), - &harness.spec, - ); + let (mut block, _) = signed_block.deconstruct(); - // The block should be rejected, but should store a bunch of temporary states. - harness.set_current_slot(block_slot); - harness.process_block_result(block).await.unwrap_err(); + // Mutate the block to make it invalid, and re-sign it. + *block.state_root_mut() = Hash256::repeat_byte(0xff); + let proposer_index = block.proposer_index() as usize; + let block = block.sign( + &harness.validator_keypairs[proposer_index].sk, + &state.fork(), + state.genesis_validators_root(), + &harness.spec, + ); - assert_eq!( - store.iter_temporary_state_roots().count(), - block_slot.as_usize() - 1 - ); - }, - "test", - ); + // The block should be rejected, but should store a bunch of temporary states. + harness.set_current_slot(block_slot); + harness.process_block_result(block).await.unwrap_err(); + + assert_eq!( + store.iter_temporary_state_roots().count(), + block_slot.as_usize() - 1 + ); + store + }; + + // Wait until all the references to the store have been dropped, this helps ensure we can + // re-open the store later. 
+ loop { + store = if let Err(store_arc) = Arc::try_unwrap(store) { + sleep(Duration::from_millis(500)).await; + store_arc + } else { + break; + } + } // On startup, the store should garbage collect all the temporary states. let store = get_store(&db_path); From 9bd384a573c869f3a59752bae4f75023d394052e Mon Sep 17 00:00:00 2001 From: Divma Date: Fri, 23 Sep 2022 03:52:45 +0000 Subject: [PATCH 06/27] send attnet unsubscription event on random subnet expiry (#3600) ## Issue Addressed :lady_beetle: in which we don't actually unsubscribe from a random long lived subnet when it expires ## Proposed Changes Remove code addressing a specific case in which we are subscribed to all subnets and handle the removal of the long lived subnet. I don't think the special case code is particularly important as, if someone is running with that many validators to be subscribed to all subnets, it should use `--subscribe-all-subnets` instead ## Additional Info Noticed on some test nodes climbing bandwidth usage periodically (around 27hours, the time of subnet expirations) I'm running this code to test this does not happen anymore, but I think it should be good now --- .../src/subnet_service/attestation_subnets.rs | 26 +++---------------- 1 file changed, 4 insertions(+), 22 deletions(-) diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index ecca3c9682..ee8ba24fc3 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -564,26 +564,8 @@ impl AttestationService { /// /// This function selects a new subnet to join, or extends the expiry if there are no more /// available subnets to choose from. 
- fn handle_random_subnet_expiry(&mut self, subnet_id: SubnetId, end_slot: Slot) { - let subnet_count = self.beacon_chain.spec.attestation_subnet_count; - if self.long_lived_subscriptions.len() == (subnet_count - 1) as usize { - let end_slot = end_slot + self.long_lived_subnet_subscription_slots; - // This is just an extra accuracy precaution, we could use the default timeout if - // needed. - if let Some(time_to_subscription_end) = - self.beacon_chain.slot_clock.duration_to_slot(end_slot) - { - // We are at capacity, simply increase the timeout of the current subnet. - self.long_lived_subscriptions.insert_at( - subnet_id, - end_slot + 1, - time_to_subscription_end, - ); - } else { - self.long_lived_subscriptions.insert(subnet_id, end_slot); - } - return; - } + fn handle_random_subnet_expiry(&mut self, subnet_id: SubnetId) { + self.handle_removed_subnet(subnet_id, SubscriptionKind::LongLived); // Remove the ENR bitfield bit and choose a new random on from the available subnets // Subscribe to a new random subnet. @@ -718,8 +700,8 @@ impl Stream for AttestationService { // Process any random subnet expiries. 
match self.long_lived_subscriptions.poll_next_unpin(cx) { - Poll::Ready(Some(Ok((subnet_id, end_slot)))) => { - self.handle_random_subnet_expiry(subnet_id, end_slot) + Poll::Ready(Some(Ok((subnet_id, _end_slot)))) => { + self.handle_random_subnet_expiry(subnet_id) } Poll::Ready(Some(Err(e))) => { error!(self.log, "Failed to check for random subnet cycles"; "error"=> e); From bd873e7162261a8369a08312d2643a86f798b745 Mon Sep 17 00:00:00 2001 From: Divma Date: Fri, 23 Sep 2022 03:52:46 +0000 Subject: [PATCH 07/27] New rust lints for rustc 1.64.0 (#3602) ## Issue Addressed fixes lints from the last rust release ## Proposed Changes Fix the lints, most of the lints by `clippy::question-mark` are false positives in the form of https://github.com/rust-lang/rust-clippy/issues/9518 so it's allowed for now ## Additional Info --- Makefile | 3 ++- beacon_node/beacon_chain/src/beacon_chain.rs | 6 +++--- .../beacon_chain/src/schema_change/migration_schema_v7.rs | 6 +++--- beacon_node/beacon_chain/src/sync_committee_verification.rs | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 2 +- beacon_node/http_api/src/lib.rs | 2 +- consensus/merkle_proof/src/lib.rs | 4 +--- consensus/ssz_types/src/fixed_vector.rs | 2 +- consensus/ssz_types/src/variable_list.rs | 2 +- validator_client/src/preparation_service.rs | 2 +- 10 files changed, 15 insertions(+), 16 deletions(-) diff --git a/Makefile b/Makefile index 6b5c6b3e5d..6119f0dc0f 100644 --- a/Makefile +++ b/Makefile @@ -153,7 +153,8 @@ lint: -A clippy::derive_partial_eq_without_eq \ -A clippy::from-over-into \ -A clippy::upper-case-acronyms \ - -A clippy::vec-init-then-push + -A clippy::vec-init-then-push \ + -A clippy::question-mark nightly-lint: cp .github/custom/clippy.toml . 
diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b0fac779a4..2a449f64ba 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -809,7 +809,7 @@ impl BeaconChain { if let Some(request_root) = request_root_opt { if let Ok(prev_root) = state.get_block_root(prev_slot) { - return Ok(Some((*prev_root != request_root).then(|| request_root))); + return Ok(Some((*prev_root != request_root).then_some(request_root))); } } @@ -831,7 +831,7 @@ impl BeaconChain { slot: curr_slot, }); } - Ok((curr_root != prev_root).then(|| curr_root)) + Ok((curr_root != prev_root).then_some(curr_root)) } else { Ok(None) } @@ -2871,7 +2871,7 @@ impl BeaconChain { .pubkeys .iter() .zip(sync_aggregate.sync_committee_bits.iter()) - .filter_map(|(pubkey, bit)| bit.then(|| pubkey)) + .filter_map(|(pubkey, bit)| bit.then_some(pubkey)) .collect::>(); validator_monitor.register_sync_aggregate_in_block( diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs index 4a9a78db7b..d953d30027 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs @@ -212,7 +212,7 @@ fn map_relevant_epochs_to_roots( let root = iter .find_map(|next| match next { - Ok((root, slot)) => (slot == start_slot).then(|| Ok(root)), + Ok((root, slot)) => (slot == start_slot).then_some(Ok(root)), Err(e) => Some(Err(format!("{:?}", e))), }) .transpose()? 
@@ -286,7 +286,7 @@ fn find_finalized_descendant_heads( .filter_map(|(index, node)| { (!nodes_referenced_as_parents.contains(&index) && fork_choice.is_descendant(finalized_root, node.root)) - .then(|| HeadInfo { + .then_some(HeadInfo { index, root: node.root, slot: node.slot, @@ -306,7 +306,7 @@ fn update_store_justified_checkpoint( .filter_map(|node| { (node.finalized_checkpoint == Some(persisted_fork_choice.fork_choice_store.finalized_checkpoint)) - .then(|| node.justified_checkpoint) + .then_some(node.justified_checkpoint) .flatten() }) .max_by_key(|justified_checkpoint| justified_checkpoint.epoch) diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index 7d56af7935..4b4228e71d 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -343,7 +343,7 @@ impl VerifiedSyncContribution { let participant_pubkeys = sync_subcommittee_pubkeys .into_iter() .zip(contribution.aggregation_bits.iter()) - .filter_map(|(pubkey, bit)| bit.then(|| pubkey)) + .filter_map(|(pubkey, bit)| bit.then_some(pubkey)) .collect::>(); // Ensure that all signatures are valid. 
diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index f49563b149..d5a8880381 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1130,7 +1130,7 @@ where selection_proof .is_aggregator::() .expect("should determine aggregator") - .then(|| validator_index) + .then_some(validator_index) })?; let default = SyncCommitteeContribution::from_message( diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 312f2a29e2..5b4fa5816d 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2531,7 +2531,7 @@ pub fn serve( || matches!(validator_status, ValidatorStatus::Active); // Filter out validators who are not 'active' or 'pending'. - is_active_or_pending.then(|| { + is_active_or_pending.then_some({ ( ProposerPreparationData { validator_index: validator_index as u64, diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs index 84f27bdb86..da9b78ff11 100644 --- a/consensus/merkle_proof/src/lib.rs +++ b/consensus/merkle_proof/src/lib.rs @@ -100,9 +100,7 @@ impl MerkleTree { (Leaf(_), Leaf(_)) => return Err(MerkleTreeError::MerkleTreeFull), // There is a right node so insert in right node (Node(_, _, _), Node(_, _, _)) => { - if let Err(e) = right.push_leaf(elem, depth - 1) { - return Err(e); - } + right.push_leaf(elem, depth - 1)?; } // Both branches are zero, insert in left one (Zero(_), Zero(_)) => { diff --git a/consensus/ssz_types/src/fixed_vector.rs b/consensus/ssz_types/src/fixed_vector.rs index e64e76ef4d..1ad82a3841 100644 --- a/consensus/ssz_types/src/fixed_vector.rs +++ b/consensus/ssz_types/src/fixed_vector.rs @@ -326,7 +326,7 @@ mod test { assert_eq!(fixed[0], 1); assert_eq!(&fixed[0..1], &vec[0..1]); - assert_eq!((&fixed[..]).len(), 8192); + assert_eq!((fixed[..]).len(), 8192); fixed[1] = 3; assert_eq!(fixed[1], 3); diff --git a/consensus/ssz_types/src/variable_list.rs 
b/consensus/ssz_types/src/variable_list.rs index f23872c87f..a342b361ed 100644 --- a/consensus/ssz_types/src/variable_list.rs +++ b/consensus/ssz_types/src/variable_list.rs @@ -308,7 +308,7 @@ mod test { assert_eq!(fixed[0], 1); assert_eq!(&fixed[0..1], &vec[0..1]); - assert_eq!((&fixed[..]).len(), 2); + assert_eq!((fixed[..]).len(), 2); fixed[1] = 3; assert_eq!(fixed[1], 3); diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index d4178f2c48..af152545e2 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -294,7 +294,7 @@ impl PreparationService { proposal_data.fee_recipient.and_then(|fee_recipient| { proposal_data .builder_proposals - .then(|| ValidatorRegistrationKey { + .then_some(ValidatorRegistrationKey { fee_recipient, gas_limit: proposal_data.gas_limit, pubkey, From 01e84b71f524968f5b940fbd2fa31d29408b6581 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 26 Sep 2022 01:17:36 +0000 Subject: [PATCH 08/27] v3.1.2 (#3603) ## Issue Addressed NA ## Proposed Changes Bump versions to v3.1.2 ## Additional Info - ~~Blocked on several PRs.~~ - ~~Requires further testing.~~ --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 071cc0d797..64eab1442b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -439,7 +439,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "3.1.1" +version = "3.1.2" dependencies = [ "beacon_chain", "clap", @@ -597,7 +597,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "3.1.1" +version = "3.1.2" dependencies = [ "beacon_node", "clap", @@ -3110,7 +3110,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "3.1.1" +version = 
"3.1.2" dependencies = [ "account_utils", "beacon_chain", @@ -3610,7 +3610,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "3.1.1" +version = "3.1.2" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 1832778c56..d48dd6cac0 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "3.1.1" +version = "3.1.2" authors = ["Paul Hauner ", "Age Manning "] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index be34f23d70..3f406e88fc 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v3.1.1-", - fallback = "Lighthouse/v3.1.1" + prefix = "Lighthouse/v3.1.2-", + fallback = "Lighthouse/v3.1.2" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 3cc7e7adde..6b9bb33838 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "3.1.1" +version = "3.1.2" authors = ["Paul Hauner "] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 1ae1c9b0b0..94bb62b1af 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "3.1.1" +version = "3.1.2" authors = ["Sigma Prime "] edition = "2021" autotests = false From abcebf276f48dc65971fde8f738feb256308cfd7 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 28 Sep 2022 17:45:09 +0000 Subject: [PATCH 09/27] Add guide to MEV logs (#3611) ## Proposed Changes Add some docs on checking the builder configuration, which is a frequently asked question on Discord. ## Additional Info My text editor also insisted on stripping some trailing newlines, but can put 'em back if we want --- book/src/builders.md | 77 ++++++++++++++++++++++++++++++++++++-------- 1 file changed, 64 insertions(+), 13 deletions(-) diff --git a/book/src/builders.md b/book/src/builders.md index 109a75a040..e57a4fad14 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -45,24 +45,24 @@ relays, run one of the following services and configure lighthouse to use it wit ## Validator Client Configuration -In the validator client you can configure gas limit and fee recipient on a per-validator basis. If no gas limit is -configured, Lighthouse will use a default gas limit of 30,000,000, which is the current default value used in execution -engines. You can also enable or disable use of external builders on a per-validator basis rather than using +In the validator client you can configure gas limit and fee recipient on a per-validator basis. 
If no gas limit is +configured, Lighthouse will use a default gas limit of 30,000,000, which is the current default value used in execution +engines. You can also enable or disable use of external builders on a per-validator basis rather than using `--builder-proposals`, which enables external builders for all validators. In order to manage these configurations per-validator, you can either make updates to the `validator_definitions.yml` file or you can use the HTTP requests described below. Both the gas limit and fee recipient will be passed along as suggestions to connected builders. If there is a discrepancy in either, it will *not* keep you from proposing a block with the builder. This is because the bounds on gas limit are -calculated based on prior execution blocks, so an honest external builder will make sure that even if your -requested gas limit value is out of the specified range, a valid gas limit in the direction of your request will be +calculated based on prior execution blocks, so an honest external builder will make sure that even if your +requested gas limit value is out of the specified range, a valid gas limit in the direction of your request will be used in constructing the block. Depending on the connected relay, payment to the proposer might be in the form of a -transaction within the block to the fee recipient, so a discrepancy in fee recipient might not indicate that there -is something afoot. +transaction within the block to the fee recipient, so a discrepancy in fee recipient might not indicate that there +is something afoot. -> Note: The gas limit configured here is effectively a vote on block size, so the configuration should not be taken lightly. +> Note: The gas limit configured here is effectively a vote on block size, so the configuration should not be taken lightly. > 30,000,000 is currently seen as a value balancing block size with how expensive it is for -> the network to validate blocks. 
So if you don't feel comfortable making an informed "vote", using the default value is +> the network to validate blocks. So if you don't feel comfortable making an informed "vote", using the default value is > encouraged. We will update the default value if the community reaches a rough consensus on a new value. ### Set Gas Limit via HTTP @@ -157,20 +157,71 @@ By default, Lighthouse is strict with these conditions, but we encourage users t - `--builder-fallback-disable-checks` - This flag disables all checks related to chain health. This means the builder API will always be used for payload construction, regardless of recent chain conditions. -## Builder Profit Threshold +## Builder Profit Threshold If you are generally uneasy with the risks associated with outsourced payload production (liveness/censorship) but would consider using it for the chance of out-sized rewards, this flag may be useful: `--builder-profit-threshold ` -The number provided indicates the minimum reward that an external payload must provide the proposer for it to be considered +The number provided indicates the minimum reward that an external payload must provide the proposer for it to be considered for inclusion in a proposal. For example, if you'd only like to use an external payload for a reward of >= 0.25 ETH, you -would provide your beacon node with `--builder-profit-threshold 250000000000000000`. If it's your turn to propose and the +would provide your beacon node with `--builder-profit-threshold 250000000000000000`. If it's your turn to propose and the most valuable payload offered by builders is only 0.1 ETH, the local execution engine's payload will be used. Currently, -this threshold just looks at the value of the external payload. No comparison to the local payload is made, although +this threshold just looks at the value of the external payload. No comparison to the local payload is made, although this feature will likely be added in the future. 
+## Checking your builder config + +You can check that your builder is configured correctly by looking for these log messages. + +On start-up, the beacon node will log if a builder is configured: + +``` +INFO Connected to external block builder +``` + +At regular intervals the validator client will log that it successfully registered its validators +with the builder network: + +``` +INFO Published validator registrations to the builder network +``` + +When you successfully propose a block using a builder, you will see this log on the beacon node: + +``` +INFO Successfully published a block to the builder network +``` + +If you don't see that message around the time of your proposals, check your beacon node logs +for `INFO` and `WARN` messages indicating why the builder was not used. + +Examples of messages indicating fallback to a locally produced block are: + +``` +INFO No payload provided by connected builder. +``` + +``` +WARN Unable to retrieve a payload from a connected builder +``` + +``` +INFO The value offered by the connected builder does not meet the configured profit threshold. +``` + +``` +INFO Due to poor chain health the local execution engine will be used for payload construction. 
+``` + +In case of fallback you should see a log indicating that the locally produced payload was +used in place of one from the builder: + +``` +INFO Reconstructing a full block using a local payload +``` + [mev-rs]: https://github.com/ralexstokes/mev-rs [mev-boost]: https://github.com/flashbots/mev-boost [gas-limit-api]: https://ethereum.github.io/keymanager-APIs/#/Gas%20Limit From 6779912fe4fa0c09c73d5b4f8b3c7b8833df1903 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 28 Sep 2022 19:53:35 +0000 Subject: [PATCH 10/27] Publish subscriptions to all beacon nodes (#3529) ## Issue Addressed Resolves #3516 ## Proposed Changes Adds a beacon fallback function for running a beacon node http query on all available fallbacks instead of returning on a first successful result. Uses the new `run_on_all` method for attestation and sync committee subscriptions. ## Additional Info Please provide any additional information. For example, future considerations or information useful for reviewers. 
--- lighthouse/tests/validator_client.rs | 16 ++ validator_client/src/beacon_node_fallback.rs | 160 +++++++++++++++++- validator_client/src/block_service.rs | 6 +- validator_client/src/cli.rs | 10 ++ validator_client/src/config.rs | 4 + validator_client/src/duties_service.rs | 4 +- validator_client/src/lib.rs | 8 +- validator_client/src/preparation_service.rs | 4 +- .../src/sync_committee_service.rs | 2 +- 9 files changed, 198 insertions(+), 16 deletions(-) diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index a9b76c2754..6608b7ca64 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -442,3 +442,19 @@ fn monitoring_endpoint() { assert_eq!(api_conf.update_period_secs, Some(30)); }); } +#[test] +fn disable_run_on_all_default() { + CommandLineTest::new().run().with_config(|config| { + assert!(!config.disable_run_on_all); + }); +} + +#[test] +fn disable_run_on_all() { + CommandLineTest::new() + .flag("disable-run-on-all", None) + .run() + .with_config(|config| { + assert!(config.disable_run_on_all); + }); +} diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index df6c949aef..82f085c43f 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -105,11 +105,13 @@ impl Error { } /// The list of errors encountered whilst attempting to perform a query. 
-pub struct AllErrored(pub Vec<(String, Error)>); +pub struct Errors(pub Vec<(String, Error)>); -impl fmt::Display for AllErrored { +impl fmt::Display for Errors { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "All endpoints failed")?; + if !self.0.is_empty() { + write!(f, "Some endpoints failed, num_failed: {}", self.0.len())?; + } for (i, (id, error)) in self.0.iter().enumerate() { let comma = if i + 1 < self.0.len() { "," } else { "" }; @@ -294,15 +296,22 @@ impl CandidateBeaconNode { pub struct BeaconNodeFallback { candidates: Vec>, slot_clock: Option, + disable_run_on_all: bool, spec: ChainSpec, log: Logger, } impl BeaconNodeFallback { - pub fn new(candidates: Vec>, spec: ChainSpec, log: Logger) -> Self { + pub fn new( + candidates: Vec>, + disable_run_on_all: bool, + spec: ChainSpec, + log: Logger, + ) -> Self { Self { candidates, slot_clock: None, + disable_run_on_all, spec, log, } @@ -396,7 +405,7 @@ impl BeaconNodeFallback { require_synced: RequireSynced, offline_on_failure: OfflineOnFailure, func: F, - ) -> Result> + ) -> Result> where F: Fn(&'a BeaconNodeHttpClient) -> R, R: Future>, @@ -486,6 +495,145 @@ impl BeaconNodeFallback { } // There were no candidates already ready and we were unable to make any of them ready. - Err(AllErrored(errors)) + Err(Errors(errors)) + } + + /// Run `func` against all candidates in `self`, collecting the result of `func` against each + /// candidate. + /// + /// First this function will try all nodes with a suitable status. If no candidates are suitable + /// it will try updating the status of all unsuitable nodes and re-running `func` again. + /// + /// Note: This function returns `Ok(())` if `func` returned successfully on all beacon nodes. + /// It returns a list of errors along with the beacon node id that failed for `func`. 
+ /// Since this ignores the actual result of `func`, this function should only be used for beacon + /// node calls whose results we do not care about, only that they completed successfully. + pub async fn run_on_all<'a, F, O, Err, R>( + &'a self, + require_synced: RequireSynced, + offline_on_failure: OfflineOnFailure, + func: F, + ) -> Result<(), Errors> + where + F: Fn(&'a BeaconNodeHttpClient) -> R, + R: Future>, + { + let mut results = vec![]; + let mut to_retry = vec![]; + let mut retry_unsynced = vec![]; + + // Run `func` using a `candidate`, returning the value or capturing errors. + // + // We use a macro instead of a closure here since it is not trivial to move `func` into a + // closure. + macro_rules! try_func { + ($candidate: ident) => {{ + inc_counter_vec(&ENDPOINT_REQUESTS, &[$candidate.beacon_node.as_ref()]); + + // There exists a race condition where `func` may be called when the candidate is + // actually not ready. We deem this an acceptable inefficiency. + match func(&$candidate.beacon_node).await { + Ok(val) => results.push(Ok(val)), + Err(e) => { + // If we have an error on this function, mark the client as not-ready. + // + // There exists a race condition where the candidate may have been marked + // as ready between the `func` call and now. We deem this an acceptable + // inefficiency. + if matches!(offline_on_failure, OfflineOnFailure::Yes) { + $candidate.set_offline().await; + } + results.push(Err(( + $candidate.beacon_node.to_string(), + Error::RequestFailed(e), + ))); + inc_counter_vec(&ENDPOINT_ERRORS, &[$candidate.beacon_node.as_ref()]); + } + } + }}; + } + + // First pass: try `func` on all synced and ready candidates. + // + // This ensures that we always choose a synced node if it is available. 
+ for candidate in &self.candidates { + match candidate.status(RequireSynced::Yes).await { + Err(CandidateError::NotSynced) if require_synced == false => { + // This client is unsynced we will try it after trying all synced clients + retry_unsynced.push(candidate); + } + Err(_) => { + // This client was not ready on the first pass, we might try it again later. + to_retry.push(candidate); + } + Ok(_) => try_func!(candidate), + } + } + + // Second pass: try `func` on ready unsynced candidates. This only runs if we permit + // unsynced candidates. + // + // Due to async race-conditions, it is possible that we will send a request to a candidate + // that has been set to an offline/unready status. This is acceptable. + if require_synced == false { + for candidate in retry_unsynced { + try_func!(candidate); + } + } + + // Third pass: try again, attempting to make non-ready clients become ready. + for candidate in to_retry { + // If the candidate hasn't luckily transferred into the correct state in the meantime, + // force an update of the state. + let new_status = match candidate.status(require_synced).await { + Ok(()) => Ok(()), + Err(_) => { + candidate + .refresh_status(self.slot_clock.as_ref(), &self.spec, &self.log) + .await + } + }; + + match new_status { + Ok(()) => try_func!(candidate), + Err(CandidateError::NotSynced) if require_synced == false => try_func!(candidate), + Err(e) => { + results.push(Err(( + candidate.beacon_node.to_string(), + Error::Unavailable(e), + ))); + } + } + } + + let errors: Vec<_> = results.into_iter().filter_map(|res| res.err()).collect(); + + if !errors.is_empty() { + Err(Errors(errors)) + } else { + Ok(()) + } + } + + /// Call `func` on first beacon node that returns success or on all beacon nodes + /// depending on the value of `disable_run_on_all`. 
+ pub async fn run<'a, F, Err, R>( + &'a self, + require_synced: RequireSynced, + offline_on_failure: OfflineOnFailure, + func: F, + ) -> Result<(), Errors> + where + F: Fn(&'a BeaconNodeHttpClient) -> R, + R: Future>, + { + if self.disable_run_on_all { + self.first_success(require_synced, offline_on_failure, func) + .await?; + Ok(()) + } else { + self.run_on_all(require_synced, offline_on_failure, func) + .await + } } } diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index ac1ba11674..b0b69a4f50 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -1,4 +1,4 @@ -use crate::beacon_node_fallback::{AllErrored, Error as FallbackError}; +use crate::beacon_node_fallback::{Error as FallbackError, Errors}; use crate::{ beacon_node_fallback::{BeaconNodeFallback, RequireSynced}, graffiti_file::GraffitiFile, @@ -20,8 +20,8 @@ pub enum BlockError { Irrecoverable(String), } -impl From> for BlockError { - fn from(e: AllErrored) -> Self { +impl From> for BlockError { + fn from(e: Errors) -> Self { if e.0.iter().any(|(_, error)| { matches!( error, diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 5c7205a4ae..ef2e66676a 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -26,6 +26,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .takes_value(true), ) + .arg( + Arg::with_name("disable-run-on-all") + .long("disable-run-on-all") + .value_name("DISABLE_RUN_ON_ALL") + .help("By default, Lighthouse publishes attestation, sync committee subscriptions \ + and proposer preparation messages to all beacon nodes provided in the \ + `--beacon-nodes flag`. This option changes that behaviour such that these \ + api calls only go out to the first available and synced beacon node") + .takes_value(false) + ) // This argument is deprecated, use `--beacon-nodes` instead. 
.arg( Arg::with_name("server") diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 22472f7512..277a4bd8de 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -61,6 +61,8 @@ pub struct Config { /// A list of custom certificates that the validator client will additionally use when /// connecting to a beacon node over SSL/TLS. pub beacon_nodes_tls_certs: Option>, + /// Disables publishing http api requests to all beacon nodes for select api calls. + pub disable_run_on_all: bool, } impl Default for Config { @@ -96,6 +98,7 @@ impl Default for Config { builder_proposals: false, builder_registration_timestamp_override: None, gas_limit: None, + disable_run_on_all: false, } } } @@ -177,6 +180,7 @@ impl Config { } config.allow_unsynced_beacon_node = cli_args.is_present("allow-unsynced"); + config.disable_run_on_all = cli_args.is_present("disable-run-on-all"); config.disable_auto_discover = cli_args.is_present("disable-auto-discover"); config.init_slashing_protection = cli_args.is_present("init-slashing-protection"); config.use_long_timeouts = cli_args.is_present("use-long-timeouts"); diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 60b617e6c8..86b8ca870e 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -570,12 +570,12 @@ async fn poll_beacon_attesters( }); } - // If there are any subscriptions, push them out to the beacon node. 
+ // If there are any subscriptions, push them out to beacon nodes if !subscriptions.is_empty() { let subscriptions_ref = &subscriptions; if let Err(e) = duties_service .beacon_nodes - .first_success( + .run( duties_service.require_synced, OfflineOnFailure::Yes, |beacon_node| async move { diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 9db4cc0315..005a74edf6 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -327,8 +327,12 @@ impl ProductionValidatorClient { // Initialize the number of connected, avaliable beacon nodes to 0. set_gauge(&http_metrics::metrics::AVAILABLE_BEACON_NODES_COUNT, 0); - let mut beacon_nodes: BeaconNodeFallback<_, T> = - BeaconNodeFallback::new(candidates, context.eth2_config.spec.clone(), log.clone()); + let mut beacon_nodes: BeaconNodeFallback<_, T> = BeaconNodeFallback::new( + candidates, + config.disable_run_on_all, + context.eth2_config.spec.clone(), + log.clone(), + ); // Perform some potentially long-running initialization tasks. let (genesis_time, genesis_validators_root) = tokio::select! 
{ diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index af152545e2..fc80f2ded0 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -331,7 +331,7 @@ impl PreparationService { let preparation_entries = preparation_data.as_slice(); match self .beacon_nodes - .first_success( + .run( RequireSynced::Yes, OfflineOnFailure::Yes, |beacon_node| async move { @@ -349,7 +349,7 @@ impl PreparationService { ), Err(e) => error!( log, - "Unable to publish proposer preparation"; + "Unable to publish proposer preparation to all beacon nodes"; "error" => %e, ), } diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs index 1e6ff7a5b5..5b95945302 100644 --- a/validator_client/src/sync_committee_service.rs +++ b/validator_client/src/sync_committee_service.rs @@ -568,7 +568,7 @@ impl SyncCommitteeService { if let Err(e) = self .beacon_nodes - .first_success( + .run( RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async move { From b1d2510d1bb4d791c7c8e3da0a023fad408d307e Mon Sep 17 00:00:00 2001 From: Divma Date: Thu, 29 Sep 2022 01:50:11 +0000 Subject: [PATCH 11/27] Libp2p v0.48.0 upgrade (#3547) ## Issue Addressed Upgrades libp2p to v.0.47.0. 
This is the compilation of - [x] #3495 - [x] #3497 - [x] #3491 - [x] #3546 - [x] #3553 Co-authored-by: Age Manning --- .github/workflows/local-testnet.yml | 3 +- .github/workflows/test-suite.yml | 92 +- Cargo.lock | 159 +-- Cross.toml | 4 +- Dockerfile | 2 +- Makefile | 2 +- beacon_node/http_api/tests/common.rs | 4 +- beacon_node/lighthouse_network/Cargo.toml | 4 +- .../src/discovery/enr_ext.rs | 2 - .../lighthouse_network/src/discovery/mod.rs | 43 +- beacon_node/lighthouse_network/src/lib.rs | 9 +- .../src/peer_manager/mod.rs | 16 +- .../src/peer_manager/network_behaviour.rs | 14 +- .../src/peer_manager/peerdb/score.rs | 2 +- beacon_node/lighthouse_network/src/rpc/mod.rs | 1 + beacon_node/lighthouse_network/src/service.rs | 573 -------- .../src/service/api_types.rs | 101 ++ .../src/service/behaviour.rs | 34 + .../{behaviour => service}/gossip_cache.rs | 0 .../gossipsub_scoring_parameters.rs | 0 .../src/{behaviour => service}/mod.rs | 1263 +++++++++-------- .../lighthouse_network/src/service/utils.rs | 288 ++++ .../tests/common/behaviour.rs | 3 +- .../lighthouse_network/tests/common/mod.rs | 23 +- .../lighthouse_network/tests/pm_tests.rs | 4 +- .../lighthouse_network/tests/rpc_tests.rs | 184 +-- beacon_node/network/src/service.rs | 246 ++-- book/src/installation-source.md | 14 +- book/src/setup.md | 2 + .../aarch64-unknown-linux-gnu.dockerfile | 14 + .../cross/x86_64-unknown-linux-gnu.dockerfile | 14 + 31 files changed, 1506 insertions(+), 1614 deletions(-) delete mode 100644 beacon_node/lighthouse_network/src/service.rs create mode 100644 beacon_node/lighthouse_network/src/service/api_types.rs create mode 100644 beacon_node/lighthouse_network/src/service/behaviour.rs rename beacon_node/lighthouse_network/src/{behaviour => service}/gossip_cache.rs (100%) rename beacon_node/lighthouse_network/src/{behaviour => service}/gossipsub_scoring_parameters.rs (100%) rename beacon_node/lighthouse_network/src/{behaviour => service}/mod.rs (56%) create mode 100644 
beacon_node/lighthouse_network/src/service/utils.rs create mode 100644 scripts/cross/aarch64-unknown-linux-gnu.dockerfile create mode 100644 scripts/cross/x86_64-unknown-linux-gnu.dockerfile diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 35032a0932..c688c0df33 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -20,7 +20,8 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Install ganache run: npm install ganache@latest --global diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 1a7d78f61f..a3e9625b50 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -50,6 +50,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Install ganache run: sudo npm install -g ganache - name: Run tests in release @@ -68,7 +70,7 @@ jobs: node-version: '14' - name: Install windows build tools run: | - choco install python visualstudio2019-workload-vctools -y + choco install python protoc visualstudio2019-workload-vctools -y npm config set msvs_version 2019 - name: Install ganache run: npm install -g ganache --loglevel verbose @@ -90,6 +92,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Run beacon_chain tests for all known forks run: make test-beacon-chain op-pool-tests: @@ -100,6 +104,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Run operation_pool tests for all known forks run: make test-op-pool slasher-tests: @@ -120,6 +126,8 @@ jobs: - uses: actions/checkout@v1 - name: Get 
latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Install ganache run: sudo npm install -g ganache - name: Run tests in debug @@ -132,6 +140,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Run state_transition_vectors in release. run: make run-state-transition-tests ef-tests-ubuntu: @@ -142,6 +152,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Run consensus-spec-tests with blst, milagro and fake_crypto run: make test-ef dockerfile-ubuntu: @@ -164,6 +176,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Install ganache run: sudo npm install -g ganache - name: Run the beacon chain sim that starts from an eth1 contract @@ -176,6 +190,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Install ganache run: sudo npm install -g ganache - name: Run the beacon chain sim and go through the merge transition @@ -188,6 +204,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Install ganache run: sudo npm install -g ganache - name: Run the beacon chain sim without an eth1 connection @@ -197,35 +215,39 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Install ganache - run: sudo npm install -g ganache - - name: Run the syncing simulator - run: cargo run --release --bin simulator syncing-sim + - 
uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Install ganache + run: sudo npm install -g ganache + - name: Run the syncing simulator + run: cargo run --release --bin simulator syncing-sim doppelganger-protection-test: - name: doppelganger-protection-test - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Install ganache - run: sudo npm install -g ganache - - name: Install lighthouse and lcli - run: | - make - make install-lcli - - name: Run the doppelganger protection success test script - run: | - cd scripts/tests - ./doppelganger_protection.sh success - - name: Run the doppelganger protection failure test script - run: | - cd scripts/tests - ./doppelganger_protection.sh failure + name: doppelganger-protection-test + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Install ganache + run: sudo npm install -g ganache + - name: Install lighthouse and lcli + run: | + make + make install-lcli + - name: Run the doppelganger protection success test script + run: | + cd scripts/tests + ./doppelganger_protection.sh success + - name: Run the doppelganger protection failure test script + run: | + cd scripts/tests + ./doppelganger_protection.sh failure execution-engine-integration-ubuntu: name: execution-engine-integration-ubuntu runs-on: ubuntu-latest @@ -240,6 +262,8 @@ jobs: dotnet-version: '6.0.201' - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Run exec engine integration tests in release run: make test-exec-engine check-benchmarks: @@ -250,6 +274,8 @@ jobs: - uses: actions/checkout@v1 - 
name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Typecheck benchmark code without running it run: make check-benches check-consensus: @@ -270,6 +296,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Lint code for quality and style with Clippy run: make lint - name: Certify Cargo.lock freshness @@ -289,6 +317,8 @@ jobs: git checkout 31a49666ccfcd7963b63345d6ce757c373f22c2a cargo build --release --bin cargo-clippy --bin clippy-driver cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Run Clippy with the disallowed-from-async lint run: make nightly-lint check-msrv: @@ -299,6 +329,8 @@ jobs: - uses: actions/checkout@v1 - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }}) run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }} + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Run cargo check run: cargo check --workspace arbitrary-check: @@ -339,6 +371,8 @@ jobs: run: rustup toolchain install $PINNED_NIGHTLY # NOTE: cargo-udeps version is pinned until this issue is resolved: # https://github.com/est31/cargo-udeps/issues/135 + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Install cargo-udeps run: cargo install cargo-udeps --locked --force --version 0.1.30 - name: Create Cargo config dir diff --git a/Cargo.lock b/Cargo.lock index 64eab1442b..cfefa6c116 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3226,9 +3226,9 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.45.1" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41726ee8f662563fafba2d2d484b14037cc8ecb8c953fbfc8439d4ce3a0a9029" +checksum = 
"94c996fe5bfdba47f5a5af71d48ecbe8cec900b7b97391cc1d3ba1afb0e2d3b6" dependencies = [ "bytes", "futures", @@ -3236,7 +3236,7 @@ dependencies = [ "getrandom 0.2.7", "instant", "lazy_static", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", @@ -3293,9 +3293,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.33.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d46fca305dee6757022e2f5a4f6c023315084d0ed7441c3ab244e76666d979" +checksum = "b1fff5bd889c82a0aec668f2045edd066f559d4e5c40354e5a4c77ac00caac38" dependencies = [ "asn1_der", "bs58", @@ -3313,10 +3313,9 @@ dependencies = [ "multistream-select 0.11.0", "parking_lot 0.12.1", "pin-project 1.0.11", - "prost 0.10.4", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "rand 0.8.5", - "ring", "rw-stream-sink 0.3.0", "sha2 0.10.2", "smallvec", @@ -3328,12 +3327,12 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.33.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbb462ec3a51fab457b4b44ac295e8b0a4b04dc175127e615cf996b1f0f1a268" +checksum = "6cb3c16e3bb2f76c751ae12f0f26e788c89d353babdded40411e7923f01fc978" dependencies = [ "futures", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "log", "parking_lot 0.12.1", "smallvec", @@ -3342,9 +3341,9 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.38.1" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43e064ba4d7832e01c738626c6b274ae100baba05f5ffcc7b265c2a3ed398108" +checksum = "2185aac44b162c95180ae4ddd1f4dfb705217ea1cb8e16bdfc70d31496fd80fa" dependencies = [ "asynchronous-codec", "base64", @@ -3354,12 +3353,12 @@ dependencies = [ "futures", "hex_fmt", "instant", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "libp2p-swarm", "log", "prometheus-client", - "prost 0.10.4", - "prost-build 0.10.4", + "prost 
0.11.0", + "prost-build 0.11.1", "rand 0.7.3", "regex", "sha2 0.10.2", @@ -3370,19 +3369,19 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.36.1" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84b53490442d086db1fa5375670c9666e79143dccadef3f7c74a4346899a984" +checksum = "f19440c84b509d69b13f0c9c28caa9bd3a059d25478527e937e86761f25c821e" dependencies = [ "asynchronous-codec", "futures", "futures-timer", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "libp2p-swarm", "log", "lru", - "prost 0.10.4", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "prost-codec", "smallvec", "thiserror", @@ -3391,11 +3390,11 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.6.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "564a7e5284d7d9b3140fdfc3cb6567bc32555e86a21de5604c2ec85da05cf384" +checksum = "a74ab339e8b5d989e8c1000a78adb5c064a6319245bb22d1e70b415ec18c39b8" dependencies = [ - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "libp2p-gossipsub", "libp2p-identify", "libp2p-swarm", @@ -3404,14 +3403,14 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.33.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ff9c893f2367631a711301d703c47432af898c9bb8253bea0e2c051a13f7640" +checksum = "ce53169351226ee0eb18ee7bef8d38f308fa8ad7244f986ae776390c0ae8a44d" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "log", "nohash-hasher", "parking_lot 0.12.1", @@ -3422,18 +3421,18 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.36.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2cee1dad1c83325bbd182a8e94555778699cec8a9da00086efb7522c4c15ad" +checksum = "7cb0f939a444b06779ce551b3d78ebf13970ac27906ada452fd70abd160b09b8" dependencies = [ "bytes", 
"curve25519-dalek 3.2.0", "futures", "lazy_static", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "log", - "prost 0.10.4", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "rand 0.8.5", "sha2 0.10.2", "snow", @@ -3444,33 +3443,33 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.33.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db007e737adc5d28b2e03223b0210164928ad742591127130796a72aa8eaf54f" +checksum = "328e8c654a55ac7f093eb96dfd0386244dd337f2bd2822dc019522b743ea8add" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "log", - "prost 0.10.4", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "unsigned-varint 0.7.1", "void", ] [[package]] name = "libp2p-swarm" -version = "0.36.1" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4bb21c5abadbf00360c734f16bf87f1712ed4f23cd46148f625d2ddb867346" +checksum = "70ad2db60c06603606b54b58e4247e32efec87a93cb4387be24bf32926c600f2" dependencies = [ "either", "fnv", "futures", "futures-timer", "instant", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "log", "pin-project 1.0.11", "rand 0.7.3", @@ -3481,26 +3480,27 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.27.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f693c8c68213034d472cbb93a379c63f4f307d97c06f1c41e4985de481687a5" +checksum = "1f02622b9dd150011b4eeec387f8bd013189a2f27da08ba363e7c6e606d77a48" dependencies = [ + "heck 0.4.0", "quote", "syn", ] [[package]] name = "libp2p-tcp" -version = "0.33.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4933e38ef21b50698aefc87799c24f2a365c9d3f6cf50471f3f6a0bc410892" +checksum = "9675432b4c94b3960f3d2c7e57427b81aea92aab67fd0eebef09e2ae0ff54895" dependencies = [ "futures", "futures-timer", 
"if-addrs 0.7.0", "ipnet", "libc", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "log", "socket2", "tokio", @@ -3508,14 +3508,14 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.35.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39d398fbb29f432c4128fabdaac2ed155c3bcaf1b9bd40eeeb10a471eefacbf5" +checksum = "de8a9e825cc03f2fc194d2e1622113d7fe18e1c7f4458a582b83140c9b9aea27" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "log", "parking_lot 0.12.1", "quicksink", @@ -3527,12 +3527,12 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.37.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fe653639ad74877c759720febb0cbcbf4caa221adde4eed2d3126ce5c6f381f" +checksum = "b74ec8dc042b583f0b2b93d52917f3b374c1e4b1cfa79ee74c7672c41257694c" dependencies = [ "futures", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "parking_lot 0.12.1", "thiserror", "yamux", @@ -4440,15 +4440,6 @@ dependencies = [ "types", ] -[[package]] -name = "owning_ref" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff55baddef9e4ad00f88b6c743a2a8062d4c6ade126c2a528644b8e444d52ce" -dependencies = [ - "stable_deref_trait", -] - [[package]] name = "parity-scale-codec" version = "2.3.1" @@ -4876,21 +4867,21 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.16.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1abe0255c04d15f571427a2d1e00099016506cf3297b53853acd2b7eb87825" +checksum = "3c473049631c233933d6286c88bbb7be30e62ec534cf99a9ae0079211f7fa603" dependencies = [ "dtoa", "itoa 1.0.2", - "owning_ref", + "parking_lot 0.12.1", "prometheus-client-derive-text-encode", ] [[package]] name = "prometheus-client-derive-text-encode" -version = "0.2.0" +version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8e12d01b9d66ad9eb4529c57666b6263fc1993cb30261d83ead658fdd932652" +checksum = "66a455fbcb954c1a7decf3c586e860fd7889cddf4b8e164be736dbac95a953cd" dependencies = [ "proc-macro2", "quote", @@ -4909,12 +4900,12 @@ dependencies = [ [[package]] name = "prost" -version = "0.10.4" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" +checksum = "399c3c31cdec40583bb68f0b18403400d01ec4289c383aa047560439952c4dd7" dependencies = [ "bytes", - "prost-derive 0.10.1", + "prost-derive 0.11.0", ] [[package]] @@ -4939,21 +4930,19 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.10.4" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab" +checksum = "7f835c582e6bd972ba8347313300219fed5bfa52caf175298d860b61ff6069bb" dependencies = [ "bytes", - "cfg-if", - "cmake", "heck 0.4.0", "itertools", "lazy_static", "log", "multimap", "petgraph", - "prost 0.10.4", - "prost-types 0.10.1", + "prost 0.11.0", + "prost-types 0.11.1", "regex", "tempfile", "which", @@ -4961,13 +4950,13 @@ dependencies = [ [[package]] name = "prost-codec" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00af1e92c33b4813cc79fda3f2dbf56af5169709be0202df730e9ebc3e4cd007" +checksum = "011ae9ff8359df7915f97302d591cdd9e0e27fbd5a4ddc5bd13b71079bb20987" dependencies = [ "asynchronous-codec", "bytes", - "prost 0.10.4", + "prost 0.11.0", "thiserror", "unsigned-varint 0.7.1", ] @@ -4987,9 +4976,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.10.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc" +checksum = 
"7345d5f0e08c0536d7ac7229952590239e77abf0a0100a1b1d890add6ea96364" dependencies = [ "anyhow", "itertools", @@ -5010,12 +4999,12 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.10.1" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" +checksum = "4dfaa718ad76a44b3415e6c4d53b17c8f99160dcb3a99b10470fce8ad43f6e3e" dependencies = [ "bytes", - "prost 0.10.4", + "prost 0.11.0", ] [[package]] @@ -6272,12 +6261,6 @@ dependencies = [ "syn", ] -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "state_processing" version = "0.2.0" diff --git a/Cross.toml b/Cross.toml index d5f7a5d506..963e22d0e0 100644 --- a/Cross.toml +++ b/Cross.toml @@ -1,5 +1,5 @@ [target.x86_64-unknown-linux-gnu] -pre-build = ["apt-get install -y cmake clang-3.9"] +dockerfile = './scripts/cross/x86_64-unknown-linux-gnu.dockerfile' [target.aarch64-unknown-linux-gnu] -pre-build = ["apt-get install -y cmake clang-3.9"] +dockerfile = './scripts/cross/aarch64-unknown-linux-gnu.dockerfile' diff --git a/Dockerfile b/Dockerfile index 86a69c6539..72423b17c6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ FROM rust:1.62.1-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . 
lighthouse ARG FEATURES ENV FEATURES $FEATURES diff --git a/Makefile b/Makefile index 6119f0dc0f..3bf23a4cea 100644 --- a/Makefile +++ b/Makefile @@ -179,7 +179,7 @@ arbitrary-fuzz: # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: cargo install --force cargo-audit - cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159 --ignore RUSTSEC-2022-0040 + cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. vendor: diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index 032e1346fb..a0dbf40b29 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -118,9 +118,7 @@ pub async fn create_api_server_on_port( // Only a peer manager can add peers, so we create a dummy manager. let config = lighthouse_network::peer_manager::config::Config::default(); - let mut pm = PeerManager::new(config, network_globals.clone(), &log) - .await - .unwrap(); + let mut pm = PeerManager::new(config, network_globals.clone(), &log).unwrap(); // add a peer let peer_id = PeerId::random(); diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index c6ba530508..e5af0a7499 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -37,12 +37,12 @@ directory = { path = "../../common/directory" } regex = "1.5.5" strum = { version = "0.24.0", features = ["derive"] } superstruct = "0.5.0" -prometheus-client = "0.16.0" +prometheus-client = "0.18.0" unused_port = { path = "../../common/unused_port" } delay_map = "0.1.1" [dependencies.libp2p] -version = "0.45.1" +version = "0.48.0" default-features = false features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext", 
"secp256k1"] diff --git a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs index 81eaaaf1ba..1001efe231 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs @@ -232,7 +232,6 @@ impl CombinedKeyExt for CombinedKey { .expect("libp2p key must be valid"); Ok(CombinedKey::from(ed_keypair)) } - _ => Err("ENR: Unsupported libp2p key type"), } } } @@ -266,7 +265,6 @@ pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result Err("Unsupported public key".into()), } } diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 23b8895cf3..d766fd23a3 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -7,8 +7,8 @@ pub(crate) mod enr; pub mod enr_ext; // Allow external use of the lighthouse ENR builder -use crate::behaviour::TARGET_SUBNET_PEERS; use crate::metrics; +use crate::service::TARGET_SUBNET_PEERS; use crate::{error, Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; use discv5::{enr::NodeId, Discv5, Discv5Event}; pub use enr::{ @@ -21,6 +21,8 @@ pub use libp2p::core::identity::{Keypair, PublicKey}; use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY}; use futures::prelude::*; use futures::stream::FuturesUnordered; +use libp2p::multiaddr::Protocol; +use libp2p::swarm::AddressScore; pub use libp2p::{ core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId}, swarm::{ @@ -67,13 +69,11 @@ pub const FIND_NODE_QUERY_CLOSEST_PEERS: usize = 16; /// The threshold for updating `min_ttl` on a connected peer. const DURATION_DIFFERENCE: Duration = Duration::from_millis(1); -/// The events emitted by polling discovery. -pub enum DiscoveryEvent { - /// A query has completed. 
This result contains a mapping of discovered peer IDs to the `min_ttl` - /// of the peer if it is specified. - QueryResult(HashMap>), - /// This indicates that our local UDP socketaddr has been updated and we should inform libp2p. - SocketUpdated(SocketAddr), +/// A query has completed. This result contains a mapping of discovered peer IDs to the `min_ttl` +/// of the peer if it is specified. +#[derive(Debug)] +pub struct DiscoveredPeers { + pub peers: HashMap>, } #[derive(Clone, PartialEq)] @@ -362,7 +362,7 @@ impl Discovery { } /// Returns an iterator over all enr entries in the DHT. - pub fn table_entries_enr(&mut self) -> Vec { + pub fn table_entries_enr(&self) -> Vec { self.discv5.table_entries_enr() } @@ -909,7 +909,7 @@ impl Discovery { impl NetworkBehaviour for Discovery { // Discovery is not a real NetworkBehaviour... type ConnectionHandler = libp2p::swarm::handler::DummyConnectionHandler; - type OutEvent = DiscoveryEvent; + type OutEvent = DiscoveredPeers; fn new_handler(&mut self) -> Self::ConnectionHandler { libp2p::swarm::handler::DummyConnectionHandler::default() @@ -976,11 +976,9 @@ impl NetworkBehaviour for Discovery { self.process_queue(); // Drive the queries and return any results from completed queries - if let Some(results) = self.poll_queries(cx) { + if let Some(peers) = self.poll_queries(cx) { // return the result to the peer manager - return Poll::Ready(NBAction::GenerateEvent(DiscoveryEvent::QueryResult( - results, - ))); + return Poll::Ready(NBAction::GenerateEvent(DiscoveredPeers { peers })); } // Process the server event stream @@ -1019,8 +1017,8 @@ impl NetworkBehaviour for Discovery { } */ } - Discv5Event::SocketUpdated(socket) => { - info!(self.log, "Address updated"; "ip" => %socket.ip(), "udp_port" => %socket.port()); + Discv5Event::SocketUpdated(socket_addr) => { + info!(self.log, "Address updated"; "ip" => %socket_addr.ip(), "udp_port" => %socket_addr.port()); metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT); 
metrics::check_nat(); // Discv5 will have updated our local ENR. We save the updated version @@ -1029,9 +1027,16 @@ impl NetworkBehaviour for Discovery { enr::save_enr_to_disk(Path::new(&self.enr_dir), &enr, &self.log); // update network globals *self.network_globals.local_enr.write() = enr; - return Poll::Ready(NBAction::GenerateEvent( - DiscoveryEvent::SocketUpdated(socket), - )); + // A new UDP socket has been detected. + // Build a multiaddr to report to libp2p + let mut address = Multiaddr::from(socket_addr.ip()); + // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling + // should handle this. + address.push(Protocol::Tcp(self.network_globals.listen_port_tcp())); + return Poll::Ready(NBAction::ReportObservedAddr { + address, + score: AddressScore::Finite(1), + }); } Discv5Event::EnrAdded { .. } | Discv5Event::TalkRequest(_) diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index f679b7e657..be4da809cb 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -5,15 +5,14 @@ #[macro_use] extern crate lazy_static; -pub mod behaviour; mod config; +pub mod service; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod discovery; pub mod metrics; pub mod peer_manager; pub mod rpc; -mod service; pub mod types; pub use config::gossip_max_size; @@ -69,7 +68,6 @@ pub use crate::types::{ pub use prometheus_client; -pub use behaviour::{BehaviourEvent, Gossipsub, PeerRequestId, Request, Response}; pub use config::Config as NetworkConfig; pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr}; pub use discv5; @@ -85,4 +83,7 @@ pub use peer_manager::{ peerdb::PeerDB, ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, }; -pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; +// pub use service::{load_private_key, Context, 
Libp2pEvent, Service, NETWORK_KEY_FILENAME}; +pub use service::api_types::{PeerRequestId, Request, Response}; +pub use service::utils::*; +pub use service::{Gossipsub, NetworkEvent}; diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 63d0816604..0f29135956 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -1,7 +1,7 @@ //! Implementation of Lighthouse's peer management system. -use crate::behaviour::TARGET_SUBNET_PEERS; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; +use crate::service::TARGET_SUBNET_PEERS; use crate::{error, metrics, Gossipsub}; use crate::{NetworkGlobals, PeerId}; use crate::{Subnet, SubnetDiscovery}; @@ -12,6 +12,7 @@ use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult}; use rand::seq::SliceRandom; use slog::{debug, error, trace, warn}; use smallvec::SmallVec; +use std::collections::VecDeque; use std::{ sync::Arc, time::{Duration, Instant}, @@ -71,6 +72,8 @@ pub struct PeerManager { status_peers: HashSetDelay, /// The target number of peers we would like to connect to. target_peers: usize, + /// Peers queued to be dialed. + peers_to_dial: VecDeque<(PeerId, Option)>, /// A collection of sync committee subnets that we need to stay subscribed to. /// Sync committee subnets are longer term (256 epochs). Hence, we need to re-run /// discovery queries for subnet peers if we disconnect from existing sync @@ -115,7 +118,7 @@ pub enum PeerManagerEvent { impl PeerManager { // NOTE: Must be run inside a tokio executor. 
- pub async fn new( + pub fn new( cfg: config::Config, network_globals: Arc>, log: &slog::Logger, @@ -135,6 +138,7 @@ impl PeerManager { Ok(PeerManager { network_globals, events: SmallVec::new(), + peers_to_dial: Default::default(), inbound_ping_peers: HashSetDelay::new(Duration::from_secs(ping_interval_inbound)), outbound_ping_peers: HashSetDelay::new(Duration::from_secs(ping_interval_outbound)), status_peers: HashSetDelay::new(Duration::from_secs(status_interval)), @@ -360,8 +364,8 @@ impl PeerManager { /* Notifications from the Swarm */ // A peer is being dialed. - pub fn inject_dialing(&mut self, peer_id: &PeerId, enr: Option) { - self.inject_peer_connection(peer_id, ConnectingType::Dialing, enr); + pub fn dial_peer(&mut self, peer_id: &PeerId, enr: Option) { + self.peers_to_dial.push_back((*peer_id, enr)); } /// Reports if a peer is banned or not. @@ -1247,9 +1251,7 @@ mod tests { }; let log = build_log(slog::Level::Debug, false); let globals = NetworkGlobals::new_test_globals(&log); - PeerManager::new(config, Arc::new(globals), &log) - .await - .unwrap() + PeerManager::new(config, Arc::new(globals), &log).unwrap() } #[tokio::test] diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 3bda64f0b1..a19c6db657 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -3,6 +3,7 @@ use std::task::{Context, Poll}; use futures::StreamExt; use libp2p::core::connection::ConnectionId; use libp2p::core::ConnectedPoint; +use libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; use libp2p::swarm::handler::DummyConnectionHandler; use libp2p::swarm::{ ConnectionHandler, DialError, NetworkBehaviour, NetworkBehaviourAction, PollParameters, @@ -16,7 +17,7 @@ use crate::rpc::GoodbyeReason; use crate::types::SyncState; use super::peerdb::BanResult; -use super::{PeerManager, 
PeerManagerEvent, ReportSource}; +use super::{ConnectingType, PeerManager, PeerManagerEvent, ReportSource}; impl NetworkBehaviour for PeerManager { type ConnectionHandler = DummyConnectionHandler; @@ -99,6 +100,17 @@ impl NetworkBehaviour for PeerManager { self.events.shrink_to_fit(); } + if let Some((peer_id, maybe_enr)) = self.peers_to_dial.pop_front() { + self.inject_peer_connection(&peer_id, ConnectingType::Dialing, maybe_enr); + let handler = self.new_handler(); + return Poll::Ready(NetworkBehaviourAction::Dial { + opts: DialOpts::peer_id(peer_id) + .condition(PeerCondition::Disconnected) + .build(), + handler, + }); + } + Poll::Pending } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs index accc0b60c5..fca665db98 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs @@ -5,7 +5,7 @@ //! As the logic develops this documentation will advance. //! //! The scoring algorithms are currently experimental. -use crate::behaviour::gossipsub_scoring_parameters::GREYLIST_THRESHOLD as GOSSIPSUB_GREYLIST_THRESHOLD; +use crate::service::gossipsub_scoring_parameters::GREYLIST_THRESHOLD as GOSSIPSUB_GREYLIST_THRESHOLD; use serde::Serialize; use std::time::Instant; use strum::AsRefStr; diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 0bedd423b2..7b0092ef71 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -90,6 +90,7 @@ impl std::fmt::Display for RPCSend { } /// Messages sent to the user from the RPC protocol. +#[derive(Debug)] pub struct RPCMessage { /// The peer that sent the message. 
pub peer_id: PeerId, diff --git a/beacon_node/lighthouse_network/src/service.rs b/beacon_node/lighthouse_network/src/service.rs deleted file mode 100644 index bcd546fb00..0000000000 --- a/beacon_node/lighthouse_network/src/service.rs +++ /dev/null @@ -1,573 +0,0 @@ -use crate::behaviour::{ - save_metadata_to_disk, Behaviour, BehaviourEvent, PeerRequestId, Request, Response, -}; -use crate::config::NetworkLoad; -use crate::discovery::enr; -use crate::multiaddr::Protocol; -use crate::rpc::{GoodbyeReason, MetaData, MetaDataV1, MetaDataV2, RPCResponseErrorCode, ReqId}; -use crate::types::{error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipKind}; -use crate::EnrExt; -use crate::{NetworkConfig, NetworkGlobals, PeerAction, ReportSource}; -use futures::prelude::*; -use libp2p::core::{ - identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed, -}; -use libp2p::{ - bandwidth::{BandwidthLogging, BandwidthSinks}, - core, noise, - swarm::{ConnectionLimits, SwarmBuilder, SwarmEvent}, - PeerId, Swarm, Transport, -}; -use prometheus_client::registry::Registry; -use slog::{crit, debug, info, o, trace, warn, Logger}; -use ssz::Decode; -use std::fs::File; -use std::io::prelude::*; -use std::pin::Pin; -use std::sync::Arc; -use std::time::Duration; -use types::{ChainSpec, EnrForkId, EthSpec, ForkContext}; - -use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; - -pub const NETWORK_KEY_FILENAME: &str = "key"; -/// The maximum simultaneous libp2p connections per peer. -const MAX_CONNECTIONS_PER_PEER: u32 = 1; -/// The filename to store our local metadata. -pub const METADATA_FILENAME: &str = "metadata"; - -/// The types of events than can be obtained from polling the libp2p service. -/// -/// This is a subset of the events that a libp2p swarm emits. -#[derive(Debug)] -pub enum Libp2pEvent { - /// A behaviour event - Behaviour(BehaviourEvent), - /// A new listening address has been established. 
- NewListenAddr(Multiaddr), - /// We reached zero listening addresses. - ZeroListeners, -} - -/// The configuration and state of the libp2p components for the beacon node. -pub struct Service { - /// The libp2p Swarm handler. - pub swarm: Swarm>, - /// The bandwidth logger for the underlying libp2p transport. - pub bandwidth: Arc, - /// This node's PeerId. - pub local_peer_id: PeerId, - /// The libp2p logger handle. - pub log: Logger, -} - -pub struct Context<'a> { - pub config: &'a NetworkConfig, - pub enr_fork_id: EnrForkId, - pub fork_context: Arc, - pub chain_spec: &'a ChainSpec, - pub gossipsub_registry: Option<&'a mut Registry>, -} - -impl Service { - pub async fn new( - executor: task_executor::TaskExecutor, - ctx: Context<'_>, - log: &Logger, - ) -> error::Result<(Arc>, Self)> { - let log = log.new(o!("service"=> "libp2p")); - trace!(log, "Libp2p Service starting"); - - let config = ctx.config; - // initialise the node's ID - let local_keypair = load_private_key(config, &log); - - // Create an ENR or load from disk if appropriate - let enr = - enr::build_or_load_enr::(local_keypair.clone(), config, &ctx.enr_fork_id, &log)?; - - let local_peer_id = enr.peer_id(); - - // Construct the metadata - let meta_data = load_or_build_metadata(&config.network_dir, &log); - - // set up a collection of variables accessible outside of the network crate - let network_globals = Arc::new(NetworkGlobals::new( - enr.clone(), - config.libp2p_port, - config.discovery_port, - meta_data, - config - .trusted_peers - .iter() - .map(|x| PeerId::from(x.clone())) - .collect(), - &log, - )); - - info!(log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); - let discovery_string = if config.disable_discovery { - "None".into() - } else { - config.discovery_port.to_string() - }; - debug!(log, "Attempting to open listening ports"; "address" => ?config.listen_address, "tcp_port" => 
config.libp2p_port, "udp_port" => discovery_string); - - let (mut swarm, bandwidth) = { - // Set up the transport - tcp/ws with noise and mplex - let (transport, bandwidth) = build_transport(local_keypair.clone()) - .map_err(|e| format!("Failed to build transport: {:?}", e))?; - - // Lighthouse network behaviour - let behaviour = - Behaviour::new(&local_keypair, ctx, network_globals.clone(), &log).await?; - - // use the executor for libp2p - struct Executor(task_executor::TaskExecutor); - impl libp2p::core::Executor for Executor { - fn exec(&self, f: Pin + Send>>) { - self.0.spawn(f, "libp2p"); - } - } - - // sets up the libp2p connection limits - let limits = ConnectionLimits::default() - .with_max_pending_incoming(Some(5)) - .with_max_pending_outgoing(Some(16)) - .with_max_established_incoming(Some( - (config.target_peers as f32 - * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) - .ceil() as u32, - )) - .with_max_established_outgoing(Some( - (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32, - )) - .with_max_established(Some( - (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS)) - .ceil() as u32, - )) - .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); - - ( - SwarmBuilder::new(transport, behaviour, local_peer_id) - .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero")) - .connection_event_buffer_size(64) - .connection_limits(limits) - .executor(Box::new(Executor(executor))) - .build(), - bandwidth, - ) - }; - - // listen on the specified address - let listen_multiaddr = { - let mut m = Multiaddr::from(config.listen_address); - m.push(Protocol::Tcp(config.libp2p_port)); - m - }; - - match Swarm::listen_on(&mut swarm, listen_multiaddr.clone()) { - Ok(_) => { - let mut log_address = listen_multiaddr; - log_address.push(Protocol::P2p(local_peer_id.into())); - info!(log, "Listening established"; "address" => %log_address); - } - Err(err) => { - crit!( - log, - "Unable to 
listen on libp2p address"; - "error" => ?err, - "listen_multiaddr" => %listen_multiaddr, - ); - return Err("Libp2p was unable to listen on the given listen address.".into()); - } - }; - - // helper closure for dialing peers - let mut dial = |mut multiaddr: Multiaddr| { - // strip the p2p protocol if it exists - strip_peer_id(&mut multiaddr); - match Swarm::dial(&mut swarm, multiaddr.clone()) { - Ok(()) => debug!(log, "Dialing libp2p peer"; "address" => %multiaddr), - Err(err) => debug!( - log, - "Could not connect to peer"; "address" => %multiaddr, "error" => ?err - ), - }; - }; - - // attempt to connect to user-input libp2p nodes - for multiaddr in &config.libp2p_nodes { - dial(multiaddr.clone()); - } - - // attempt to connect to any specified boot-nodes - let mut boot_nodes = config.boot_nodes_enr.clone(); - boot_nodes.dedup(); - - for bootnode_enr in boot_nodes { - for multiaddr in &bootnode_enr.multiaddr() { - // ignore udp multiaddr if it exists - let components = multiaddr.iter().collect::>(); - if let Protocol::Udp(_) = components[1] { - continue; - } - - if !network_globals - .peers - .read() - .is_connected_or_dialing(&bootnode_enr.peer_id()) - { - dial(multiaddr.clone()); - } - } - } - - for multiaddr in &config.boot_nodes_multiaddr { - // check TCP support for dialing - if multiaddr - .iter() - .any(|proto| matches!(proto, Protocol::Tcp(_))) - { - dial(multiaddr.clone()); - } - } - - let mut subscribed_topics: Vec = vec![]; - - for topic_kind in &config.topics { - if swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) { - subscribed_topics.push(topic_kind.clone()); - } else { - warn!(log, "Could not subscribe to topic"; "topic" => %topic_kind); - } - } - - if !subscribed_topics.is_empty() { - info!(log, "Subscribed to topics"; "topics" => ?subscribed_topics); - } - - let service = Service { - swarm, - bandwidth, - local_peer_id, - log, - }; - - Ok((network_globals, service)) - } - - /// Sends a request to a peer, with a given Id. 
- pub fn send_request(&mut self, peer_id: PeerId, request_id: AppReqId, request: Request) { - self.swarm - .behaviour_mut() - .send_request(peer_id, request_id, request); - } - - /// Informs the peer that their request failed. - pub fn respond_with_error( - &mut self, - peer_id: PeerId, - id: PeerRequestId, - error: RPCResponseErrorCode, - reason: String, - ) { - self.swarm - .behaviour_mut() - .send_error_reponse(peer_id, id, error, reason); - } - - /// Report a peer's action. - pub fn report_peer( - &mut self, - peer_id: &PeerId, - action: PeerAction, - source: ReportSource, - msg: &'static str, - ) { - self.swarm - .behaviour_mut() - .peer_manager_mut() - .report_peer(peer_id, action, source, None, msg); - } - - /// Disconnect and ban a peer, providing a reason. - pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason, source: ReportSource) { - self.swarm - .behaviour_mut() - .goodbye_peer(peer_id, reason, source); - } - - /// Sends a response to a peer's request. - pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response) { - self.swarm - .behaviour_mut() - .send_successful_response(peer_id, id, response); - } - - pub async fn next_event(&mut self) -> Libp2pEvent { - loop { - match self.swarm.select_next_some().await { - SwarmEvent::Behaviour(behaviour) => { - // Handle banning here - match &behaviour { - BehaviourEvent::PeerBanned(peer_id) => { - self.swarm.ban_peer_id(*peer_id); - } - BehaviourEvent::PeerUnbanned(peer_id) => { - self.swarm.unban_peer_id(*peer_id); - } - _ => {} - } - return Libp2pEvent::Behaviour(behaviour); - } - SwarmEvent::ConnectionEstablished { - peer_id: _, - endpoint: _, - num_established: _, - concurrent_dial_errors: _, - } => {} - SwarmEvent::ConnectionClosed { - peer_id: _, - cause: _, - endpoint: _, - num_established: _, - } => {} - SwarmEvent::NewListenAddr { address, .. 
} => { - return Libp2pEvent::NewListenAddr(address) - } - SwarmEvent::IncomingConnection { - local_addr, - send_back_addr, - } => { - trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr) - } - SwarmEvent::IncomingConnectionError { - local_addr, - send_back_addr, - error, - } => { - debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error); - } - SwarmEvent::BannedPeer { peer_id, .. } => { - debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); - } - SwarmEvent::OutgoingConnectionError { peer_id, error } => { - debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error); - } - SwarmEvent::ExpiredListenAddr { address, .. } => { - debug!(self.log, "Listen address expired"; "address" => %address) - } - SwarmEvent::ListenerClosed { - addresses, reason, .. - } => { - crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); - if Swarm::listeners(&self.swarm).count() == 0 { - return Libp2pEvent::ZeroListeners; - } - } - SwarmEvent::ListenerError { error, .. } => { - // this is non fatal, but we still check - warn!(self.log, "Listener error"; "error" => ?error); - if Swarm::listeners(&self.swarm).count() == 0 { - return Libp2pEvent::ZeroListeners; - } - } - SwarmEvent::Dialing(_peer_id) => {} - } - } - } -} - -type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; - -/// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and -/// mplex as the multiplexing layer. 
-fn build_transport( - local_private_key: Keypair, -) -> std::io::Result<(BoxedTransport, Arc)> { - let tcp = libp2p::tcp::TokioTcpConfig::new().nodelay(true); - let transport = libp2p::dns::TokioDnsConfig::system(tcp)?; - #[cfg(feature = "libp2p-websocket")] - let transport = { - let trans_clone = transport.clone(); - transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone)) - }; - - let (transport, bandwidth) = BandwidthLogging::new(transport); - - // mplex config - let mut mplex_config = libp2p::mplex::MplexConfig::new(); - mplex_config.set_max_buffer_size(256); - mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block); - - // yamux config - let mut yamux_config = libp2p::yamux::YamuxConfig::default(); - yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read()); - - // Authentication - Ok(( - transport - .upgrade(core::upgrade::Version::V1) - .authenticate(generate_noise_config(&local_private_key)) - .multiplex(core::upgrade::SelectUpgrade::new( - yamux_config, - mplex_config, - )) - .timeout(Duration::from_secs(10)) - .boxed(), - bandwidth, - )) -} - -// Useful helper functions for debugging. Currently not used in the client. -#[allow(dead_code)] -fn keypair_from_hex(hex_bytes: &str) -> error::Result { - let hex_bytes = if let Some(stripped) = hex_bytes.strip_prefix("0x") { - stripped.to_string() - } else { - hex_bytes.to_string() - }; - - hex::decode(&hex_bytes) - .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into()) - .and_then(keypair_from_bytes) -} - -#[allow(dead_code)] -fn keypair_from_bytes(mut bytes: Vec) -> error::Result { - libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes) - .map(|secret| { - let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into(); - Keypair::Secp256k1(keypair) - }) - .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into()) -} - -/// Loads a private key from disk. 
If this fails, a new key is -/// generated and is then saved to disk. -/// -/// Currently only secp256k1 keys are allowed, as these are the only keys supported by discv5. -pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair { - // check for key from disk - let network_key_f = config.network_dir.join(NETWORK_KEY_FILENAME); - if let Ok(mut network_key_file) = File::open(network_key_f.clone()) { - let mut key_bytes: Vec = Vec::with_capacity(36); - match network_key_file.read_to_end(&mut key_bytes) { - Err(_) => debug!(log, "Could not read network key file"), - Ok(_) => { - // only accept secp256k1 keys for now - if let Ok(secret_key) = - libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut key_bytes) - { - let kp: libp2p::core::identity::secp256k1::Keypair = secret_key.into(); - debug!(log, "Loaded network key from disk."); - return Keypair::Secp256k1(kp); - } else { - debug!(log, "Network key file is not a valid secp256k1 key"); - } - } - } - } - - // if a key could not be loaded from disk, generate a new one and save it - let local_private_key = Keypair::generate_secp256k1(); - if let Keypair::Secp256k1(key) = local_private_key.clone() { - let _ = std::fs::create_dir_all(&config.network_dir); - match File::create(network_key_f.clone()) - .and_then(|mut f| f.write_all(&key.secret().to_bytes())) - { - Ok(_) => { - debug!(log, "New network key generated and written to disk"); - } - Err(e) => { - warn!( - log, - "Could not write node key to file: {:?}. 
error: {}", network_key_f, e - ); - } - } - } - local_private_key -} - -/// Generate authenticated XX Noise config from identity keys -fn generate_noise_config( - identity_keypair: &Keypair, -) -> noise::NoiseAuthenticated { - let static_dh_keys = noise::Keypair::::new() - .into_authentic(identity_keypair) - .expect("signing can fail only once during starting a node"); - noise::NoiseConfig::xx(static_dh_keys).into_authenticated() -} - -/// For a multiaddr that ends with a peer id, this strips this suffix. Rust-libp2p -/// only supports dialing to an address without providing the peer id. -fn strip_peer_id(addr: &mut Multiaddr) { - let last = addr.pop(); - match last { - Some(Protocol::P2p(_)) => {} - Some(other) => addr.push(other), - _ => {} - } -} - -/// Load metadata from persisted file. Return default metadata if loading fails. -fn load_or_build_metadata( - network_dir: &std::path::Path, - log: &slog::Logger, -) -> MetaData { - // We load a V2 metadata version by default (regardless of current fork) - // since a V2 metadata can be converted to V1. The RPC encoder is responsible - // for sending the correct metadata version based on the negotiated protocol version. 
- let mut meta_data = MetaDataV2 { - seq_number: 0, - attnets: EnrAttestationBitfield::::default(), - syncnets: EnrSyncCommitteeBitfield::::default(), - }; - // Read metadata from persisted file if available - let metadata_path = network_dir.join(METADATA_FILENAME); - if let Ok(mut metadata_file) = File::open(metadata_path) { - let mut metadata_ssz = Vec::new(); - if metadata_file.read_to_end(&mut metadata_ssz).is_ok() { - // Attempt to read a MetaDataV2 version from the persisted file, - // if that fails, read MetaDataV1 - match MetaDataV2::::from_ssz_bytes(&metadata_ssz) { - Ok(persisted_metadata) => { - meta_data.seq_number = persisted_metadata.seq_number; - // Increment seq number if persisted attnet is not default - if persisted_metadata.attnets != meta_data.attnets - || persisted_metadata.syncnets != meta_data.syncnets - { - meta_data.seq_number += 1; - } - debug!(log, "Loaded metadata from disk"); - } - Err(_) => { - match MetaDataV1::::from_ssz_bytes(&metadata_ssz) { - Ok(persisted_metadata) => { - let persisted_metadata = MetaData::V1(persisted_metadata); - // Increment seq number as the persisted metadata version is updated - meta_data.seq_number = *persisted_metadata.seq_number() + 1; - debug!(log, "Loaded metadata from disk"); - } - Err(e) => { - debug!( - log, - "Metadata from file could not be decoded"; - "error" => ?e, - ); - } - } - } - } - } - }; - - // Wrap the MetaData - let meta_data = MetaData::V2(meta_data); - - debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number()); - save_metadata_to_disk(network_dir, meta_data.clone(), log); - meta_data -} diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs new file mode 100644 index 0000000000..e5d81737cf --- /dev/null +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -0,0 +1,101 @@ +use std::sync::Arc; + +use libp2p::core::connection::ConnectionId; +use types::{EthSpec, SignedBeaconBlock}; + 
+use crate::rpc::{ + methods::{ + BlocksByRangeRequest, BlocksByRootRequest, OldBlocksByRangeRequest, RPCCodedResponse, + RPCResponse, ResponseTermination, StatusMessage, + }, + OutboundRequest, SubstreamId, +}; + +/// Identifier of requests sent by a peer. +pub type PeerRequestId = (ConnectionId, SubstreamId); + +/// Identifier of a request. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum RequestId { + Application(AppReqId), + Internal, +} + +/// The type of RPC requests the Behaviour informs it has received and allows for sending. +/// +// NOTE: This is an application-level wrapper over the lower network level requests that can be +// sent. The main difference is the absence of the Ping, Metadata and Goodbye protocols, which don't +// leave the Behaviour. For all protocols managed by RPC see `RPCRequest`. +#[derive(Debug, Clone, PartialEq)] +pub enum Request { + /// A Status message. + Status(StatusMessage), + /// A blocks by range request. + BlocksByRange(BlocksByRangeRequest), + /// A blocks by root request. + BlocksByRoot(BlocksByRootRequest), +} + +impl std::convert::From for OutboundRequest { + fn from(req: Request) -> OutboundRequest { + match req { + Request::BlocksByRoot(r) => OutboundRequest::BlocksByRoot(r), + Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }) => { + OutboundRequest::BlocksByRange(OldBlocksByRangeRequest { + start_slot, + count, + step: 1, + }) + } + Request::Status(s) => OutboundRequest::Status(s), + } + } +} + +/// The type of RPC responses the Behaviour informs it has received, and allows for sending. +/// +// NOTE: This is an application-level wrapper over the lower network level responses that can be +// sent. The main difference is the absence of Pong and Metadata, which don't leave the +// Behaviour. For all protocol responses managed by RPC see `RPCResponse` and +// `RPCCodedResponse`. +#[derive(Debug, Clone, PartialEq)] +pub enum Response { + /// A Status message.
+ Status(StatusMessage), + /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch. + BlocksByRange(Option>>), + /// A response to a get BLOCKS_BY_ROOT request. + BlocksByRoot(Option>>), +} + +impl std::convert::From> for RPCCodedResponse { + fn from(resp: Response) -> RPCCodedResponse { + match resp { + Response::BlocksByRoot(r) => match r { + Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRoot(b)), + None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRoot), + }, + Response::BlocksByRange(r) => match r { + Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)), + None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), + }, + Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), + } + } +} + +impl slog::Value for RequestId { + fn serialize( + &self, + record: &slog::Record, + key: slog::Key, + serializer: &mut dyn slog::Serializer, + ) -> slog::Result { + match self { + RequestId::Internal => slog::Value::serialize("Behaviour", record, key, serializer), + RequestId::Application(ref id) => { + slog::Value::serialize(&format_args!("{:?}", id), record, key, serializer) + } + } + } +} diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs new file mode 100644 index 0000000000..8327293a74 --- /dev/null +++ b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -0,0 +1,34 @@ +use crate::discovery::Discovery; +use crate::peer_manager::PeerManager; +use crate::rpc::{ReqId, RPC}; +use crate::types::SnappyTransform; + +use libp2p::gossipsub::subscription_filter::{ + MaxCountSubscriptionFilter, WhitelistSubscriptionFilter, +}; +use libp2p::gossipsub::Gossipsub as BaseGossipsub; +use libp2p::identify::Identify; +use libp2p::swarm::NetworkBehaviour; +use libp2p::NetworkBehaviour; +use types::EthSpec; + +use super::api_types::RequestId; + +pub type SubscriptionFilter = 
MaxCountSubscriptionFilter; +pub type Gossipsub = BaseGossipsub; + +#[derive(NetworkBehaviour)] +pub(crate) struct Behaviour { + /// The routing pub-sub mechanism for eth2. + pub gossipsub: Gossipsub, + /// The Eth2 RPC specified in the wire-0 protocol. + pub eth2_rpc: RPC, TSpec>, + /// Discv5 Discovery protocol. + pub discovery: Discovery, + /// Keep regular connection to peers and disconnect if absent. + // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. + /// Provides IP addresses and peer information. + pub identify: Identify, + /// The peer manager that keeps track of peer's reputation and status. + pub peer_manager: PeerManager, +} diff --git a/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs similarity index 100% rename from beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs rename to beacon_node/lighthouse_network/src/service/gossip_cache.rs diff --git a/beacon_node/lighthouse_network/src/behaviour/gossipsub_scoring_parameters.rs b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs similarity index 100% rename from beacon_node/lighthouse_network/src/behaviour/gossipsub_scoring_parameters.rs rename to beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs similarity index 56% rename from beacon_node/lighthouse_network/src/behaviour/mod.rs rename to beacon_node/lighthouse_network/src/service/mod.rs index 9c9e094db6..53d29ccb21 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1,85 +1,63 @@ -use crate::behaviour::gossipsub_scoring_parameters::{ - lighthouse_gossip_thresholds, PeerScoreSettings, -}; -use crate::config::gossipsub_config; +use crate::config::{gossipsub_config, NetworkLoad}; use crate::discovery::{ 
- subnet_predicate, Discovery, DiscoveryEvent, FIND_NODE_QUERY_CLOSEST_PEERS, + subnet_predicate, DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS, }; use crate::peer_manager::{ config::Config as PeerManagerCfg, peerdb::score::PeerAction, peerdb::score::ReportSource, ConnectionDirection, PeerManager, PeerManagerEvent, }; -use crate::rpc::*; -use crate::service::{Context as ServiceContext, METADATA_FILENAME}; +use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; +use crate::service::behaviour::BehaviourEvent; +pub use crate::service::behaviour::Gossipsub; use crate::types::{ subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, }; use crate::Eth2Enr; use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; +use crate::{rpc::*, EnrExt}; +use api_types::{PeerRequestId, Request, RequestId, Response}; use futures::stream::StreamExt; +use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; +use libp2p::bandwidth::BandwidthSinks; use libp2p::gossipsub::error::PublishError; -use libp2p::{ - core::{ - connection::ConnectionId, identity::Keypair, multiaddr::Protocol as MProtocol, Multiaddr, - }, - gossipsub::{ - metrics::Config as GossipsubMetricsConfig, - subscription_filter::{MaxCountSubscriptionFilter, WhitelistSubscriptionFilter}, - Gossipsub as BaseGossipsub, GossipsubEvent, IdentTopic as Topic, MessageAcceptance, - MessageAuthenticity, MessageId, - }, - identify::{Identify, IdentifyConfig, IdentifyEvent}, - swarm::{ - dial_opts::{DialOpts, PeerCondition}, - AddressScore, NetworkBehaviour, NetworkBehaviourAction as NBAction, - NetworkBehaviourEventProcess, PollParameters, - }, - NetworkBehaviour, PeerId, -}; -use slog::{crit, debug, o, trace, warn}; -use ssz::Encode; -use std::collections::HashSet; -use std::fs::File; -use std::io::Write; -use std::path::{Path, PathBuf}; -use std::{ - collections::VecDeque, - 
marker::PhantomData, - sync::Arc, - task::{Context, Poll}, -}; -use types::{ - consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, - SignedBeaconBlock, Slot, SubnetId, SyncSubnetId, +use libp2p::gossipsub::metrics::Config as GossipsubMetricsConfig; +use libp2p::gossipsub::subscription_filter::MaxCountSubscriptionFilter; +use libp2p::gossipsub::{ + GossipsubEvent, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, }; +use libp2p::identify::{Identify, IdentifyConfig, IdentifyEvent}; +use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; +use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p::PeerId; +use slog::{crit, debug, info, o, trace, warn}; +use std::marker::PhantomData; +use std::path::PathBuf; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; +use types::{ + consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, +}; +use utils::{build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER}; + +use self::behaviour::Behaviour; use self::gossip_cache::GossipCache; +pub mod api_types; +mod behaviour; mod gossip_cache; pub mod gossipsub_scoring_parameters; - +pub mod utils; /// The number of peers we target per subnet for discovery queries. pub const TARGET_SUBNET_PEERS: usize = 6; const MAX_IDENTIFY_ADDRESSES: usize = 10; -/// Identifier of requests sent by a peer. -pub type PeerRequestId = (ConnectionId, SubstreamId); - -pub type SubscriptionFilter = MaxCountSubscriptionFilter; -pub type Gossipsub = BaseGossipsub; - -/// Identifier of a request. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum RequestId { - Application(AppReqId), - Behaviour, -} - /// The types of events than can be obtained from polling the behaviour. #[derive(Debug)] -pub enum BehaviourEvent { +pub enum NetworkEvent { /// We have successfully dialed and connected to a peer. 
PeerConnectedOutgoing(PeerId), /// A peer has successfully dialed and connected to us. @@ -125,224 +103,419 @@ pub enum BehaviourEvent { }, /// Inform the network to send a Status to this peer. StatusPeer(PeerId), -} - -/// Internal type to pass messages from sub-behaviours to the poll of the global behaviour to be -/// specified as an NBAction. -enum InternalBehaviourMessage { - /// Dial a Peer. - DialPeer(PeerId), - /// The socket has been updated. - SocketUpdated(Multiaddr), + NewListenAddr(Multiaddr), + ZeroListeners, } /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. -#[derive(NetworkBehaviour)] -#[behaviour( - out_event = "BehaviourEvent", - poll_method = "poll", - event_process = true -)] -pub struct Behaviour { - /* Sub-Behaviours */ - /// The routing pub-sub mechanism for eth2. - gossipsub: Gossipsub, - /// The Eth2 RPC specified in the wire-0 protocol. - eth2_rpc: RPC, TSpec>, - /// Discv5 Discovery protocol. - discovery: Discovery, - /// Keep regular connection to peers and disconnect if absent. - // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. - /// Provides IP addresses and peer information. - identify: Identify, - /// The peer manager that keeps track of peer's reputation and status. - peer_manager: PeerManager, - +pub struct Network { + swarm: libp2p::swarm::Swarm>, /* Auxiliary Fields */ - /// The output events generated by this behaviour to be consumed in the swarm poll. - #[behaviour(ignore)] - events: VecDeque>, - /// Internal behaviour events, the NBAction type is composed of sub-behaviours, so we use a - /// custom type here to avoid having to specify the concrete type. - #[behaviour(ignore)] - internal_events: VecDeque, /// A collections of variables accessible outside the network service. 
- #[behaviour(ignore)] network_globals: Arc>, /// Keeps track of the current EnrForkId for upgrading gossipsub topics. // NOTE: This can be accessed via the network_globals ENR. However we keep it here for quick // lookups for every gossipsub message send. - #[behaviour(ignore)] enr_fork_id: EnrForkId, - /// The waker for the current task. This is used to wake the task when events are added to the - /// queue. - #[behaviour(ignore)] - waker: Option, /// Directory where metadata is stored. - #[behaviour(ignore)] network_dir: PathBuf, - #[behaviour(ignore)] fork_context: Arc, /// Gossipsub score parameters. - #[behaviour(ignore)] score_settings: PeerScoreSettings, /// The interval for updating gossipsub scores - #[behaviour(ignore)] update_gossipsub_scores: tokio::time::Interval, - #[behaviour(ignore)] gossip_cache: GossipCache, + /// The bandwidth logger for the underlying libp2p transport. + pub bandwidth: Arc, + /// This node's PeerId. + pub local_peer_id: PeerId, /// Logger for behaviour actions. - #[behaviour(ignore)] log: slog::Logger, } /// Implements the combined behaviour for the libp2p service. 
-impl Behaviour { +impl Network { pub async fn new( - local_key: &Keypair, + executor: task_executor::TaskExecutor, ctx: ServiceContext<'_>, - network_globals: Arc>, log: &slog::Logger, - ) -> error::Result { - let behaviour_log = log.new(o!()); - + ) -> error::Result<(Self, Arc>)> { + let log = log.new(o!("service"=> "libp2p")); let mut config = ctx.config.clone(); + trace!(log, "Libp2p Service starting"); + // initialise the node's ID + let local_keypair = utils::load_private_key(&config, &log); - // Set up the Identify Behaviour - let identify_config = if config.private { - IdentifyConfig::new( - "".into(), - local_key.public(), // Still send legitimate public key - ) - .with_cache_size(0) - } else { - IdentifyConfig::new("eth2/1.0.0".into(), local_key.public()) - .with_agent_version(lighthouse_version::version_with_platform()) - .with_cache_size(0) + // set up a collection of variables accessible outside of the network crate + let network_globals = { + // Create an ENR or load from disk if appropriate + let enr = crate::discovery::enr::build_or_load_enr::( + local_keypair.clone(), + &config, + &ctx.enr_fork_id, + &log, + )?; + // Construct the metadata + let meta_data = utils::load_or_build_metadata(&config.network_dir, &log); + let globals = NetworkGlobals::new( + enr, + config.libp2p_port, + config.discovery_port, + meta_data, + config + .trusted_peers + .iter() + .map(|x| PeerId::from(x.clone())) + .collect(), + &log, + ); + Arc::new(globals) }; - // Build and start the discovery sub-behaviour - let mut discovery = - Discovery::new(local_key, &config, network_globals.clone(), log).await?; - // start searching for peers - discovery.discover_peers(FIND_NODE_QUERY_CLOSEST_PEERS); - // Grab our local ENR FORK ID let enr_fork_id = network_globals .local_enr() .eth2() .expect("Local ENR must have a fork id"); - let possible_fork_digests = ctx.fork_context.all_fork_digests(); - let filter = MaxCountSubscriptionFilter { - filter: Self::create_whitelist_filter( - 
possible_fork_digests, - ctx.chain_spec.attestation_subnet_count, - SYNC_COMMITTEE_SUBNET_COUNT, - ), - max_subscribed_topics: 200, - max_subscriptions_per_request: 150, // 148 in theory = (64 attestation + 4 sync committee + 6 core topics) * 2 - }; - - config.gs_config = gossipsub_config(config.network_load, ctx.fork_context.clone()); - - // If metrics are enabled for gossipsub build the configuration - let gossipsub_metrics = ctx - .gossipsub_registry - .map(|registry| (registry, GossipsubMetricsConfig::default())); - - let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size()); - let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( - MessageAuthenticity::Anonymous, - config.gs_config.clone(), - gossipsub_metrics, - filter, - snappy_transform, - ) - .map_err(|e| format!("Could not construct gossipsub: {:?}", e))?; - - // Construct a set of gossipsub peer scoring parameters - // We don't know the number of active validators and the current slot yet - let active_validators = TSpec::minimum_validator_count(); - let current_slot = Slot::new(0); - - let thresholds = lighthouse_gossip_thresholds(); - let score_settings = PeerScoreSettings::new(ctx.chain_spec, &config.gs_config); - // Prepare scoring parameters - let params = score_settings.get_peer_score_params( - active_validators, - &thresholds, - &enr_fork_id, - current_slot, - )?; + let gossip_cache = { + let slot_duration = std::time::Duration::from_secs(ctx.chain_spec.seconds_per_slot); + let half_epoch = std::time::Duration::from_secs( + ctx.chain_spec.seconds_per_slot * TSpec::slots_per_epoch() / 2, + ); - trace!(behaviour_log, "Using peer score params"; "params" => ?params); - - // Set up a scoring update interval - let update_gossipsub_scores = tokio::time::interval(params.decay_interval); - - gossipsub - .with_peer_score(params, thresholds) - .expect("Valid score params and thresholds"); - - let peer_manager_cfg = PeerManagerCfg { - discovery_enabled: 
!config.disable_discovery, - metrics_enabled: config.metrics_enabled, - target_peer_count: config.target_peers, - ..Default::default() + GossipCache::builder() + .beacon_block_timeout(slot_duration) + .aggregates_timeout(half_epoch) + .attestation_timeout(half_epoch) + .voluntary_exit_timeout(half_epoch * 2) + .proposer_slashing_timeout(half_epoch * 2) + .attester_slashing_timeout(half_epoch * 2) + // .signed_contribution_and_proof_timeout(timeout) // Do not retry + // .sync_committee_message_timeout(timeout) // Do not retry + .build() }; - let slot_duration = std::time::Duration::from_secs(ctx.chain_spec.seconds_per_slot); - let half_epoch = std::time::Duration::from_secs( - ctx.chain_spec.seconds_per_slot * TSpec::slots_per_epoch() / 2, - ); - let gossip_cache = GossipCache::builder() - .beacon_block_timeout(slot_duration) - .aggregates_timeout(half_epoch) - .attestation_timeout(half_epoch) - .voluntary_exit_timeout(half_epoch * 2) - .proposer_slashing_timeout(half_epoch * 2) - .attester_slashing_timeout(half_epoch * 2) - // .signed_contribution_and_proof_timeout(timeout) // Do not retry - // .sync_committee_message_timeout(timeout) // Do not retry - .build(); + let local_peer_id = network_globals.local_peer_id(); - Ok(Behaviour { - // Sub-behaviours - gossipsub, - eth2_rpc: RPC::new(ctx.fork_context.clone(), log.clone()), - discovery, - identify: Identify::new(identify_config), - // Auxiliary fields - peer_manager: PeerManager::new(peer_manager_cfg, network_globals.clone(), log).await?, - events: VecDeque::new(), - internal_events: VecDeque::new(), + let (gossipsub, update_gossipsub_scores) = { + let thresholds = lighthouse_gossip_thresholds(); + + // Prepare scoring parameters + let params = { + // Construct a set of gossipsub peer scoring parameters + // We don't know the number of active validators and the current slot yet + let active_validators = TSpec::minimum_validator_count(); + let current_slot = Slot::new(0); + score_settings.get_peer_score_params( + 
active_validators, + &thresholds, + &enr_fork_id, + current_slot, + )? + }; + + trace!(log, "Using peer score params"; "params" => ?params); + + // Set up a scoring update interval + let update_gossipsub_scores = tokio::time::interval(params.decay_interval); + + let possible_fork_digests = ctx.fork_context.all_fork_digests(); + let filter = MaxCountSubscriptionFilter { + filter: utils::create_whitelist_filter( + possible_fork_digests, + ctx.chain_spec.attestation_subnet_count, + SYNC_COMMITTEE_SUBNET_COUNT, + ), + max_subscribed_topics: 200, + max_subscriptions_per_request: 150, // 148 in theory = (64 attestation + 4 sync committee + 6 core topics) * 2 + }; + + config.gs_config = gossipsub_config(config.network_load, ctx.fork_context.clone()); + + // If metrics are enabled for gossipsub build the configuration + let gossipsub_metrics = ctx + .gossipsub_registry + .map(|registry| (registry, GossipsubMetricsConfig::default())); + + let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size()); + let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( + MessageAuthenticity::Anonymous, + config.gs_config.clone(), + gossipsub_metrics, + filter, + snappy_transform, + ) + .map_err(|e| format!("Could not construct gossipsub: {:?}", e))?; + + gossipsub + .with_peer_score(params, thresholds) + .expect("Valid score params and thresholds"); + + (gossipsub, update_gossipsub_scores) + }; + + let eth2_rpc = RPC::new(ctx.fork_context.clone(), log.clone()); + + let discovery = { + // Build and start the discovery sub-behaviour + let mut discovery = + Discovery::new(&local_keypair, &config, network_globals.clone(), &log).await?; + // start searching for peers + discovery.discover_peers(FIND_NODE_QUERY_CLOSEST_PEERS); + discovery + }; + + let identify = { + let identify_config = if config.private { + IdentifyConfig::new( + "".into(), + local_keypair.public(), // Still send legitimate public key + ) + .with_cache_size(0) + } else { + 
IdentifyConfig::new("eth2/1.0.0".into(), local_keypair.public()) + .with_agent_version(lighthouse_version::version_with_platform()) + .with_cache_size(0) + }; + Identify::new(identify_config) + }; + + let peer_manager = { + let peer_manager_cfg = PeerManagerCfg { + discovery_enabled: !config.disable_discovery, + metrics_enabled: config.metrics_enabled, + target_peer_count: config.target_peers, + ..Default::default() + }; + PeerManager::new(peer_manager_cfg, network_globals.clone(), &log)? + }; + + let behaviour = { + Behaviour { + gossipsub, + eth2_rpc, + discovery, + identify, + peer_manager, + } + }; + + let (swarm, bandwidth) = { + // Set up the transport - tcp/ws with noise and mplex + let (transport, bandwidth) = build_transport(local_keypair.clone()) + .map_err(|e| format!("Failed to build transport: {:?}", e))?; + + // use the executor for libp2p + struct Executor(task_executor::TaskExecutor); + impl libp2p::core::Executor for Executor { + fn exec(&self, f: Pin + Send>>) { + self.0.spawn(f, "libp2p"); + } + } + + // sets up the libp2p connection limits + let limits = ConnectionLimits::default() + .with_max_pending_incoming(Some(5)) + .with_max_pending_outgoing(Some(16)) + .with_max_established_incoming(Some( + (config.target_peers as f32 + * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) + .ceil() as u32, + )) + .with_max_established_outgoing(Some( + (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32, + )) + .with_max_established(Some( + (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS)) + .ceil() as u32, + )) + .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); + + ( + SwarmBuilder::new(transport, behaviour, local_peer_id) + .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero")) + .connection_event_buffer_size(64) + .connection_limits(limits) + .executor(Box::new(Executor(executor))) + .build(), + bandwidth, + ) + }; + + let mut network = Network { + swarm, 
network_globals, enr_fork_id, - waker: None, network_dir: config.network_dir.clone(), - log: behaviour_log, - score_settings, fork_context: ctx.fork_context, - gossip_cache, + score_settings, update_gossipsub_scores, - }) + gossip_cache, + bandwidth, + local_peer_id, + log, + }; + + network.start(&config).await?; + + let network_globals = network.network_globals.clone(); + + Ok((network, network_globals)) + } + + /// Starts the network: + /// + /// - Starts listening in the given ports. + /// - Dials boot-nodes and libp2p peers. + /// - Subscribes to starting gossipsub topics. + async fn start(&mut self, config: &crate::NetworkConfig) -> error::Result<()> { + let enr = self.network_globals.local_enr(); + info!(self.log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); + let discovery_string = if config.disable_discovery { + "None".into() + } else { + config.discovery_port.to_string() + }; + + debug!(self.log, "Attempting to open listening ports"; "address" => ?config.listen_address, "tcp_port" => config.libp2p_port, "udp_port" => discovery_string); + + let listen_multiaddr = { + let mut m = Multiaddr::from(config.listen_address); + m.push(MProtocol::Tcp(config.libp2p_port)); + m + }; + + match self.swarm.listen_on(listen_multiaddr.clone()) { + Ok(_) => { + let mut log_address = listen_multiaddr; + log_address.push(MProtocol::P2p(enr.peer_id().into())); + info!(self.log, "Listening established"; "address" => %log_address); + } + Err(err) => { + crit!( + self.log, + "Unable to listen on libp2p address"; + "error" => ?err, + "listen_multiaddr" => %listen_multiaddr, + ); + return Err("Libp2p was unable to listen on the given listen address.".into()); + } + }; + + // helper closure for dialing peers + let mut dial = |mut multiaddr: Multiaddr| { + // strip the p2p protocol if it exists + strip_peer_id(&mut multiaddr); + match self.swarm.dial(multiaddr.clone()) { + 
Ok(()) => debug!(self.log, "Dialing libp2p peer"; "address" => %multiaddr), + Err(err) => { + debug!(self.log, "Could not connect to peer"; "address" => %multiaddr, "error" => ?err) + } + }; + }; + + // attempt to connect to user-input libp2p nodes + for multiaddr in &config.libp2p_nodes { + dial(multiaddr.clone()); + } + + // attempt to connect to any specified boot-nodes + let mut boot_nodes = config.boot_nodes_enr.clone(); + boot_nodes.dedup(); + + for bootnode_enr in boot_nodes { + for multiaddr in &bootnode_enr.multiaddr() { + // ignore udp multiaddr if it exists + let components = multiaddr.iter().collect::>(); + if let MProtocol::Udp(_) = components[1] { + continue; + } + + if !self + .network_globals + .peers + .read() + .is_connected_or_dialing(&bootnode_enr.peer_id()) + { + dial(multiaddr.clone()); + } + } + } + + for multiaddr in &config.boot_nodes_multiaddr { + // check TCP support for dialing + if multiaddr + .iter() + .any(|proto| matches!(proto, MProtocol::Tcp(_))) + { + dial(multiaddr.clone()); + } + } + + let mut subscribed_topics: Vec = vec![]; + + for topic_kind in &config.topics { + if self.subscribe_kind(topic_kind.clone()) { + subscribed_topics.push(topic_kind.clone()); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic_kind); + } + } + + if !subscribed_topics.is_empty() { + info!(self.log, "Subscribed to topics"; "topics" => ?subscribed_topics); + } + + Ok(()) } /* Public Accessible Functions to interact with the behaviour */ - /// Get a mutable reference to the underlying discovery sub-behaviour. + /// The routing pub-sub mechanism for eth2. + pub fn gossipsub_mut(&mut self) -> &mut Gossipsub { + &mut self.swarm.behaviour_mut().gossipsub + } + /// The Eth2 RPC specified in the wire-0 protocol. + pub fn eth2_rpc_mut(&mut self) -> &mut RPC, TSpec> { + &mut self.swarm.behaviour_mut().eth2_rpc + } + /// Discv5 Discovery protocol. 
pub fn discovery_mut(&mut self) -> &mut Discovery { - &mut self.discovery + &mut self.swarm.behaviour_mut().discovery + } + /// Provides IP addresses and peer information. + pub fn identify_mut(&mut self) -> &mut Identify { + &mut self.swarm.behaviour_mut().identify + } + /// The peer manager that keeps track of peer's reputation and status. + pub fn peer_manager_mut(&mut self) -> &mut PeerManager { + &mut self.swarm.behaviour_mut().peer_manager } - /// Get a mutable reference to the peer manager. - pub fn peer_manager_mut(&mut self) -> &mut PeerManager { - &mut self.peer_manager + /// The routing pub-sub mechanism for eth2. + pub fn gossipsub(&self) -> &Gossipsub { + &self.swarm.behaviour().gossipsub + } + /// The Eth2 RPC specified in the wire-0 protocol. + pub fn eth2_rpc(&self) -> &RPC, TSpec> { + &self.swarm.behaviour().eth2_rpc + } + /// Discv5 Discovery protocol. + pub fn discovery(&self) -> &Discovery { + &self.swarm.behaviour().discovery + } + /// Provides IP addresses and peer information. + pub fn identify(&self) -> &Identify { + &self.swarm.behaviour().identify + } + /// The peer manager that keeps track of peer's reputation and status. + pub fn peer_manager(&self) -> &PeerManager { + &self.swarm.behaviour().peer_manager } /// Returns the local ENR of the node. @@ -350,11 +523,6 @@ impl Behaviour { self.network_globals.local_enr() } - /// Obtain a reference to the gossipsub protocol. 
- pub fn gs(&self) -> &Gossipsub { - &self.gossipsub - } - /* Pubsub behaviour functions */ /// Subscribes to a gossipsub topic kind, letting the network service determine the @@ -413,7 +581,7 @@ impl Behaviour { let topic: Topic = topic.into(); - match self.gossipsub.subscribe(&topic) { + match self.gossipsub_mut().subscribe(&topic) { Err(e) => { warn!(self.log, "Failed to subscribe to topic"; "topic" => %topic, "error" => ?e); false @@ -436,7 +604,7 @@ impl Behaviour { // unsubscribe from the topic let libp2p_topic: Topic = topic.clone().into(); - match self.gossipsub.unsubscribe(&libp2p_topic) { + match self.gossipsub_mut().unsubscribe(&libp2p_topic) { Err(_) => { warn!(self.log, "Failed to unsubscribe from topic"; "topic" => %libp2p_topic); false @@ -455,8 +623,8 @@ impl Behaviour { for topic in message.topics(GossipEncoding::default(), self.enr_fork_id.fork_digest) { let message_data = message.encode(GossipEncoding::default()); if let Err(e) = self - .gossipsub - .publish(topic.clone().into(), message_data.clone()) + .gossipsub_mut() + .publish(Topic::from(topic.clone()), message_data.clone()) { slog::warn!(self.log, "Could not publish message"; "error" => ?e); @@ -515,7 +683,7 @@ impl Behaviour { } } - if let Err(e) = self.gossipsub.report_message_validation_result( + if let Err(e) = self.gossipsub_mut().report_message_validation_result( &message_id, propagation_source, validation_result, @@ -548,16 +716,16 @@ impl Behaviour { "beacon_attestation_subnet_params" => ?beacon_attestation_subnet_params, ); - self.gossipsub + self.gossipsub_mut() .set_topic_params(get_topic(GossipKind::BeaconBlock), beacon_block_params)?; - self.gossipsub.set_topic_params( + self.gossipsub_mut().set_topic_params( get_topic(GossipKind::BeaconAggregateAndProof), beacon_aggregate_proof_params, )?; for i in 0..self.score_settings.attestation_subnet_count() { - self.gossipsub.set_topic_params( + self.gossipsub_mut().set_topic_params( get_topic(GossipKind::Attestation(SubnetId::new(i))), 
beacon_attestation_subnet_params.clone(), )?; @@ -570,18 +738,17 @@ impl Behaviour { /// Send a request to a peer over RPC. pub fn send_request(&mut self, peer_id: PeerId, request_id: AppReqId, request: Request) { - self.eth2_rpc - .send_request(peer_id, RequestId::Application(request_id), request.into()) + self.eth2_rpc_mut().send_request( + peer_id, + RequestId::Application(request_id), + request.into(), + ) } /// Send a successful response to a peer over RPC. - pub fn send_successful_response( - &mut self, - peer_id: PeerId, - id: PeerRequestId, - response: Response, - ) { - self.eth2_rpc.send_response(peer_id, id, response.into()) + pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response) { + self.eth2_rpc_mut() + .send_response(peer_id, id, response.into()) } /// Inform the peer that their request produced an error. @@ -592,35 +759,54 @@ impl Behaviour { error: RPCResponseErrorCode, reason: String, ) { - self.eth2_rpc - .send_response(peer_id, id, RPCCodedResponse::Error(error, reason.into())) + self.eth2_rpc_mut().send_response( + peer_id, + id, + RPCCodedResponse::Error(error, reason.into()), + ) } /* Peer management functions */ + pub fn testing_dial(&mut self, addr: Multiaddr) -> Result<(), libp2p::swarm::DialError> { + self.swarm.dial(addr) + } + + pub fn report_peer( + &mut self, + peer_id: &PeerId, + action: PeerAction, + source: ReportSource, + msg: &'static str, + ) { + self.peer_manager_mut() + .report_peer(peer_id, action, source, None, msg); + } + /// Disconnects from a peer providing a reason. /// /// This will send a goodbye, disconnect and then ban the peer. /// This is fatal for a peer, and should be used in unrecoverable circumstances. pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason, source: ReportSource) { - self.peer_manager.goodbye_peer(peer_id, reason, source); + self.peer_manager_mut() + .goodbye_peer(peer_id, reason, source); } /// Returns an iterator over all enr entries in the DHT. 
- pub fn enr_entries(&mut self) -> Vec { - self.discovery.table_entries_enr() + pub fn enr_entries(&self) -> Vec { + self.discovery().table_entries_enr() } /// Add an ENR to the routing table of the discovery mechanism. pub fn add_enr(&mut self, enr: Enr) { - self.discovery.add_enr(enr); + self.discovery_mut().add_enr(enr); } /// Updates a subnet value to the ENR attnets/syncnets bitfield. /// /// The `value` is `true` if a subnet is being added and false otherwise. pub fn update_enr_subnet(&mut self, subnet_id: Subnet, value: bool) { - if let Err(e) = self.discovery.update_enr_bitfield(subnet_id, value) { + if let Err(e) = self.discovery_mut().update_enr_bitfield(subnet_id, value) { crit!(self.log, "Could not update ENR bitfield"; "error" => e); } // update the local meta data which informs our peers of the update during PINGS @@ -631,7 +817,7 @@ impl Behaviour { /// would like to retain the peers for. pub fn discover_subnet_peers(&mut self, subnets_to_discover: Vec) { // If discovery is not started or disabled, ignore the request - if !self.discovery.started { + if !self.discovery().started { return; } @@ -678,13 +864,13 @@ impl Behaviour { // request the subnet query from discovery if !filtered.is_empty() { - self.discovery.discover_subnet_peers(filtered); + self.discovery_mut().discover_subnet_peers(filtered); } } /// Updates the local ENR's "eth2" field with the latest EnrForkId. pub fn update_fork_version(&mut self, enr_fork_id: EnrForkId) { - self.discovery.update_eth2_enr(enr_fork_id.clone()); + self.discovery_mut().update_eth2_enr(enr_fork_id.clone()); // update the local reference self.enr_fork_id = enr_fork_id; @@ -695,13 +881,13 @@ impl Behaviour { /// Updates the current meta data of the node to match the local ENR. 
fn update_metadata_bitfields(&mut self) { let local_attnets = self - .discovery + .discovery_mut() .local_enr() .attestation_bitfield::() .expect("Local discovery must have attestation bitfield"); let local_syncnets = self - .discovery + .discovery_mut() .local_enr() .sync_committee_bitfield::() .expect("Local discovery must have sync committee bitfield"); @@ -717,7 +903,7 @@ impl Behaviour { } } // Save the updated metadata to disk - save_metadata_to_disk( + utils::save_metadata_to_disk( &self.network_dir, self.network_globals.local_metadata.read().clone(), &self.log, @@ -730,8 +916,8 @@ impl Behaviour { data: *self.network_globals.local_metadata.read().seq_number(), }; trace!(self.log, "Sending Ping"; "peer_id" => %peer_id); - let id = RequestId::Behaviour; - self.eth2_rpc + let id = RequestId::Internal; + self.eth2_rpc_mut() .send_request(peer_id, id, OutboundRequest::Ping(ping)); } @@ -742,14 +928,14 @@ impl Behaviour { }; trace!(self.log, "Sending Pong"; "request_id" => id.1, "peer_id" => %peer_id); let event = RPCCodedResponse::Success(RPCResponse::Pong(ping)); - self.eth2_rpc.send_response(peer_id, id, event); + self.eth2_rpc_mut().send_response(peer_id, id, event); } /// Sends a METADATA request to a peer. fn send_meta_data_request(&mut self, peer_id: PeerId) { let event = OutboundRequest::MetaData(PhantomData); - self.eth2_rpc - .send_request(peer_id, RequestId::Behaviour, event); + self.eth2_rpc_mut() + .send_request(peer_id, RequestId::Internal, event); } /// Sends a METADATA response to a peer. 
@@ -757,35 +943,36 @@ impl Behaviour { let event = RPCCodedResponse::Success(RPCResponse::MetaData( self.network_globals.local_metadata.read().clone(), )); - self.eth2_rpc.send_response(peer_id, id, event); - } - - /// Returns a reference to the peer manager to allow the swarm to notify the manager of peer - /// status - pub fn peer_manager(&mut self) -> &mut PeerManager { - &mut self.peer_manager + self.eth2_rpc_mut().send_response(peer_id, id, event); } // RPC Propagation methods /// Queues the response to be sent upwards as long at it was requested outside the Behaviour. - fn propagate_response( + #[must_use = "return the response"] + fn build_response( &mut self, id: RequestId, peer_id: PeerId, response: Response, - ) { + ) -> Option> { match id { - RequestId::Application(id) => self.add_event(BehaviourEvent::ResponseReceived { + RequestId::Application(id) => Some(NetworkEvent::ResponseReceived { peer_id, id, response, }), - RequestId::Behaviour => {} + RequestId::Internal => None, } } /// Convenience function to propagate a request. - fn propagate_request(&mut self, id: PeerRequestId, peer_id: PeerId, request: Request) { + #[must_use = "actually return the event"] + fn build_request( + &mut self, + id: PeerRequestId, + peer_id: PeerId, + request: Request, + ) -> NetworkEvent { // Increment metrics match &request { Request::Status(_) => { @@ -798,18 +985,10 @@ impl Behaviour { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) } } - self.add_event(BehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }); - } - - /// Adds an event to the queue waking the current task to process it. 
- fn add_event(&mut self, event: BehaviourEvent) { - self.events.push_back(event); - if let Some(waker) = &self.waker { - waker.wake_by_ref(); } } @@ -818,7 +997,7 @@ impl Behaviour { fn dial_cached_enrs_in_subnet(&mut self, subnet: Subnet) { let predicate = subnet_predicate::(vec![subnet], &self.log); let peers_to_dial: Vec = self - .discovery + .discovery() .cached_enrs() .filter_map(|(peer_id, enr)| { let peers = self.network_globals.peers.read(); @@ -833,61 +1012,17 @@ impl Behaviour { debug!(self.log, "Dialing cached ENR peer"; "peer_id" => %peer_id); // Remove the ENR from the cache to prevent continual re-dialing on disconnects - self.discovery.remove_cached_enr(&peer_id); + self.discovery_mut().remove_cached_enr(&peer_id); // For any dial event, inform the peer manager let enr = self.discovery_mut().enr_of_peer(&peer_id); - self.peer_manager.inject_dialing(&peer_id, enr); - self.internal_events - .push_back(InternalBehaviourMessage::DialPeer(peer_id)); + self.peer_manager_mut().dial_peer(&peer_id, enr); } } - /// Creates a whitelist topic filter that covers all possible topics using the given set of - /// possible fork digests. 
- fn create_whitelist_filter( - possible_fork_digests: Vec<[u8; 4]>, - attestation_subnet_count: u64, - sync_committee_subnet_count: u64, - ) -> WhitelistSubscriptionFilter { - let mut possible_hashes = HashSet::new(); - for fork_digest in possible_fork_digests { - let mut add = |kind| { - let topic: Topic = - GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into(); - possible_hashes.insert(topic.hash()); - }; + /* Sub-behaviour event handling functions */ - use GossipKind::*; - add(BeaconBlock); - add(BeaconAggregateAndProof); - add(VoluntaryExit); - add(ProposerSlashing); - add(AttesterSlashing); - add(SignedContributionAndProof); - for id in 0..attestation_subnet_count { - add(Attestation(SubnetId::new(id))); - } - for id in 0..sync_committee_subnet_count { - add(SyncCommitteeMessage(SyncSubnetId::new(id))); - } - } - WhitelistSubscriptionFilter(possible_hashes) - } -} - -/* Behaviour Event Process Implementations - * - * These implementations dictate how to process each event that is emitted from each - * sub-behaviour. - */ - -// Gossipsub -impl NetworkBehaviourEventProcess for Behaviour -where - AppReqId: ReqId, - TSpec: EthSpec, -{ - fn inject_event(&mut self, event: GossipsubEvent) { + /// Handle a gossipsub event. 
+ fn inject_gs_event(&mut self, event: GossipsubEvent) -> Option> { match event { GossipsubEvent::Message { propagation_source, @@ -900,7 +1035,7 @@ where Err(e) => { debug!(self.log, "Could not decode gossipsub message"; "topic" => ?gs_msg.topic,"error" => e); //reject the message - if let Err(e) = self.gossipsub.report_message_validation_result( + if let Err(e) = self.gossipsub_mut().report_message_validation_result( &id, &propagation_source, MessageAcceptance::Reject, @@ -910,7 +1045,7 @@ where } Ok(msg) => { // Notify the network - self.add_event(BehaviourEvent::PubsubMessage { + return Some(NetworkEvent::PubsubMessage { id, source: propagation_source, topic: gs_msg.topic, @@ -931,7 +1066,12 @@ where if let Some(msgs) = self.gossip_cache.retrieve(&topic) { for data in msgs { let topic_str: &str = topic.kind().as_ref(); - match self.gossipsub.publish(topic.clone().into(), data) { + match self + .swarm + .behaviour_mut() + .gossipsub + .publish(Topic::from(topic.clone()), data) + { Ok(_) => { warn!(self.log, "Gossip message published on retry"; "topic" => topic_str); if let Some(v) = metrics::get_int_counter( @@ -965,7 +1105,7 @@ where } GossipsubEvent::GossipsubNotSupported { peer_id } => { debug!(self.log, "Peer does not support gossipsub"; "peer_id" => %peer_id); - self.peer_manager.report_peer( + self.peer_manager_mut().report_peer( &peer_id, PeerAction::LowToleranceError, ReportSource::Gossipsub, @@ -974,26 +1114,23 @@ where ); } } + None } -} -// RPC -impl NetworkBehaviourEventProcess, TSpec>> - for Behaviour -where - AppReqId: ReqId, - TSpec: EthSpec, -{ - fn inject_event(&mut self, event: RPCMessage, TSpec>) { + /// Handle an RPC event. 
+ fn inject_rpc_event( + &mut self, + event: RPCMessage, TSpec>, + ) -> Option> { let peer_id = event.peer_id; - if !self.peer_manager.is_connected(&peer_id) { + if !self.peer_manager().is_connected(&peer_id) { debug!( self.log, "Ignoring rpc message of disconnecting peer"; event ); - return; + return None; } let handler_id = event.conn_id; @@ -1009,16 +1146,17 @@ where // Inform the peer manager of the error. // An inbound error here means we sent an error to the peer, or the stream // timed out. - self.peer_manager.handle_rpc_error( + self.peer_manager_mut().handle_rpc_error( &peer_id, proto, &error, ConnectionDirection::Incoming, ); + None } HandlerErr::Outbound { id, proto, error } => { // Inform the peer manager that a request we sent to the peer failed - self.peer_manager.handle_rpc_error( + self.peer_manager_mut().handle_rpc_error( &peer_id, proto, &error, @@ -1026,7 +1164,9 @@ where ); // inform failures of requests comming outside the behaviour if let RequestId::Application(id) = id { - self.add_event(BehaviourEvent::RPCFailed { peer_id, id }); + Some(NetworkEvent::RPCFailed { peer_id, id }) + } else { + None } } } @@ -1037,13 +1177,15 @@ where /* Behaviour managed protocols: Ping and Metadata */ InboundRequest::Ping(ping) => { // inform the peer manager and send the response - self.peer_manager.ping_request(&peer_id, ping.data); + self.peer_manager_mut().ping_request(&peer_id, ping.data); // send a ping response self.pong(peer_request_id, peer_id); + None } InboundRequest::MetaData(_) => { // send the requested meta-data self.send_meta_data_response((handler_id, id), peer_id); + None } InboundRequest::Goodbye(reason) => { // queue for disconnection without a goodbye message @@ -1057,13 +1199,16 @@ where // disconnecting here. The RPC handler will automatically // disconnect for us. // The actual disconnection event will be relayed to the application. 
+ None } /* Protocols propagated to the Network */ InboundRequest::Status(msg) => { // inform the peer manager that we have received a status from a peer - self.peer_manager.peer_statusd(&peer_id); + self.peer_manager_mut().peer_statusd(&peer_id); // propagate the STATUS message upwards - self.propagate_request(peer_request_id, peer_id, Request::Status(msg)) + let event = + self.build_request(peer_request_id, peer_id, Request::Status(msg)); + Some(event) } InboundRequest::BlocksByRange(req) => { let methods::OldBlocksByRangeRequest { @@ -1073,7 +1218,7 @@ where } = req; // Still disconnect the peer if the request is naughty. if step == 0 { - self.peer_manager.handle_rpc_error( + self.peer_manager_mut().handle_rpc_error( &peer_id, Protocol::BlocksByRange, &RPCError::InvalidData( @@ -1081,41 +1226,53 @@ where ), ConnectionDirection::Incoming, ); + return None; } // return just one block in case the step parameter is used. https://github.com/ethereum/consensus-specs/pull/2856 if step > 1 { count = 1; } - self.propagate_request( + let event = self.build_request( peer_request_id, peer_id, Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }), ); + Some(event) } InboundRequest::BlocksByRoot(req) => { - self.propagate_request(peer_request_id, peer_id, Request::BlocksByRoot(req)) + let event = self.build_request( + peer_request_id, + peer_id, + Request::BlocksByRoot(req), + ); + Some(event) } } } Ok(RPCReceived::Response(id, resp)) => { match resp { /* Behaviour managed protocols */ - RPCResponse::Pong(ping) => self.peer_manager.pong_response(&peer_id, ping.data), + RPCResponse::Pong(ping) => { + self.peer_manager_mut().pong_response(&peer_id, ping.data); + None + } RPCResponse::MetaData(meta_data) => { - self.peer_manager.meta_data_response(&peer_id, meta_data) + self.peer_manager_mut() + .meta_data_response(&peer_id, meta_data); + None } /* Network propagated protocols */ RPCResponse::Status(msg) => { // inform the peer manager that we have received a status 
from a peer - self.peer_manager.peer_statusd(&peer_id); + self.peer_manager_mut().peer_statusd(&peer_id); // propagate the STATUS message upwards - self.propagate_response(id, peer_id, Response::Status(msg)); + self.build_response(id, peer_id, Response::Status(msg)) } RPCResponse::BlocksByRange(resp) => { - self.propagate_response(id, peer_id, Response::BlocksByRange(Some(resp))) + self.build_response(id, peer_id, Response::BlocksByRange(Some(resp))) } RPCResponse::BlocksByRoot(resp) => { - self.propagate_response(id, peer_id, Response::BlocksByRoot(Some(resp))) + self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } } } @@ -1124,52 +1281,32 @@ where ResponseTermination::BlocksByRange => Response::BlocksByRange(None), ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None), }; - self.propagate_response(id, peer_id, response); + self.build_response(id, peer_id, response) } } } -} -// Discovery -impl NetworkBehaviourEventProcess for Behaviour -where - AppReqId: ReqId, - TSpec: EthSpec, -{ - fn inject_event(&mut self, event: DiscoveryEvent) { - match event { - DiscoveryEvent::SocketUpdated(socket_addr) => { - // A new UDP socket has been detected. - // Build a multiaddr to report to libp2p - let mut multiaddr = Multiaddr::from(socket_addr.ip()); - // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling - // should handle this. 
- multiaddr.push(MProtocol::Tcp(self.network_globals.listen_port_tcp())); - self.internal_events - .push_back(InternalBehaviourMessage::SocketUpdated(multiaddr)); - } - DiscoveryEvent::QueryResult(results) => { - let to_dial_peers = self.peer_manager.peers_discovered(results); - for peer_id in to_dial_peers { - debug!(self.log, "Dialing discovered peer"; "peer_id" => %peer_id); - // For any dial event, inform the peer manager - let enr = self.discovery_mut().enr_of_peer(&peer_id); - self.peer_manager.inject_dialing(&peer_id, enr); - self.internal_events - .push_back(InternalBehaviourMessage::DialPeer(peer_id)); - } - } + /// Handle a discovery event. + fn inject_discovery_event( + &mut self, + event: DiscoveredPeers, + ) -> Option> { + let DiscoveredPeers { peers } = event; + let to_dial_peers = self.peer_manager_mut().peers_discovered(peers); + for peer_id in to_dial_peers { + debug!(self.log, "Dialing discovered peer"; "peer_id" => %peer_id); + // For any dial event, inform the peer manager + let enr = self.discovery_mut().enr_of_peer(&peer_id); + self.peer_manager_mut().dial_peer(&peer_id, enr); } + None } -} -// Identify -impl NetworkBehaviourEventProcess for Behaviour -where - TSpec: EthSpec, - AppReqId: ReqId, -{ - fn inject_event(&mut self, event: IdentifyEvent) { + /// Handle an identify event. + fn inject_identify_event( + &mut self, + event: IdentifyEvent, + ) -> Option> { match event { IdentifyEvent::Received { peer_id, mut info } => { if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { @@ -1180,66 +1317,155 @@ where info.listen_addrs.truncate(MAX_IDENTIFY_ADDRESSES); } // send peer info to the peer manager. - self.peer_manager.identify(&peer_id, &info); + self.peer_manager_mut().identify(&peer_id, &info); } IdentifyEvent::Sent { .. } => {} IdentifyEvent::Error { .. } => {} IdentifyEvent::Pushed { .. 
} => {} } + None } -} -type BehaviourHandler = - as NetworkBehaviour>::ConnectionHandler; - -impl Behaviour -where - TSpec: EthSpec, - AppReqId: ReqId, -{ - /// Consumes the events list and drives the Lighthouse global NetworkBehaviour. - fn poll( + /// Handle a peer manager event. + fn inject_pm_event( &mut self, - cx: &mut Context, - _: &mut impl PollParameters, - ) -> Poll, BehaviourHandler>> { - if let Some(waker) = &self.waker { - if waker.will_wake(cx.waker()) { - self.waker = Some(cx.waker().clone()); + event: PeerManagerEvent, + ) -> Option> { + match event { + PeerManagerEvent::PeerConnectedIncoming(peer_id) => { + Some(NetworkEvent::PeerConnectedIncoming(peer_id)) } - } else { - self.waker = Some(cx.waker().clone()); - } - - // Handle internal events first - if let Some(event) = self.internal_events.pop_front() { - match event { - InternalBehaviourMessage::DialPeer(peer_id) => { - // Submit the event - let handler = self.new_handler(); - return Poll::Ready(NBAction::Dial { - opts: DialOpts::peer_id(peer_id) - .condition(PeerCondition::Disconnected) - .build(), - handler, - }); - } - InternalBehaviourMessage::SocketUpdated(address) => { - return Poll::Ready(NBAction::ReportObservedAddr { - address, - score: AddressScore::Finite(1), - }); - } + PeerManagerEvent::PeerConnectedOutgoing(peer_id) => { + Some(NetworkEvent::PeerConnectedOutgoing(peer_id)) + } + PeerManagerEvent::PeerDisconnected(peer_id) => { + Some(NetworkEvent::PeerDisconnected(peer_id)) + } + PeerManagerEvent::Banned(peer_id, associated_ips) => { + self.discovery_mut().ban_peer(&peer_id, associated_ips); + Some(NetworkEvent::PeerBanned(peer_id)) + } + PeerManagerEvent::UnBanned(peer_id, associated_ips) => { + self.discovery_mut().unban_peer(&peer_id, associated_ips); + Some(NetworkEvent::PeerUnbanned(peer_id)) + } + PeerManagerEvent::Status(peer_id) => { + // it's time to status. 
We don't keep a beacon chain reference here, so we inform + // the network to send a status to this peer + Some(NetworkEvent::StatusPeer(peer_id)) + } + PeerManagerEvent::DiscoverPeers(peers_to_find) => { + // Peer manager has requested a discovery query for more peers. + self.discovery_mut().discover_peers(peers_to_find); + None + } + PeerManagerEvent::DiscoverSubnetPeers(subnets_to_discover) => { + // Peer manager has requested a subnet discovery query for more peers. + self.discover_subnet_peers(subnets_to_discover); + None + } + PeerManagerEvent::Ping(peer_id) => { + // send a ping request to this peer + self.ping(peer_id); + None + } + PeerManagerEvent::MetaData(peer_id) => { + self.send_meta_data_request(peer_id); + None + } + PeerManagerEvent::DisconnectPeer(peer_id, reason) => { + debug!(self.log, "Peer Manager disconnecting peer"; + "peer_id" => %peer_id, "reason" => %reason); + // send one goodbye + self.eth2_rpc_mut() + .shutdown(peer_id, RequestId::Internal, reason); + None } } + } - if let Some(event) = self.events.pop_front() { - return Poll::Ready(NBAction::GenerateEvent(event)); + /* Networking polling */ + + /// Poll the p2p networking stack. + /// + /// This will poll the swarm and do maintenance routines. + pub fn poll_network(&mut self, cx: &mut Context) -> Poll> { + while let Poll::Ready(Some(swarm_event)) = self.swarm.poll_next_unpin(cx) { + let maybe_event = match swarm_event { + SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { + // Handle sub-behaviour events. + BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), + BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), + BehaviourEvent::Discovery(de) => self.inject_discovery_event(de), + BehaviourEvent::Identify(ie) => self.inject_identify_event(ie), + BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe), + }, + SwarmEvent::ConnectionEstablished { .. } => None, + SwarmEvent::ConnectionClosed { .. 
} => None, + SwarmEvent::IncomingConnection { + local_addr, + send_back_addr, + } => { + trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr); + None + } + SwarmEvent::IncomingConnectionError { + local_addr, + send_back_addr, + error, + } => { + debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error); + None + } + SwarmEvent::OutgoingConnectionError { peer_id, error } => { + debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error); + None + } + SwarmEvent::BannedPeer { + peer_id, + endpoint: _, + } => { + debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); + None + } + SwarmEvent::NewListenAddr { address, .. } => { + Some(NetworkEvent::NewListenAddr(address)) + } + SwarmEvent::ExpiredListenAddr { address, .. } => { + debug!(self.log, "Listen address expired"; "address" => %address); + None + } + SwarmEvent::ListenerClosed { + addresses, reason, .. + } => { + crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); + if Swarm::listeners(&self.swarm).count() == 0 { + Some(NetworkEvent::ZeroListeners) + } else { + None + } + } + SwarmEvent::ListenerError { error, .. 
} => { + // this is non fatal, but we still check + warn!(self.log, "Listener error"; "error" => ?error); + if Swarm::listeners(&self.swarm).count() == 0 { + Some(NetworkEvent::ZeroListeners) + } else { + None + } + } + SwarmEvent::Dialing(_) => None, + }; + + if let Some(ev) = maybe_event { + return Poll::Ready(ev); + } } // perform gossipsub score updates when necessary while self.update_gossipsub_scores.poll_tick(cx).is_ready() { - self.peer_manager.update_gossipsub_scores(&self.gossipsub); + let this = self.swarm.behaviour_mut(); + this.peer_manager.update_gossipsub_scores(&this.gossipsub); } // poll the gossipsub cache to clear expired messages @@ -1256,161 +1482,10 @@ where } } } - Poll::Pending } -} -impl NetworkBehaviourEventProcess - for Behaviour -{ - fn inject_event(&mut self, event: PeerManagerEvent) { - match event { - PeerManagerEvent::PeerConnectedIncoming(peer_id) => { - self.add_event(BehaviourEvent::PeerConnectedIncoming(peer_id)); - } - PeerManagerEvent::PeerConnectedOutgoing(peer_id) => { - self.add_event(BehaviourEvent::PeerConnectedOutgoing(peer_id)); - } - PeerManagerEvent::PeerDisconnected(peer_id) => { - self.add_event(BehaviourEvent::PeerDisconnected(peer_id)); - } - PeerManagerEvent::Banned(peer_id, associated_ips) => { - self.discovery.ban_peer(&peer_id, associated_ips); - self.add_event(BehaviourEvent::PeerBanned(peer_id)); - } - PeerManagerEvent::UnBanned(peer_id, associated_ips) => { - self.discovery.unban_peer(&peer_id, associated_ips); - self.add_event(BehaviourEvent::PeerUnbanned(peer_id)); - } - PeerManagerEvent::Status(peer_id) => { - // it's time to status. We don't keep a beacon chain reference here, so we inform - // the network to send a status to this peer - self.add_event(BehaviourEvent::StatusPeer(peer_id)); - } - PeerManagerEvent::DiscoverPeers(peers_to_find) => { - // Peer manager has requested a discovery query for more peers. 
- self.discovery.discover_peers(peers_to_find); - } - PeerManagerEvent::DiscoverSubnetPeers(subnets_to_discover) => { - // Peer manager has requested a subnet discovery query for more peers. - self.discover_subnet_peers(subnets_to_discover); - } - PeerManagerEvent::Ping(peer_id) => { - // send a ping request to this peer - self.ping(peer_id); - } - PeerManagerEvent::MetaData(peer_id) => { - self.send_meta_data_request(peer_id); - } - PeerManagerEvent::DisconnectPeer(peer_id, reason) => { - debug!(self.log, "Peer Manager disconnecting peer"; - "peer_id" => %peer_id, "reason" => %reason); - // send one goodbye - self.eth2_rpc - .shutdown(peer_id, RequestId::Behaviour, reason); - } - } - } -} - -/* Public API types */ - -/// The type of RPC requests the Behaviour informs it has received and allows for sending. -/// -// NOTE: This is an application-level wrapper over the lower network level requests that can be -// sent. The main difference is the absence of the Ping, Metadata and Goodbye protocols, which don't -// leave the Behaviour. For all protocols managed by RPC see `RPCRequest`. -#[derive(Debug, Clone, PartialEq)] -pub enum Request { - /// A Status message. - Status(StatusMessage), - /// A blocks by range request. - BlocksByRange(BlocksByRangeRequest), - /// A request blocks root request. - BlocksByRoot(BlocksByRootRequest), -} - -impl std::convert::From for OutboundRequest { - fn from(req: Request) -> OutboundRequest { - match req { - Request::BlocksByRoot(r) => OutboundRequest::BlocksByRoot(r), - Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }) => { - OutboundRequest::BlocksByRange(methods::OldBlocksByRangeRequest { - start_slot, - count, - step: 1, - }) - } - Request::Status(s) => OutboundRequest::Status(s), - } - } -} - -/// The type of RPC responses the Behaviour informs it has received, and allows for sending. -/// -// NOTE: This is an application-level wrapper over the lower network level responses that can be -// sent. 
The main difference is the absense of Pong and Metadata, which don't leave the -// Behaviour. For all protocol reponses managed by RPC see `RPCResponse` and -// `RPCCodedResponse`. -#[derive(Debug, Clone, PartialEq)] -pub enum Response { - /// A Status message. - Status(StatusMessage), - /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch. - BlocksByRange(Option>>), - /// A response to a get BLOCKS_BY_ROOT request. - BlocksByRoot(Option>>), -} - -impl std::convert::From> for RPCCodedResponse { - fn from(resp: Response) -> RPCCodedResponse { - match resp { - Response::BlocksByRoot(r) => match r { - Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRoot(b)), - None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRoot), - }, - Response::BlocksByRange(r) => match r { - Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)), - None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), - }, - Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), - } - } -} - -/// Persist metadata to disk -pub fn save_metadata_to_disk(dir: &Path, metadata: MetaData, log: &slog::Logger) { - let _ = std::fs::create_dir_all(&dir); - match File::create(dir.join(METADATA_FILENAME)) - .and_then(|mut f| f.write_all(&metadata.as_ssz_bytes())) - { - Ok(_) => { - debug!(log, "Metadata written to disk"); - } - Err(e) => { - warn!( - log, - "Could not write metadata to disk"; - "file" => format!("{:?}{:?}", dir, METADATA_FILENAME), - "error" => %e - ); - } - } -} - -impl slog::Value for RequestId { - fn serialize( - &self, - record: &slog::Record, - key: slog::Key, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - match self { - RequestId::Behaviour => slog::Value::serialize("Behaviour", record, key, serializer), - RequestId::Application(ref id) => { - slog::Value::serialize(&format_args!("{:?}", id), record, key, serializer) - } - } + pub async fn 
next_event(&mut self) -> NetworkEvent { + futures::future::poll_fn(|cx| self.poll_network(cx)).await } } diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs new file mode 100644 index 0000000000..2aaa46fe8b --- /dev/null +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -0,0 +1,288 @@ +use crate::multiaddr::Protocol; +use crate::rpc::{MetaData, MetaDataV1, MetaDataV2}; +use crate::types::{ + error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind, +}; +use crate::{GossipTopic, NetworkConfig}; +use libp2p::bandwidth::{BandwidthLogging, BandwidthSinks}; +use libp2p::core::{ + identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed, +}; +use libp2p::gossipsub::subscription_filter::WhitelistSubscriptionFilter; +use libp2p::gossipsub::IdentTopic as Topic; +use libp2p::{core, noise, PeerId, Transport}; +use prometheus_client::registry::Registry; +use slog::{debug, warn}; +use ssz::Decode; +use ssz::Encode; +use std::collections::HashSet; +use std::fs::File; +use std::io::prelude::*; +use std::path::Path; +use std::sync::Arc; +use std::time::Duration; +use types::{ChainSpec, EnrForkId, EthSpec, ForkContext, SubnetId, SyncSubnetId}; + +pub const NETWORK_KEY_FILENAME: &str = "key"; +/// The maximum simultaneous libp2p connections per peer. +pub const MAX_CONNECTIONS_PER_PEER: u32 = 1; +/// The filename to store our local metadata. +pub const METADATA_FILENAME: &str = "metadata"; + +pub struct Context<'a> { + pub config: &'a NetworkConfig, + pub enr_fork_id: EnrForkId, + pub fork_context: Arc, + pub chain_spec: &'a ChainSpec, + pub gossipsub_registry: Option<&'a mut Registry>, +} + +type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; + +/// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and +/// mplex as the multiplexing layer. 
+pub fn build_transport( + local_private_key: Keypair, +) -> std::io::Result<(BoxedTransport, Arc)> { + let tcp = + libp2p::tcp::TokioTcpTransport::new(libp2p::tcp::GenTcpConfig::default().nodelay(true)); + let transport = libp2p::dns::TokioDnsConfig::system(tcp)?; + #[cfg(feature = "libp2p-websocket")] + let transport = { + let trans_clone = transport.clone(); + transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone)) + }; + + let (transport, bandwidth) = BandwidthLogging::new(transport); + + // mplex config + let mut mplex_config = libp2p::mplex::MplexConfig::new(); + mplex_config.set_max_buffer_size(256); + mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block); + + // yamux config + let mut yamux_config = libp2p::yamux::YamuxConfig::default(); + yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read()); + + // Authentication + Ok(( + transport + .upgrade(core::upgrade::Version::V1) + .authenticate(generate_noise_config(&local_private_key)) + .multiplex(core::upgrade::SelectUpgrade::new( + yamux_config, + mplex_config, + )) + .timeout(Duration::from_secs(10)) + .boxed(), + bandwidth, + )) +} + +// Useful helper functions for debugging. Currently not used in the client. 
+#[allow(dead_code)] +fn keypair_from_hex(hex_bytes: &str) -> error::Result { + let hex_bytes = if let Some(stripped) = hex_bytes.strip_prefix("0x") { + stripped.to_string() + } else { + hex_bytes.to_string() + }; + + hex::decode(&hex_bytes) + .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into()) + .and_then(keypair_from_bytes) +} + +#[allow(dead_code)] +fn keypair_from_bytes(mut bytes: Vec) -> error::Result { + libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes) + .map(|secret| { + let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into(); + Keypair::Secp256k1(keypair) + }) + .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into()) +} + +/// Loads a private key from disk. If this fails, a new key is +/// generated and is then saved to disk. +/// +/// Currently only secp256k1 keys are allowed, as these are the only keys supported by discv5. +pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair { + // check for key from disk + let network_key_f = config.network_dir.join(NETWORK_KEY_FILENAME); + if let Ok(mut network_key_file) = File::open(network_key_f.clone()) { + let mut key_bytes: Vec = Vec::with_capacity(36); + match network_key_file.read_to_end(&mut key_bytes) { + Err(_) => debug!(log, "Could not read network key file"), + Ok(_) => { + // only accept secp256k1 keys for now + if let Ok(secret_key) = + libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut key_bytes) + { + let kp: libp2p::core::identity::secp256k1::Keypair = secret_key.into(); + debug!(log, "Loaded network key from disk."); + return Keypair::Secp256k1(kp); + } else { + debug!(log, "Network key file is not a valid secp256k1 key"); + } + } + } + } + + // if a key could not be loaded from disk, generate a new one and save it + let local_private_key = Keypair::generate_secp256k1(); + if let Keypair::Secp256k1(key) = local_private_key.clone() { + let _ = std::fs::create_dir_all(&config.network_dir); 
+ match File::create(network_key_f.clone()) + .and_then(|mut f| f.write_all(&key.secret().to_bytes())) + { + Ok(_) => { + debug!(log, "New network key generated and written to disk"); + } + Err(e) => { + warn!( + log, + "Could not write node key to file: {:?}. error: {}", network_key_f, e + ); + } + } + } + local_private_key +} + +/// Generate authenticated XX Noise config from identity keys +fn generate_noise_config( + identity_keypair: &Keypair, +) -> noise::NoiseAuthenticated { + let static_dh_keys = noise::Keypair::::new() + .into_authentic(identity_keypair) + .expect("signing can fail only once during starting a node"); + noise::NoiseConfig::xx(static_dh_keys).into_authenticated() +} + +/// For a multiaddr that ends with a peer id, this strips this suffix. Rust-libp2p +/// only supports dialing to an address without providing the peer id. +pub fn strip_peer_id(addr: &mut Multiaddr) { + let last = addr.pop(); + match last { + Some(Protocol::P2p(_)) => {} + Some(other) => addr.push(other), + _ => {} + } +} + +/// Load metadata from persisted file. Return default metadata if loading fails. +pub fn load_or_build_metadata( + network_dir: &std::path::Path, + log: &slog::Logger, +) -> MetaData { + // We load a V2 metadata version by default (regardless of current fork) + // since a V2 metadata can be converted to V1. The RPC encoder is responsible + // for sending the correct metadata version based on the negotiated protocol version. 
+ let mut meta_data = MetaDataV2 { + seq_number: 0, + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), + }; + // Read metadata from persisted file if available + let metadata_path = network_dir.join(METADATA_FILENAME); + if let Ok(mut metadata_file) = File::open(metadata_path) { + let mut metadata_ssz = Vec::new(); + if metadata_file.read_to_end(&mut metadata_ssz).is_ok() { + // Attempt to read a MetaDataV2 version from the persisted file, + // if that fails, read MetaDataV1 + match MetaDataV2::::from_ssz_bytes(&metadata_ssz) { + Ok(persisted_metadata) => { + meta_data.seq_number = persisted_metadata.seq_number; + // Increment seq number if persisted attnet is not default + if persisted_metadata.attnets != meta_data.attnets + || persisted_metadata.syncnets != meta_data.syncnets + { + meta_data.seq_number += 1; + } + debug!(log, "Loaded metadata from disk"); + } + Err(_) => { + match MetaDataV1::::from_ssz_bytes(&metadata_ssz) { + Ok(persisted_metadata) => { + let persisted_metadata = MetaData::V1(persisted_metadata); + // Increment seq number as the persisted metadata version is updated + meta_data.seq_number = *persisted_metadata.seq_number() + 1; + debug!(log, "Loaded metadata from disk"); + } + Err(e) => { + debug!( + log, + "Metadata from file could not be decoded"; + "error" => ?e, + ); + } + } + } + } + } + }; + + // Wrap the MetaData + let meta_data = MetaData::V2(meta_data); + + debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number()); + save_metadata_to_disk(network_dir, meta_data.clone(), log); + meta_data +} + +/// Creates a whitelist topic filter that covers all possible topics using the given set of +/// possible fork digests. 
+pub(crate) fn create_whitelist_filter( + possible_fork_digests: Vec<[u8; 4]>, + attestation_subnet_count: u64, + sync_committee_subnet_count: u64, +) -> WhitelistSubscriptionFilter { + let mut possible_hashes = HashSet::new(); + for fork_digest in possible_fork_digests { + let mut add = |kind| { + let topic: Topic = + GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into(); + possible_hashes.insert(topic.hash()); + }; + + use GossipKind::*; + add(BeaconBlock); + add(BeaconAggregateAndProof); + add(VoluntaryExit); + add(ProposerSlashing); + add(AttesterSlashing); + add(SignedContributionAndProof); + for id in 0..attestation_subnet_count { + add(Attestation(SubnetId::new(id))); + } + for id in 0..sync_committee_subnet_count { + add(SyncCommitteeMessage(SyncSubnetId::new(id))); + } + } + WhitelistSubscriptionFilter(possible_hashes) +} + +/// Persist metadata to disk +pub(crate) fn save_metadata_to_disk( + dir: &Path, + metadata: MetaData, + log: &slog::Logger, +) { + let _ = std::fs::create_dir_all(&dir); + match File::create(dir.join(METADATA_FILENAME)) + .and_then(|mut f| f.write_all(&metadata.as_ssz_bytes())) + { + Ok(_) => { + debug!(log, "Metadata written to disk"); + } + Err(e) => { + warn!( + log, + "Could not write metadata to disk"; + "file" => format!("{:?}{:?}", dir, METADATA_FILENAME), + "error" => %e + ); + } + } +} diff --git a/beacon_node/lighthouse_network/tests/common/behaviour.rs b/beacon_node/lighthouse_network/tests/common/behaviour.rs index 76eecfcbc5..50fe6941db 100644 --- a/beacon_node/lighthouse_network/tests/common/behaviour.rs +++ b/beacon_node/lighthouse_network/tests/common/behaviour.rs @@ -23,7 +23,8 @@ use std::collections::HashMap; use std::task::{Context, Poll}; -use libp2p::core::connection::{ConnectedPoint, ConnectionId, ListenerId}; +use libp2p::core::connection::{ConnectedPoint, ConnectionId}; +use libp2p::core::transport::ListenerId; use libp2p::swarm::handler::{ConnectionHandler, DummyConnectionHandler, 
IntoConnectionHandler}; use libp2p::swarm::{DialError, NetworkBehaviour, NetworkBehaviourAction, PollParameters}; use libp2p::{Multiaddr, PeerId}; diff --git a/beacon_node/lighthouse_network/tests/common/mod.rs b/beacon_node/lighthouse_network/tests/common/mod.rs index ea770de6c2..a3c32d0fb1 100644 --- a/beacon_node/lighthouse_network/tests/common/mod.rs +++ b/beacon_node/lighthouse_network/tests/common/mod.rs @@ -1,10 +1,10 @@ #![cfg(test)] use libp2p::gossipsub::GossipsubConfigBuilder; +use lighthouse_network::service::Network as LibP2PService; use lighthouse_network::Enr; use lighthouse_network::EnrExt; use lighthouse_network::Multiaddr; -use lighthouse_network::Service as LibP2PService; -use lighthouse_network::{Libp2pEvent, NetworkConfig}; +use lighthouse_network::{NetworkConfig, NetworkEvent}; use slog::{debug, error, o, Drain}; use std::sync::Arc; use std::sync::Weak; @@ -119,18 +119,19 @@ pub async fn build_libp2p_instance( LibP2PService::new(executor, libp2p_context, &log) .await .expect("should build libp2p instance") - .1, + .0, signal, ) } #[allow(dead_code)] pub fn get_enr(node: &LibP2PService) -> Enr { - node.swarm.behaviour().local_enr() + node.local_enr() } // Returns `n` libp2p peers in fully connected topology. #[allow(dead_code)] +/* pub async fn build_full_mesh( rt: Weak, log: slog::Logger, @@ -157,8 +158,7 @@ pub async fn build_full_mesh( } } nodes -} - +}*/ // Constructs a pair of nodes with separate loggers. The sender dials the receiver. // This returns a (sender, receiver) pair. 
#[allow(dead_code)] @@ -173,19 +173,19 @@ pub async fn build_node_pair( let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name).await; let mut receiver = build_libp2p_instance(rt, vec![], receiver_log, fork_name).await; - let receiver_multiaddr = receiver.swarm.behaviour_mut().local_enr().multiaddr()[1].clone(); + let receiver_multiaddr = receiver.local_enr().multiaddr()[1].clone(); // let the two nodes set up listeners let sender_fut = async { loop { - if let Libp2pEvent::NewListenAddr(_) = sender.next_event().await { + if let NetworkEvent::NewListenAddr(_) = sender.next_event().await { return; } } }; let receiver_fut = async { loop { - if let Libp2pEvent::NewListenAddr(_) = receiver.next_event().await { + if let NetworkEvent::NewListenAddr(_) = receiver.next_event().await { return; } } @@ -199,7 +199,8 @@ pub async fn build_node_pair( _ = joined => {} } - match libp2p::Swarm::dial(&mut sender.swarm, receiver_multiaddr.clone()) { + // sender.dial_peer(peer_id); + match sender.testing_dial(receiver_multiaddr.clone()) { Ok(()) => { debug!(log, "Sender dialed receiver"; "address" => format!("{:?}", receiver_multiaddr)) } @@ -226,7 +227,7 @@ pub async fn build_linear( .map(|x| get_enr(x).multiaddr()[1].clone()) .collect(); for i in 0..n - 1 { - match libp2p::Swarm::dial(&mut nodes[i].swarm, multiaddrs[i + 1].clone()) { + match nodes[i].testing_dial(multiaddrs[i + 1].clone()) { Ok(()) => debug!(log, "Connected"), Err(_) => error!(log, "Failed to connect"), }; diff --git a/beacon_node/lighthouse_network/tests/pm_tests.rs b/beacon_node/lighthouse_network/tests/pm_tests.rs index 96767204db..17a044ced0 100644 --- a/beacon_node/lighthouse_network/tests/pm_tests.rs +++ b/beacon_node/lighthouse_network/tests/pm_tests.rs @@ -98,9 +98,7 @@ async fn banned_peers_consistency() { discovery_enabled: false, ..Default::default() }; - let pm = PeerManager::new(pm_config, globals.clone(), &pm_log) - .await - .unwrap(); + let pm = PeerManager::new(pm_config, 
globals.clone(), &pm_log).unwrap(); let mut pm_swarm = swarm::new_test_swarm(Behaviour::new(pm)); let pm_addr = swarm::bind_listener(&mut pm_swarm).await; let service = Service { swarm: pm_swarm }; diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 90052859bc..9183453492 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -1,8 +1,6 @@ #![cfg(test)] use lighthouse_network::rpc::methods::*; -use lighthouse_network::{ - rpc::max_rpc_size, BehaviourEvent, Libp2pEvent, ReportSource, Request, Response, -}; +use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Request, Response}; use slog::{debug, warn, Level}; use ssz::Encode; use ssz_types::VariableList; @@ -86,19 +84,16 @@ fn test_status_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender - .swarm - .behaviour_mut() - .send_request(peer_id, 10, rpc_request.clone()); + sender.send_request(peer_id, 10, rpc_request.clone()); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: 10, response, - }) => { + } => { // Should receive the RPC response debug!(log, "Sender Received"); assert_eq!(response, rpc_response.clone()); @@ -114,19 +109,15 @@ fn test_status_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }) => { + } => { if request == rpc_request { // send the response debug!(log, "Receiver Received"); - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - rpc_response.clone(), - ); + receiver.send_response(peer_id, id, 
rpc_response.clone()); } } _ => {} // Ignore other events @@ -191,20 +182,16 @@ fn test_blocks_by_range_chunked_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.behaviour_mut().send_request( - peer_id, - request_id, - rpc_request.clone(), - ); + sender.send_request(peer_id, request_id, rpc_request.clone()); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: _, response, - }) => { + } => { warn!(log, "Sender received a response"); match response { Response::BlocksByRange(Some(_)) => { @@ -236,11 +223,11 @@ fn test_blocks_by_range_chunked_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }) => { + } => { if request == rpc_request { // send the response warn!(log, "Receiver got request"); @@ -254,18 +241,10 @@ fn test_blocks_by_range_chunked_rpc() { } else { rpc_response_merge_small.clone() }; - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - rpc_response.clone(), - ); + receiver.send_response(peer_id, id, rpc_response.clone()); } // send the stream termination - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - Response::BlocksByRange(None), - ); + receiver.send_response(peer_id, id, Response::BlocksByRange(None)); } } _ => {} // Ignore other events @@ -318,17 +297,13 @@ fn test_blocks_by_range_over_limit() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - 
sender.swarm.behaviour_mut().send_request( - peer_id, - request_id, - rpc_request.clone(), - ); + sender.send_request(peer_id, request_id, rpc_request.clone()); } // The request will fail because the sender will refuse to send anything > MAX_RPC_SIZE - Libp2pEvent::Behaviour(BehaviourEvent::RPCFailed { id, .. }) => { + NetworkEvent::RPCFailed { id, .. } => { assert_eq!(id, request_id); return; } @@ -341,28 +316,20 @@ fn test_blocks_by_range_over_limit() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }) => { + } => { if request == rpc_request { // send the response warn!(log, "Receiver got request"); for _ in 0..messages_to_send { let rpc_response = rpc_response_merge_large.clone(); - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - rpc_response.clone(), - ); + receiver.send_response(peer_id, id, rpc_response.clone()); } // send the stream termination - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - Response::BlocksByRange(None), - ); + receiver.send_response(peer_id, id, Response::BlocksByRange(None)); } } _ => {} // Ignore other events @@ -418,20 +385,16 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.behaviour_mut().send_request( - peer_id, - request_id, - rpc_request.clone(), - ); + sender.send_request(peer_id, request_id, rpc_request.clone()); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: _, response, - }) => + } => // Should receive the RPC response { debug!(log, "Sender received a response"); @@ -469,11 +432,11 @@ fn 
test_blocks_by_range_chunked_rpc_terminates_correctly() { .await { futures::future::Either::Left(( - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }), + }, _, )) => { if request == rpc_request { @@ -490,11 +453,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { if message_info.is_some() { messages_sent += 1; let (peer_id, stream_id) = message_info.as_ref().unwrap(); - receiver.swarm.behaviour_mut().send_successful_response( - *peer_id, - *stream_id, - rpc_response.clone(), - ); + receiver.send_response(*peer_id, *stream_id, rpc_response.clone()); debug!(log, "Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send { // stop sending messages @@ -550,19 +509,16 @@ fn test_blocks_by_range_single_empty_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender - .swarm - .behaviour_mut() - .send_request(peer_id, 10, rpc_request.clone()); + sender.send_request(peer_id, 10, rpc_request.clone()); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: 10, response, - }) => match response { + } => match response { Response::BlocksByRange(Some(_)) => { assert_eq!(response, rpc_response.clone()); messages_received += 1; @@ -585,28 +541,20 @@ fn test_blocks_by_range_single_empty_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }) => { + } => { if request == rpc_request { // send the response warn!(log, "Receiver got request"); for _ in 1..=messages_to_send { - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - 
rpc_response.clone(), - ); + receiver.send_response(peer_id, id, rpc_response.clone()); } // send the stream termination - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - Response::BlocksByRange(None), - ); + receiver.send_response(peer_id, id, Response::BlocksByRange(None)); } } _ => {} // Ignore other events @@ -676,19 +624,16 @@ fn test_blocks_by_root_chunked_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender - .swarm - .behaviour_mut() - .send_request(peer_id, 6, rpc_request.clone()); + sender.send_request(peer_id, 6, rpc_request.clone()); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: 6, response, - }) => match response { + } => match response { Response::BlocksByRoot(Some(_)) => { if messages_received < 2 { assert_eq!(response, rpc_response_base.clone()); @@ -717,11 +662,11 @@ fn test_blocks_by_root_chunked_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }) => { + } => { if request == rpc_request { // send the response debug!(log, "Receiver got request"); @@ -735,19 +680,11 @@ fn test_blocks_by_root_chunked_rpc() { } else { rpc_response_merge_small.clone() }; - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - rpc_response, - ); + receiver.send_response(peer_id, id, rpc_response); debug!(log, "Sending message"); } // send the stream termination - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - Response::BlocksByRange(None), - ); + receiver.send_response(peer_id, id, Response::BlocksByRange(None)); debug!(log, "Send stream term"); } } @@ -811,19 +748,16 
@@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender - .swarm - .behaviour_mut() - .send_request(peer_id, 10, rpc_request.clone()); + sender.send_request(peer_id, 10, rpc_request.clone()); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: 10, response, - }) => { + } => { debug!(log, "Sender received a response"); match response { Response::BlocksByRoot(Some(_)) => { @@ -861,11 +795,11 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { .await { futures::future::Either::Left(( - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }), + }, _, )) => { if request == rpc_request { @@ -882,11 +816,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { if message_info.is_some() { messages_sent += 1; let (peer_id, stream_id) = message_info.as_ref().unwrap(); - receiver.swarm.behaviour_mut().send_successful_response( - *peer_id, - *stream_id, - rpc_response.clone(), - ); + receiver.send_response(*peer_id, *stream_id, rpc_response.clone()); debug!(log, "Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send { // stop sending messages @@ -926,16 +856,16 @@ fn test_goodbye_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a goodbye and disconnect debug!(log, "Sending RPC"); - sender.swarm.behaviour_mut().goodbye_peer( + sender.goodbye_peer( &peer_id, GoodbyeReason::IrrelevantNetwork, ReportSource::SyncService, ); } - 
Libp2pEvent::Behaviour(BehaviourEvent::PeerDisconnected(_)) => { + NetworkEvent::PeerDisconnected(_) => { return; } _ => {} // Ignore other RPC messages @@ -947,7 +877,7 @@ fn test_goodbye_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDisconnected(_)) => { + NetworkEvent::PeerDisconnected(_) => { // Should receive sent RPC request return; } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index f5e32dcff0..ec8573ea1f 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -11,17 +11,16 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; -use lighthouse_network::{ - prometheus_client::registry::Registry, MessageAcceptance, Service as LibP2PService, -}; +use futures::StreamExt; +use lighthouse_network::service::Network; +use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance}; use lighthouse_network::{ rpc::{GoodbyeReason, RPCResponseErrorCode}, - Context, Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, - Response, Subnet, + Context, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, Response, Subnet, }; use lighthouse_network::{ types::{GossipEncoding, GossipTopic}, - BehaviourEvent, MessageId, NetworkGlobals, PeerId, + MessageId, NetworkEvent, NetworkGlobals, PeerId, }; use slog::{crit, debug, error, info, o, trace, warn}; use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; @@ -171,7 +170,7 @@ pub struct NetworkService { /// A reference to the underlying beacon chain. beacon_chain: Arc>, /// The underlying libp2p service that drives all the network interactions. - libp2p: LibP2PService, + libp2p: Network, /// An attestation and subnet manager service. attestation_service: AttestationService, /// A sync committeee subnet manager service. 
@@ -273,8 +272,8 @@ impl NetworkService { }; // launch libp2p service - let (network_globals, mut libp2p) = - LibP2PService::new(executor.clone(), service_context, &network_log).await?; + let (mut libp2p, network_globals) = + Network::new(executor.clone(), service_context, &network_log).await?; // Repopulate the DHT with stored ENR's if discovery is not disabled. if !config.disable_discovery { @@ -284,7 +283,7 @@ impl NetworkService { "Loading peers into the routing table"; "peers" => enrs_to_load.len() ); for enr in enrs_to_load { - libp2p.swarm.behaviour_mut().add_enr(enr.clone()); + libp2p.add_enr(enr.clone()); } } @@ -402,7 +401,7 @@ impl NetworkService { _ = self.metrics_update.tick(), if self.metrics_enabled => { // update various network metrics metrics::update_gossip_metrics::( - self.libp2p.swarm.behaviour().gs(), + self.libp2p.gossipsub(), &self.network_globals, ); // update sync metrics @@ -429,7 +428,7 @@ impl NetworkService { Some(_) = &mut self.next_unsubscribe => { let new_enr_fork_id = self.beacon_chain.enr_fork_id(); - self.libp2p.swarm.behaviour_mut().unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest); + self.libp2p.unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest); info!(self.log, "Unsubscribed from old fork topics"); self.next_unsubscribe = Box::pin(None.into()); } @@ -439,7 +438,7 @@ impl NetworkService { let fork_version = self.beacon_chain.spec.fork_version_for_name(fork_name); let fork_digest = ChainSpec::compute_fork_digest(fork_version, self.beacon_chain.genesis_validators_root); info!(self.log, "Subscribing to new fork topics"); - self.libp2p.swarm.behaviour_mut().subscribe_new_fork_topics(fork_digest); + self.libp2p.subscribe_new_fork_topics(fork_digest); self.next_fork_subscriptions = Box::pin(None.into()); } else { @@ -456,92 +455,90 @@ impl NetworkService { /// Handle an event received from the network. 
async fn on_libp2p_event( &mut self, - ev: Libp2pEvent, + ev: NetworkEvent, shutdown_sender: &mut Sender, ) { match ev { - Libp2pEvent::Behaviour(event) => match event { - BehaviourEvent::PeerConnectedOutgoing(peer_id) => { - self.send_to_router(RouterMessage::PeerDialed(peer_id)); - } - BehaviourEvent::PeerConnectedIncoming(_) - | BehaviourEvent::PeerBanned(_) - | BehaviourEvent::PeerUnbanned(_) => { - // No action required for these events. - } - BehaviourEvent::PeerDisconnected(peer_id) => { - self.send_to_router(RouterMessage::PeerDisconnected(peer_id)); - } - BehaviourEvent::RequestReceived { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { + self.send_to_router(RouterMessage::PeerDialed(peer_id)); + } + NetworkEvent::PeerConnectedIncoming(_) + | NetworkEvent::PeerBanned(_) + | NetworkEvent::PeerUnbanned(_) => { + // No action required for these events. + } + NetworkEvent::PeerDisconnected(peer_id) => { + self.send_to_router(RouterMessage::PeerDisconnected(peer_id)); + } + NetworkEvent::RequestReceived { + peer_id, + id, + request, + } => { + self.send_to_router(RouterMessage::RPCRequestReceived { peer_id, id, request, - } => { - self.send_to_router(RouterMessage::RPCRequestReceived { - peer_id, - id, - request, - }); - } - BehaviourEvent::ResponseReceived { + }); + } + NetworkEvent::ResponseReceived { + peer_id, + id, + response, + } => { + self.send_to_router(RouterMessage::RPCResponseReceived { peer_id, - id, + request_id: id, response, - } => { - self.send_to_router(RouterMessage::RPCResponseReceived { - peer_id, - request_id: id, - response, - }); - } - BehaviourEvent::RPCFailed { id, peer_id } => { - self.send_to_router(RouterMessage::RPCFailed { - peer_id, - request_id: id, - }); - } - BehaviourEvent::StatusPeer(peer_id) => { - self.send_to_router(RouterMessage::StatusPeer(peer_id)); - } - BehaviourEvent::PubsubMessage { - id, - source, - message, - .. 
- } => { - match message { - // attestation information gets processed in the attestation service - PubsubMessage::Attestation(ref subnet_and_attestation) => { - let subnet = subnet_and_attestation.0; - let attestation = &subnet_and_attestation.1; - // checks if we have an aggregator for the slot. If so, we should process - // the attestation, else we just just propagate the Attestation. - let should_process = self - .attestation_service - .should_process_attestation(subnet, attestation); - self.send_to_router(RouterMessage::PubsubMessage( - id, - source, - message, - should_process, - )); - } - _ => { - // all else is sent to the router - self.send_to_router(RouterMessage::PubsubMessage( - id, source, message, true, - )); - } + }); + } + NetworkEvent::RPCFailed { id, peer_id } => { + self.send_to_router(RouterMessage::RPCFailed { + peer_id, + request_id: id, + }); + } + NetworkEvent::StatusPeer(peer_id) => { + self.send_to_router(RouterMessage::StatusPeer(peer_id)); + } + NetworkEvent::PubsubMessage { + id, + source, + message, + .. + } => { + match message { + // attestation information gets processed in the attestation service + PubsubMessage::Attestation(ref subnet_and_attestation) => { + let subnet = subnet_and_attestation.0; + let attestation = &subnet_and_attestation.1; + // checks if we have an aggregator for the slot. If so, we should process + // the attestation, else we just just propagate the Attestation. 
+ let should_process = self + .attestation_service + .should_process_attestation(subnet, attestation); + self.send_to_router(RouterMessage::PubsubMessage( + id, + source, + message, + should_process, + )); + } + _ => { + // all else is sent to the router + self.send_to_router(RouterMessage::PubsubMessage( + id, source, message, true, + )); } } - }, - Libp2pEvent::NewListenAddr(multiaddr) => { + } + NetworkEvent::NewListenAddr(multiaddr) => { self.network_globals .listen_multiaddrs .write() .push(multiaddr); } - Libp2pEvent::ZeroListeners => { + NetworkEvent::ZeroListeners => { let _ = shutdown_sender .send(ShutdownReason::Failure( "All listeners are closed. Unable to listen", @@ -588,7 +585,7 @@ impl NetworkService { id, reason, } => { - self.libp2p.respond_with_error(peer_id, id, error, reason); + self.libp2p.send_error_reponse(peer_id, id, error, reason); } NetworkMessage::UPnPMappingEstablished { tcp_socket, @@ -599,8 +596,6 @@ impl NetworkService { if let Some(tcp_socket) = tcp_socket { if let Err(e) = self .libp2p - .swarm - .behaviour_mut() .discovery_mut() .update_enr_tcp_port(tcp_socket.port()) { @@ -613,8 +608,6 @@ impl NetworkService { if let Some(udp_socket) = udp_socket { if let Err(e) = self .libp2p - .swarm - .behaviour_mut() .discovery_mut() .update_enr_udp_socket(udp_socket) { @@ -633,14 +626,11 @@ impl NetworkService { "message_id" => %message_id, "validation_result" => ?validation_result ); - self.libp2p - .swarm - .behaviour_mut() - .report_message_validation_result( - &propagation_source, - message_id, - validation_result, - ); + self.libp2p.report_message_validation_result( + &propagation_source, + message_id, + validation_result, + ); } NetworkMessage::Publish { messages } => { let mut topic_kinds = Vec::new(); @@ -655,7 +645,7 @@ impl NetworkService { "count" => messages.len(), "topics" => ?topic_kinds ); - self.libp2p.swarm.behaviour_mut().publish(messages); + self.libp2p.publish(messages); } NetworkMessage::ReportPeer { peer_id, @@ -693,7 
+683,7 @@ impl NetworkService { GossipEncoding::default(), fork_digest, ); - if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + if self.libp2p.subscribe(topic.clone()) { subscribed_topics.push(topic); } else { warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); @@ -706,10 +696,10 @@ impl NetworkService { for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { let subnet = Subnet::Attestation(SubnetId::new(subnet_id)); // Update the ENR bitfield - self.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); + self.libp2p.update_enr_subnet(subnet, true); for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + if self.libp2p.subscribe(topic.clone()) { subscribed_topics.push(topic); } else { warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); @@ -720,17 +710,14 @@ impl NetworkService { for subnet_id in 0..subnet_max { let subnet = Subnet::SyncCommittee(SyncSubnetId::new(subnet_id)); // Update the ENR bitfield - self.libp2p - .swarm - .behaviour_mut() - .update_enr_subnet(subnet, true); + self.libp2p.update_enr_subnet(subnet, true); for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new( subnet.into(), GossipEncoding::default(), fork_digest, ); - if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + if self.libp2p.subscribe(topic.clone()) { subscribed_topics.push(topic); } else { warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); @@ -782,8 +769,6 @@ impl NetworkService { if let Some(active_validators) = active_validators_opt { if self .libp2p - .swarm - .behaviour_mut() .update_gossipsub_parameters(active_validators, slot) .is_err() { @@ -811,33 +796,24 @@ impl NetworkService { for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), 
GossipEncoding::default(), fork_digest); - self.libp2p.swarm.behaviour_mut().subscribe(topic); + self.libp2p.subscribe(topic); } } SubnetServiceMessage::Unsubscribe(subnet) => { for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - self.libp2p.swarm.behaviour_mut().unsubscribe(topic); + self.libp2p.unsubscribe(topic); } } SubnetServiceMessage::EnrAdd(subnet) => { - self.libp2p - .swarm - .behaviour_mut() - .update_enr_subnet(subnet, true); + self.libp2p.update_enr_subnet(subnet, true); } SubnetServiceMessage::EnrRemove(subnet) => { - self.libp2p - .swarm - .behaviour_mut() - .update_enr_subnet(subnet, false); + self.libp2p.update_enr_subnet(subnet, false); } SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { - self.libp2p - .swarm - .behaviour_mut() - .discover_subnet_peers(subnets_to_discover); + self.libp2p.discover_subnet_peers(subnets_to_discover); } } } @@ -848,33 +824,24 @@ impl NetworkService { for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - self.libp2p.swarm.behaviour_mut().subscribe(topic); + self.libp2p.subscribe(topic); } } SubnetServiceMessage::Unsubscribe(subnet) => { for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - self.libp2p.swarm.behaviour_mut().unsubscribe(topic); + self.libp2p.unsubscribe(topic); } } SubnetServiceMessage::EnrAdd(subnet) => { - self.libp2p - .swarm - .behaviour_mut() - .update_enr_subnet(subnet, true); + self.libp2p.update_enr_subnet(subnet, true); } SubnetServiceMessage::EnrRemove(subnet) => { - self.libp2p - .swarm - .behaviour_mut() - .update_enr_subnet(subnet, false); + self.libp2p.update_enr_subnet(subnet, false); } SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { - self.libp2p - .swarm - .behaviour_mut() - 
.discover_subnet_peers(subnets_to_discover); + self.libp2p.discover_subnet_peers(subnets_to_discover); } } } @@ -892,10 +859,7 @@ impl NetworkService { ); fork_context.update_current_fork(*new_fork_name); - self.libp2p - .swarm - .behaviour_mut() - .update_fork_version(new_enr_fork_id); + self.libp2p.update_fork_version(new_enr_fork_id); // Reinitialize the next_fork_update self.next_fork_update = Box::pin(next_fork_delay(&self.beacon_chain).into()); @@ -944,7 +908,7 @@ fn next_fork_subscriptions_delay( impl Drop for NetworkService { fn drop(&mut self) { // network thread is terminating - let enrs = self.libp2p.swarm.behaviour_mut().enr_entries(); + let enrs = self.libp2p.enr_entries(); debug!( self.log, "Persisting DHT to store"; diff --git a/book/src/installation-source.md b/book/src/installation-source.md index 661035ca51..61eb206421 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -16,7 +16,7 @@ operating system. Install the following packages: ```bash -sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang +sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler ``` > Note: Lighthouse requires CMake v3.12 or newer, which isn't available in the package repositories @@ -32,13 +32,18 @@ sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clan brew install cmake ``` +1. Install protoc using Homebrew: +``` +brew install protobuf +``` + [Homebrew]: https://brew.sh/ #### Windows 1. Install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git). 1. Install the [Chocolatey](https://chocolatey.org/install) package manager for Windows. -1. Install Make, CMake and LLVM using Chocolatey: +1. 
Install Make, CMake, LLVM and protoc using Chocolatey: ``` choco install make @@ -52,10 +57,13 @@ choco install cmake --installargs 'ADD_CMAKE_TO_PATH=System' choco install llvm ``` +``` +choco install protoc +``` + These dependencies are for compiling Lighthouse natively on Windows. Lighthouse can also run successfully under the [Windows Subsystem for Linux (WSL)][WSL]. If using Ubuntu under WSL, you should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)](#ubuntu) section. - [WSL]: https://docs.microsoft.com/en-us/windows/wsl/about ## Build Lighthouse diff --git a/book/src/setup.md b/book/src/setup.md index e8c56623be..a1febe4a02 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -14,6 +14,8 @@ The additional requirements for developers are: don't have `ganache` available on your `PATH` or if ganache is older than v7. - [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by some dependencies. See [`Installation Guide`](./installation.md) for more info. +- [`protoc`](https://github.com/protocolbuffers/protobuf/releases) required for + the networking stack. - [`java 11 runtime`](https://openjdk.java.net/projects/jdk/). 11 is the minimum, used by web3signer_tests. 
diff --git a/scripts/cross/aarch64-unknown-linux-gnu.dockerfile b/scripts/cross/aarch64-unknown-linux-gnu.dockerfile new file mode 100644 index 0000000000..691639cd41 --- /dev/null +++ b/scripts/cross/aarch64-unknown-linux-gnu.dockerfile @@ -0,0 +1,14 @@ +ARG CROSS_BASE_IMAGE +FROM $CROSS_BASE_IMAGE + +RUN apt-get update -y && apt-get upgrade -y + +RUN apt-get install -y unzip && \ + PB_REL="https://github.com/protocolbuffers/protobuf/releases" && \ + curl -L $PB_REL/download/v3.15.8/protoc-3.15.8-linux-aarch_64.zip -o protoc.zip && \ + unzip protoc.zip -d /usr && \ + chmod +x /usr/bin/protoc + +RUN apt-get install -y cmake clang-3.9 + +ENV PROTOC=/usr/bin/protoc diff --git a/scripts/cross/x86_64-unknown-linux-gnu.dockerfile b/scripts/cross/x86_64-unknown-linux-gnu.dockerfile new file mode 100644 index 0000000000..5472b980ba --- /dev/null +++ b/scripts/cross/x86_64-unknown-linux-gnu.dockerfile @@ -0,0 +1,14 @@ +ARG CROSS_BASE_IMAGE +FROM $CROSS_BASE_IMAGE + +RUN apt-get update -y && apt-get upgrade -y + +RUN apt-get install -y unzip && \ + PB_REL="https://github.com/protocolbuffers/protobuf/releases" && \ + curl -L $PB_REL/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip -o protoc.zip && \ + unzip protoc.zip -d /usr && \ + chmod +x /usr/bin/protoc + +RUN apt-get install -y cmake clang-3.9 + +ENV PROTOC=/usr/bin/protoc From 01b6bf7a2d539aee1ab3b96990a9c455cf094395 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 29 Sep 2022 01:50:12 +0000 Subject: [PATCH 12/27] Improve logging a little (#3619) Some of the logs in combination with others could be improved. It will save some time debugging by improving the wording slightly. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- .../network/src/beacon_processor/worker/rpc_methods.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 2a449f64ba..3eecc9a0dc 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2391,7 +2391,7 @@ impl BeaconChain { Ok(verified) => { debug!( chain.log, - "Successfully processed gossip block"; + "Successfully verified gossip block"; "graffiti" => graffiti_string, "slot" => slot, "root" => ?verified.block_root(), diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 8ca9c35e47..37aee01716 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -338,7 +338,7 @@ impl Worker { if blocks_sent < (req.count as usize) { debug!( self.log, - "BlocksByRange Response processed"; + "BlocksByRange outgoing response processed"; "peer" => %peer_id, "msg" => "Failed to return all requested blocks", "start_slot" => req.start_slot, @@ -349,7 +349,7 @@ impl Worker { } else { debug!( self.log, - "BlocksByRange Response processed"; + "BlocksByRange outgoing response processed"; "peer" => %peer_id, "start_slot" => req.start_slot, "current_slot" => current_slot, From 27bb9ff07ddac28b19e4357d3cb3aed4938fb423 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 29 Sep 2022 01:50:13 +0000 Subject: [PATCH 13/27] Handle Lodestar's new agent string (#3620) ## Issue Addressed #3561 ## Proposed Changes Recognize Lodestars new agent string and appropriately count these peers as lodestar peers. 
--- .../lighthouse_network/src/peer_manager/peerdb/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs index f15f38daa6..dcc121b7f4 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs @@ -181,7 +181,7 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String } (kind, version, os_version) } - Some("js-libp2p") => { + Some("js-libp2p") | Some("lodestar") => { let kind = ClientKind::Lodestar; let mut version = String::from("unknown"); let mut os_version = version.clone(); From 8d325e700b2df04d3a4ac51b6bd4b9f07df11f22 Mon Sep 17 00:00:00 2001 From: tim gretler Date: Thu, 29 Sep 2022 06:13:30 +0000 Subject: [PATCH 14/27] Use #!/usr/bin/env everywhere for local testnets (#3606) Full local testnet support for people that don't have `/bin/bash` --- scripts/local_testnet/clean.sh | 2 +- scripts/local_testnet/dump_logs.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/local_testnet/clean.sh b/scripts/local_testnet/clean.sh index b01b1a2dff..6db8753d02 100755 --- a/scripts/local_testnet/clean.sh +++ b/scripts/local_testnet/clean.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Deletes all files associated with the local testnet. 
diff --git a/scripts/local_testnet/dump_logs.sh b/scripts/local_testnet/dump_logs.sh index dc5f4edd38..64b7942fb6 100755 --- a/scripts/local_testnet/dump_logs.sh +++ b/scripts/local_testnet/dump_logs.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Print all the logs output from local testnet From f77e3bc0add947ed9a04d553734bfb6a01f46f92 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 29 Sep 2022 06:13:33 +0000 Subject: [PATCH 15/27] Add maxperf build profile (#3608) ## Proposed Changes Add a new Cargo compilation profile called `maxperf` which enables more aggressive compiler optimisations at the expense of compilation time. Some rough initial benchmarks show that this can provide up to a 25% reduction to run time for CPU bound tasks like block processing: https://docs.google.com/spreadsheets/d/15jHuZe7lLHhZq9Nw8kc6EL0Qh_N_YAYqkW2NQ_Afmtk/edit The numbers in that spreadsheet compare the `consensus-context` branch from #3604 to the same branch compiled with the `maxperf` profile using: ``` PROFILE=maxperf make install-lcli ``` ## Additional Info The downsides of the maxperf profile are: - It increases compile times substantially, which will particularly impact low-spec hardware. Compiling `lcli` is about 3x slower. Compiling Lighthouse is about 5x slower on my 5950X: 17m 38s rather than 3m 28s. As a result I think we should not enable this everywhere by default. - **Option 1**: enable by default for our released binaries. This gives the majority of users the fastest version of `lighthouse` possible, at the expense of slowing down our release CI. Source builds will continue to use the default `release` profile unless users opt-in to `maxperf`. - **Option 2**: enable by default for source builds. This gives users building from source an edge, but makes them pay for it with compilation time. I think I would prefer Option 1. I'll try doing some benchmarking to see how long a maxperf build of Lighthouse would take on GitHub actions. 
Credit to Nicholas Nethercote for documenting these options in the Rust Performance Book: https://nnethercote.github.io/perf-book/build-configuration.html. --- .github/workflows/docker.yml | 9 ++++++--- .github/workflows/release.yml | 30 +++++++++++++++++++----------- Cargo.toml | 6 ++++++ Makefile | 18 ++++++++++++------ book/src/cross-compiling.md | 6 ++++++ book/src/installation-source.md | 21 ++++++++++++++++++++- 6 files changed, 69 insertions(+), 21 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index b07f2ad3d4..8d72319c60 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -51,6 +51,9 @@ jobs: aarch64-portable, x86_64, x86_64-portable] + include: + - profile: maxperf + needs: [extract-version] env: # We need to enable experimental docker features in order to use `docker buildx` @@ -67,17 +70,17 @@ jobs: - name: Cross build Lighthouse binary run: | cargo install cross - make build-${{ matrix.binary }} + env CROSS_PROFILE=${{ matrix.profile }} make build-${{ matrix.binary }} - name: Move cross-built binary into Docker scope (if ARM) if: startsWith(matrix.binary, 'aarch64') run: | mkdir ./bin; - mv ./target/aarch64-unknown-linux-gnu/release/lighthouse ./bin; + mv ./target/aarch64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ./bin; - name: Move cross-built binary into Docker scope (if x86_64) if: startsWith(matrix.binary, 'x86_64') run: | mkdir ./bin; - mv ./target/x86_64-unknown-linux-gnu/release/lighthouse ./bin; + mv ./target/x86_64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ./bin; - name: Map aarch64 to arm64 short arch if: startsWith(matrix.binary, 'aarch64') run: echo "SHORT_ARCH=arm64" >> $GITHUB_ENV diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 341e53354c..158cbaa08b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -35,20 +35,28 @@ jobs: include: - arch: aarch64-unknown-linux-gnu platform: ubuntu-latest + 
profile: maxperf - arch: aarch64-unknown-linux-gnu-portable platform: ubuntu-latest + profile: maxperf - arch: x86_64-unknown-linux-gnu platform: ubuntu-latest + profile: maxperf - arch: x86_64-unknown-linux-gnu-portable platform: ubuntu-latest + profile: maxperf - arch: x86_64-apple-darwin platform: macos-latest + profile: maxperf - arch: x86_64-apple-darwin-portable platform: macos-latest + profile: maxperf - arch: x86_64-windows platform: windows-2019 + profile: maxperf - arch: x86_64-windows-portable platform: windows-2019 + profile: maxperf runs-on: ${{ matrix.platform }} needs: extract-version @@ -83,49 +91,49 @@ jobs: if: matrix.arch == 'aarch64-unknown-linux-gnu-portable' run: | cargo install cross - make build-aarch64-portable + env CROSS_PROFILE=${{ matrix.profile }} make build-aarch64-portable - name: Build Lighthouse for aarch64-unknown-linux-gnu if: matrix.arch == 'aarch64-unknown-linux-gnu' run: | cargo install cross - make build-aarch64 + env CROSS_PROFILE=${{ matrix.profile }} make build-aarch64 - name: Build Lighthouse for x86_64-unknown-linux-gnu-portable if: matrix.arch == 'x86_64-unknown-linux-gnu-portable' run: | cargo install cross - make build-x86_64-portable + env CROSS_PROFILE=${{ matrix.profile }} make build-x86_64-portable - name: Build Lighthouse for x86_64-unknown-linux-gnu if: matrix.arch == 'x86_64-unknown-linux-gnu' run: | cargo install cross - make build-x86_64 + env CROSS_PROFILE=${{ matrix.profile }} make build-x86_64 - name: Move cross-compiled binary if: startsWith(matrix.arch, 'aarch64') - run: mv target/aarch64-unknown-linux-gnu/release/lighthouse ~/.cargo/bin/lighthouse + run: mv target/aarch64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ~/.cargo/bin/lighthouse - name: Move cross-compiled binary if: startsWith(matrix.arch, 'x86_64-unknown-linux-gnu') - run: mv target/x86_64-unknown-linux-gnu/release/lighthouse ~/.cargo/bin/lighthouse + run: mv target/x86_64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse 
~/.cargo/bin/lighthouse - name: Build Lighthouse for x86_64-apple-darwin portable if: matrix.arch == 'x86_64-apple-darwin-portable' - run: cargo install --path lighthouse --force --locked --features portable,gnosis + run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }} - name: Build Lighthouse for x86_64-apple-darwin modern if: matrix.arch == 'x86_64-apple-darwin' - run: cargo install --path lighthouse --force --locked --features modern,gnosis + run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile ${{ matrix.profile }} - name: Build Lighthouse for Windows portable if: matrix.arch == 'x86_64-windows-portable' - run: cargo install --path lighthouse --force --locked --features portable,gnosis + run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }} - name: Build Lighthouse for Windows modern if: matrix.arch == 'x86_64-windows' - run: cargo install --path lighthouse --force --locked --features modern,gnosis + run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile ${{ matrix.profile }} - name: Configure GPG and create artifacts if: startsWith(matrix.arch, 'x86_64-windows') != true @@ -212,7 +220,7 @@ jobs: ## Testing Checklist (DELETE ME) - + - [ ] Run on synced Prater Sigma Prime nodes. - [ ] Run on synced Canary (mainnet) Sigma Prime nodes. - [ ] Resync a Prater node. 
diff --git a/Cargo.toml b/Cargo.toml index 415c721d99..27120e217f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -100,3 +100,9 @@ eth2_hashing = { path = "crypto/eth2_hashing" } tree_hash = { path = "consensus/tree_hash" } tree_hash_derive = { path = "consensus/tree_hash_derive" } eth2_serde_utils = { path = "consensus/serde_utils" } + +[profile.maxperf] +inherits = "release" +lto = "fat" +codegen-units = 1 +incremental = false diff --git a/Makefile b/Makefile index 3bf23a4cea..33077a6c93 100644 --- a/Makefile +++ b/Makefile @@ -17,6 +17,12 @@ CLIPPY_PINNED_NIGHTLY=nightly-2022-05-19 # List of features to use when cross-compiling. Can be overridden via the environment. CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx +# Cargo profile for Cross builds. Default is for local builds, CI uses an override. +CROSS_PROFILE ?= release + +# Cargo profile for regular builds. +PROFILE ?= release + # List of all hard forks. This list is used to set env variables for several tests so that # they run for different forks. FORKS=phase0 altair merge @@ -25,11 +31,11 @@ FORKS=phase0 altair merge # # Binaries will most likely be found in `./target/release` install: - cargo install --path lighthouse --force --locked --features "$(FEATURES)" + cargo install --path lighthouse --force --locked --features "$(FEATURES)" --profile "$(PROFILE)" # Builds the lcli binary in release (optimized). install-lcli: - cargo install --path lcli --force --locked --features "$(FEATURES)" + cargo install --path lcli --force --locked --features "$(FEATURES)" --profile "$(PROFILE)" # The following commands use `cross` to build a cross-compile. # @@ -45,13 +51,13 @@ install-lcli: # optimized CPU functions that may not be available on some systems. This # results in a more portable binary with ~20% slower BLS verification. 
build-x86_64: - cross build --release --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" + cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" build-x86_64-portable: - cross build --release --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" + cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" build-aarch64: - cross build --release --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" + cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" build-aarch64-portable: - cross build --release --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" + cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" # Create a `.tar.gz` containing a binary for a specific target. define tarball_release_binary diff --git a/book/src/cross-compiling.md b/book/src/cross-compiling.md index 8ccf23da9d..7cf7f4feb1 100644 --- a/book/src/cross-compiling.md +++ b/book/src/cross-compiling.md @@ -44,3 +44,9 @@ in `lighthouse/target/aarch64-unknown-linux-gnu/release`. When using the makefile the set of features used for building can be controlled with the environment variable `CROSS_FEATURES`. See [Feature Flags](./installation-source.md#feature-flags) for available features. + +## Compilation Profiles + +When using the makefile the build profile can be controlled with the environment variable +`CROSS_PROFILE`. See [Compilation Profiles](./installation-source.md#compilation-profiles) for +available profiles. 
diff --git a/book/src/installation-source.md b/book/src/installation-source.md index 61eb206421..b3d83ef9f9 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -120,7 +120,7 @@ You can customise the features that Lighthouse is built with using the `FEATURES variable. E.g. ``` -env FEATURES="gnosis,slasher-lmdb" make +FEATURES=gnosis,slasher-lmdb make ``` Commonly used features include: @@ -131,6 +131,25 @@ Commonly used features include: * `slasher-mdbx`: support for the MDBX slasher backend (enabled by default). * `slasher-lmdb`: support for the LMDB slasher backend. +## Compilation Profiles + +You can customise the compiler settings used to compile Lighthouse via +[Cargo profiles](https://doc.rust-lang.org/cargo/reference/profiles.html). + +Lighthouse includes several profiles which can be selected via the `PROFILE` environment variable. + +* `release`: default for source builds, enables most optimisations while not taking too long to + compile. +* `maxperf`: default for binary releases, enables aggressive optimisations including full LTO. + Although compiling with this profile improves some benchmarks by around 20% compared to `release`, + it imposes a _significant_ cost at compile time and is only recommended if you have a fast CPU. + +To compile with `maxperf`: + +``` +PROFILE=maxperf make +``` + ## Troubleshooting ### Command is not found From ff145b986fa423a76ffc0f01a2f4d3e0d11298ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marius=20Kj=C3=A6rstad?= Date: Thu, 29 Sep 2022 06:13:35 +0000 Subject: [PATCH 16/27] Changed http:// to https:// on mailing list link (#3610) Changed http:// to https:// on mailing list link in README.md Co-authored-by: Michael Sproul --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index aa3cc020e1..859d5c4c63 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ of the Lighthouse book. 
The best place for discussion is the [Lighthouse Discord server](https://discord.gg/cyAszAh). -Sign up to the [Lighthouse Development Updates](http://eepurl.com/dh9Lvb) mailing list for email +Sign up to the [Lighthouse Development Updates](https://eepurl.com/dh9Lvb/) mailing list for email notifications about releases, network status and other important information. Encrypt sensitive messages using our [PGP From 58bd2f76d0819c7147684cb10f994480545ee769 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 3 Oct 2022 23:09:25 +0000 Subject: [PATCH 17/27] Ensure protoc is installed for release CI (#3621) ## Issue Addressed The release CI is currently broken due to the addition of the `protoc` dependency. Here's a failure of the release flow running on my fork: https://github.com/michaelsproul/lighthouse/actions/runs/3155541478/jobs/5134317334 ## Proposed Changes - Install `protoc` on Windows and Mac so that it's available for `cargo install`. - Install an x86_64 binary in the Cross image for the aarch64 platform: we need a binary that runs on the host, _not_ on the target. - Fix `macos` local testnet CI by using the Github API key to dodge rate limiting (this issue: https://github.com/actions/runner-images/issues/602). 
--- .github/workflows/local-testnet.yml | 2 ++ .github/workflows/release.yml | 9 +++++++++ Cross.toml | 4 ++-- lcli/Dockerfile | 2 +- ..._64-unknown-linux-gnu.dockerfile => Dockerfile} | 0 scripts/cross/aarch64-unknown-linux-gnu.dockerfile | 14 -------------- 6 files changed, 14 insertions(+), 17 deletions(-) rename scripts/cross/{x86_64-unknown-linux-gnu.dockerfile => Dockerfile} (100%) delete mode 100644 scripts/cross/aarch64-unknown-linux-gnu.dockerfile diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index c688c0df33..170bd9e212 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -22,6 +22,8 @@ jobs: run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache run: npm install ganache@latest --global diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 158cbaa08b..6edb1f76c1 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -83,6 +83,15 @@ jobs: if: startsWith(matrix.arch, 'x86_64-windows') run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV + # ============================== + # Windows & Mac dependencies + # ============================== + - name: Install Protoc + if: contains(matrix.arch, 'darwin') || contains(matrix.arch, 'windows') + uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + # ============================== # Builds # ============================== diff --git a/Cross.toml b/Cross.toml index 963e22d0e0..9c3e441cba 100644 --- a/Cross.toml +++ b/Cross.toml @@ -1,5 +1,5 @@ [target.x86_64-unknown-linux-gnu] -dockerfile = './scripts/cross/x86_64-unknown-linux-gnu.dockerfile' +dockerfile = './scripts/cross/Dockerfile' [target.aarch64-unknown-linux-gnu] -dockerfile = './scripts/cross/aarch64-unknown-linux-gnu.dockerfile' +dockerfile = 
'./scripts/cross/Dockerfile' diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 8fd3567cdc..1129e710f4 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -2,7 +2,7 @@ # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` FROM rust:1.62.1-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse ARG PORTABLE ENV PORTABLE $PORTABLE diff --git a/scripts/cross/x86_64-unknown-linux-gnu.dockerfile b/scripts/cross/Dockerfile similarity index 100% rename from scripts/cross/x86_64-unknown-linux-gnu.dockerfile rename to scripts/cross/Dockerfile diff --git a/scripts/cross/aarch64-unknown-linux-gnu.dockerfile b/scripts/cross/aarch64-unknown-linux-gnu.dockerfile deleted file mode 100644 index 691639cd41..0000000000 --- a/scripts/cross/aarch64-unknown-linux-gnu.dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -ARG CROSS_BASE_IMAGE -FROM $CROSS_BASE_IMAGE - -RUN apt-get update -y && apt-get upgrade -y - -RUN apt-get install -y unzip && \ - PB_REL="https://github.com/protocolbuffers/protobuf/releases" && \ - curl -L $PB_REL/download/v3.15.8/protoc-3.15.8-linux-aarch_64.zip -o protoc.zip && \ - unzip protoc.zip -d /usr && \ - chmod +x /usr/bin/protoc - -RUN apt-get install -y cmake clang-3.9 - -ENV PROTOC=/usr/bin/protoc From 8728c40102dabefb6a43c8335da0051a0d986f0d Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 4 Oct 2022 08:33:39 +0000 Subject: [PATCH 18/27] Remove fallback support from eth1 service (#3594) ## Issue Addressed N/A ## Proposed Changes With https://github.com/sigp/lighthouse/pull/3214 we made it such that you can either have 1 auth endpoint or multiple non auth endpoints. 
Now that we are post merge on all networks (testnets and mainnet), we cannot progress a chain without a dedicated auth execution layer connection so there is no point in having a non-auth eth1-endpoint for syncing deposit cache. This code removes all fallback related code in the eth1 service. We still keep the single non-auth endpoint since it's useful for testing. ## Additional Info This removes all eth1 fallback related metrics that were relevant for the monitoring service, so we might need to change the api upstream. --- Cargo.lock | 8 - Cargo.toml | 1 - beacon_node/beacon_chain/src/builder.rs | 2 +- beacon_node/beacon_chain/src/eth1_chain.rs | 17 +- beacon_node/client/src/builder.rs | 12 +- beacon_node/eth1/Cargo.toml | 1 - beacon_node/eth1/src/inner.rs | 10 +- beacon_node/eth1/src/metrics.rs | 20 - beacon_node/eth1/src/service.rs | 403 ++++-------------- beacon_node/eth1/tests/test.rs | 352 +++------------ .../genesis/src/eth1_genesis_service.rs | 15 +- beacon_node/genesis/tests/tests.rs | 10 +- beacon_node/http_api/tests/common.rs | 3 +- beacon_node/src/cli.rs | 6 +- beacon_node/src/config.rs | 17 +- beacon_node/src/lib.rs | 2 +- common/fallback/Cargo.toml | 10 - common/fallback/src/lib.rs | 63 --- common/monitoring_api/src/gather.rs | 20 + lcli/src/eth1_genesis.rs | 17 +- lighthouse/tests/beacon_node.rs | 23 +- testing/simulator/src/eth1_sim.rs | 18 +- 22 files changed, 228 insertions(+), 802 deletions(-) delete mode 100644 common/fallback/Cargo.toml delete mode 100644 common/fallback/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index cfefa6c116..8fb8c54929 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1661,7 +1661,6 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "execution_layer", - "fallback", "futures", "hex", "lazy_static", @@ -2117,13 +2116,6 @@ dependencies = [ "futures", ] -[[package]] -name = "fallback" -version = "0.1.0" -dependencies = [ - "itertools", -] - [[package]] name = "fallible-iterator" version = "0.2.0" diff --git a/Cargo.toml 
b/Cargo.toml index 27120e217f..02cf4d9436 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,7 +43,6 @@ members = [ "common/unused_port", "common/validator_dir", "common/warp_utils", - "common/fallback", "common/monitoring_api", "database_manager", diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 916ebd2359..051b84f816 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -897,7 +897,7 @@ where .ok_or("dummy_eth1_backend requires a log")?; let backend = - CachingEth1Backend::new(Eth1Config::default(), log.clone(), self.spec.clone()); + CachingEth1Backend::new(Eth1Config::default(), log.clone(), self.spec.clone())?; self.eth1_chain = Some(Eth1Chain::new_dummy(backend)); diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 8dd101b726..3d24becc84 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -431,12 +431,13 @@ impl CachingEth1Backend { /// Instantiates `self` with empty caches. /// /// Does not connect to the eth1 node or start any tasks to keep the cache updated. - pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Self { - Self { - core: HttpService::new(config, log.clone(), spec), + pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Result { + Ok(Self { + core: HttpService::new(config, log.clone(), spec) + .map_err(|e| format!("Failed to create eth1 http service: {:?}", e))?, log, _phantom: PhantomData, - } + }) } /// Starts the routine which connects to the external eth1 node and updates the caches. 
@@ -730,11 +731,9 @@ mod test { }; let log = null_logger().unwrap(); - Eth1Chain::new(CachingEth1Backend::new( - eth1_config, - log, - MainnetEthSpec::default_spec(), - )) + Eth1Chain::new( + CachingEth1Backend::new(eth1_config, log, MainnetEthSpec::default_spec()).unwrap(), + ) } fn get_deposit_log(i: u64, spec: &ChainSpec) -> DepositLog { diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 752ba3b7bc..a46d91ad1e 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -370,7 +370,7 @@ where info!( context.log(), "Waiting for eth2 genesis from eth1"; - "eth1_endpoints" => format!("{:?}", &config.eth1.endpoints), + "eth1_endpoints" => format!("{:?}", &config.eth1.endpoint), "contract_deploy_block" => config.eth1.deposit_contract_deploy_block, "deposit_contract" => &config.eth1.deposit_contract_address ); @@ -379,7 +379,7 @@ where config.eth1, context.log().clone(), context.eth2_config().spec.clone(), - ); + )?; // If the HTTP API server is enabled, start an instance of it where it only // contains a reference to the eth1 service (all non-eth1 endpoints will fail @@ -875,7 +875,7 @@ where CachingEth1Backend::from_service(eth1_service_from_genesis) } else if config.purge_cache { - CachingEth1Backend::new(config, context.log().clone(), spec) + CachingEth1Backend::new(config, context.log().clone(), spec)? } else { beacon_chain_builder .get_persisted_eth1_backend()? @@ -889,11 +889,7 @@ where .map(|chain| chain.into_backend()) }) .unwrap_or_else(|| { - Ok(CachingEth1Backend::new( - config, - context.log().clone(), - spec.clone(), - )) + CachingEth1Backend::new(config, context.log().clone(), spec.clone()) })? 
}; diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 403869cc9c..930301256c 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -31,5 +31,4 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics"} lazy_static = "1.4.0" task_executor = { path = "../../common/task_executor" } eth2 = { path = "../../common/eth2" } -fallback = { path = "../../common/fallback" } sensitive_url = { path = "../../common/sensitive_url" } diff --git a/beacon_node/eth1/src/inner.rs b/beacon_node/eth1/src/inner.rs index 9a57f450e1..b0a951bef0 100644 --- a/beacon_node/eth1/src/inner.rs +++ b/beacon_node/eth1/src/inner.rs @@ -1,14 +1,14 @@ +use crate::service::endpoint_from_config; use crate::Config; use crate::{ block_cache::{BlockCache, Eth1Block}, deposit_cache::{DepositCache, SszDepositCache}, - service::EndpointsCache, }; +use execution_layer::HttpJsonRpc; use parking_lot::RwLock; use ssz::four_byte_option_impl; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::sync::Arc; use types::ChainSpec; // Define "legacy" implementations of `Option` which use four bytes for encoding the union @@ -31,11 +31,10 @@ impl DepositUpdater { } } -#[derive(Default)] pub struct Inner { pub block_cache: RwLock, pub deposit_cache: RwLock, - pub endpoints_cache: RwLock>>, + pub endpoint: HttpJsonRpc, pub config: RwLock, pub remote_head_block: RwLock>, pub spec: ChainSpec, @@ -96,7 +95,8 @@ impl SszEth1Cache { cache: self.deposit_cache.to_deposit_cache()?, last_processed_block: self.last_processed_block, }), - endpoints_cache: RwLock::new(None), + endpoint: endpoint_from_config(&config) + .map_err(|e| format!("Failed to create endpoint: {:?}", e))?, // Set the remote head_block zero when creating a new instance. We only care about // present and future eth1 nodes. 
remote_head_block: RwLock::new(None), diff --git a/beacon_node/eth1/src/metrics.rs b/beacon_node/eth1/src/metrics.rs index f3d9483b2b..5441b40d7e 100644 --- a/beacon_node/eth1/src/metrics.rs +++ b/beacon_node/eth1/src/metrics.rs @@ -17,16 +17,6 @@ lazy_static! { pub static ref HIGHEST_PROCESSED_DEPOSIT_BLOCK: Result = try_create_int_gauge("eth1_highest_processed_deposit_block", "Number of the last block checked for deposits"); - /* - * Eth1 endpoint errors - */ - pub static ref ENDPOINT_ERRORS: Result = try_create_int_counter_vec( - "eth1_endpoint_errors", "The number of eth1 request errors for each endpoint", &["endpoint"] - ); - pub static ref ENDPOINT_REQUESTS: Result = try_create_int_counter_vec( - "eth1_endpoint_requests", "The number of eth1 requests for each endpoint", &["endpoint"] - ); - /* * Eth1 rpc connection */ @@ -35,14 +25,4 @@ lazy_static! { "sync_eth1_connected", "Set to 1 if connected to an eth1 node, otherwise set to 0" ); - pub static ref ETH1_FALLBACK_CONFIGURED: Result = try_create_int_gauge( - "sync_eth1_fallback_configured", "Number of configured eth1 fallbacks" - ); - - // Note: This metric only checks if an eth1 fallback is configured, not if it is connected and synced. - // Checking for liveness of the fallback would require moving away from lazy checking of fallbacks. 
- pub static ref ETH1_FALLBACK_CONNECTED: Result = try_create_int_gauge( - "eth1_sync_fallback_connected", "Set to 1 if an eth1 fallback is connected, otherwise set to 0" - ); - } diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index a4d4e5e254..fae6eef9c2 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -9,19 +9,16 @@ use execution_layer::http::{ deposit_methods::{BlockQuery, Eth1Id}, HttpJsonRpc, }; -use fallback::{Fallback, FallbackError}; use futures::future::TryFutureExt; use parking_lot::{RwLock, RwLockReadGuard}; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; use slog::{debug, error, info, trace, warn, Logger}; use std::fmt::Debug; -use std::future::Future; use std::ops::{Range, RangeInclusive}; use std::path::PathBuf; use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; -use tokio::sync::RwLock as TRwLock; use tokio::time::{interval_at, Duration, Instant}; use types::{ChainSpec, EthSpec, Unsigned}; @@ -53,127 +50,12 @@ const CACHE_FACTOR: u64 = 2; #[derive(Debug, PartialEq, Clone)] pub enum EndpointError { RequestFailed(String), - WrongNetworkId, WrongChainId, FarBehind, } type EndpointState = Result<(), EndpointError>; -pub struct EndpointWithState { - client: HttpJsonRpc, - state: TRwLock>, -} - -impl EndpointWithState { - pub fn new(client: HttpJsonRpc) -> Self { - Self { - client, - state: TRwLock::new(None), - } - } -} - -async fn reset_endpoint_state(endpoint: &EndpointWithState) { - *endpoint.state.write().await = None; -} - -async fn get_state(endpoint: &EndpointWithState) -> Option { - endpoint.state.read().await.clone() -} - -/// A cache structure to lazily check usability of endpoints. An endpoint is usable if it is -/// reachable and has the correct network id and chain id. Emits a `WARN` log if a checked endpoint -/// is not usable. 
-pub struct EndpointsCache { - pub fallback: Fallback, - pub config_chain_id: Eth1Id, - pub log: Logger, -} - -impl EndpointsCache { - /// Checks the usability of an endpoint. Results get cached and therefore only the first call - /// for each endpoint does the real check. - async fn state(&self, endpoint: &EndpointWithState) -> EndpointState { - if let Some(result) = endpoint.state.read().await.clone() { - return result; - } - let mut value = endpoint.state.write().await; - if let Some(result) = value.clone() { - return result; - } - crate::metrics::inc_counter_vec( - &crate::metrics::ENDPOINT_REQUESTS, - &[&endpoint.client.to_string()], - ); - let state = endpoint_state(&endpoint.client, &self.config_chain_id, &self.log).await; - *value = Some(state.clone()); - if state.is_err() { - crate::metrics::inc_counter_vec( - &crate::metrics::ENDPOINT_ERRORS, - &[&endpoint.client.to_string()], - ); - crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 0); - } else { - crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 1); - } - state - } - - /// Return the first successful result along with number of previous errors encountered - /// or all the errors encountered if every none of the fallback endpoints return required output. 
- pub async fn first_success<'a, F, O, R>( - &'a self, - func: F, - ) -> Result<(O, usize), FallbackError> - where - F: Fn(&'a HttpJsonRpc) -> R, - R: Future>, - { - let func = &func; - self.fallback - .first_success(|endpoint| async move { - match self.state(endpoint).await { - Ok(()) => { - let endpoint_str = &endpoint.client.to_string(); - crate::metrics::inc_counter_vec( - &crate::metrics::ENDPOINT_REQUESTS, - &[endpoint_str], - ); - match func(&endpoint.client).await { - Ok(t) => Ok(t), - Err(t) => { - crate::metrics::inc_counter_vec( - &crate::metrics::ENDPOINT_ERRORS, - &[endpoint_str], - ); - if let SingleEndpointError::EndpointError(e) = &t { - *endpoint.state.write().await = Some(Err(e.clone())); - } else { - // A non-`EndpointError` error occurred, so reset the state. - reset_endpoint_state(endpoint).await; - } - Err(t) - } - } - } - Err(e) => Err(SingleEndpointError::EndpointError(e)), - } - }) - .await - } - - pub async fn reset_errorred_endpoints(&self) { - for endpoint in &self.fallback.servers { - if let Some(state) = get_state(endpoint).await { - if state.is_err() { - reset_endpoint_state(endpoint).await; - } - } - } - } -} - /// Returns `Ok` if the endpoint is usable, i.e. is reachable and has a correct network id and /// chain id. Otherwise it returns `Err`. async fn endpoint_state( @@ -186,7 +68,6 @@ async fn endpoint_state( log, "Error connecting to eth1 node endpoint"; "endpoint" => %endpoint, - "action" => "trying fallbacks" ); EndpointError::RequestFailed(e) }; @@ -202,7 +83,6 @@ async fn endpoint_state( log, "Remote execution node is not synced"; "endpoint" => %endpoint, - "action" => "trying fallbacks" ); return Err(EndpointError::FarBehind); } @@ -211,7 +91,6 @@ async fn endpoint_state( log, "Invalid execution chain ID. 
Please switch to correct chain ID on endpoint"; "endpoint" => %endpoint, - "action" => "trying fallbacks", "expected" => ?config_chain_id, "received" => ?chain_id, ); @@ -240,7 +119,7 @@ async fn get_remote_head_and_new_block_ranges( Option>, Option>, ), - SingleEndpointError, + Error, > { let remote_head_block = download_eth1_block(endpoint, service.inner.clone(), None).await?; let now = SystemTime::now() @@ -253,18 +132,16 @@ async fn get_remote_head_and_new_block_ranges( "Execution endpoint is not synced"; "endpoint" => %endpoint, "last_seen_block_unix_timestamp" => remote_head_block.timestamp, - "action" => "trying fallback" ); - return Err(SingleEndpointError::EndpointError(EndpointError::FarBehind)); + return Err(Error::EndpointError(EndpointError::FarBehind)); } let handle_remote_not_synced = |e| { - if let SingleEndpointError::RemoteNotSynced { .. } = e { + if let Error::RemoteNotSynced { .. } = e { warn!( service.log, "Execution endpoint is not synced"; "endpoint" => %endpoint, - "action" => "trying fallbacks" ); } e @@ -296,16 +173,25 @@ async fn relevant_new_block_numbers_from_endpoint( endpoint: &HttpJsonRpc, service: &Service, head_type: HeadType, -) -> Result>, SingleEndpointError> { +) -> Result>, Error> { let remote_highest_block = endpoint .get_block_number(Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS)) - .map_err(SingleEndpointError::GetBlockNumberFailed) + .map_err(Error::GetBlockNumberFailed) .await?; service.relevant_new_block_numbers(remote_highest_block, None, head_type) } #[derive(Debug, PartialEq)] -pub enum SingleEndpointError { +pub enum Error { + /// There was an inconsistency when adding a block to the cache. + FailedToInsertEth1Block(BlockCacheError), + /// There was an inconsistency when adding a deposit to the cache. + FailedToInsertDeposit(DepositCacheError), + /// A log downloaded from the eth1 contract was not well formed. 
+ FailedToParseDepositLog { + block_range: Range, + error: String, + }, /// Endpoint is currently not functional. EndpointError(EndpointError), /// The remote node is less synced that we expect, it is not useful until has done more @@ -325,21 +211,6 @@ pub enum SingleEndpointError { GetDepositCountFailed(String), /// Failed to read the deposit contract root from the eth1 node. GetDepositLogsFailed(String), -} - -#[derive(Debug, PartialEq)] -pub enum Error { - /// There was an inconsistency when adding a block to the cache. - FailedToInsertEth1Block(BlockCacheError), - /// There was an inconsistency when adding a deposit to the cache. - FailedToInsertDeposit(DepositCacheError), - /// A log downloaded from the eth1 contract was not well formed. - FailedToParseDepositLog { - block_range: Range, - error: String, - }, - /// All possible endpoints returned a `SingleEndpointError`. - FallbackError(FallbackError), /// There was an unexpected internal error. Internal(String), } @@ -367,21 +238,14 @@ pub enum Eth1Endpoint { jwt_id: Option, jwt_version: Option, }, - NoAuth(Vec), + NoAuth(SensitiveUrl), } impl Eth1Endpoint { - fn len(&self) -> usize { + pub fn get_endpoint(&self) -> SensitiveUrl { match &self { - Self::Auth { .. } => 1, - Self::NoAuth(urls) => urls.len(), - } - } - - pub fn get_endpoints(&self) -> Vec { - match &self { - Self::Auth { endpoint, .. } => vec![endpoint.clone()], - Self::NoAuth(endpoints) => endpoints.clone(), + Self::Auth { endpoint, .. } => endpoint.clone(), + Self::NoAuth(endpoint) => endpoint.clone(), } } } @@ -389,7 +253,7 @@ impl Eth1Endpoint { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { /// An Eth1 node (e.g., Geth) running a HTTP JSON-RPC endpoint. - pub endpoints: Eth1Endpoint, + pub endpoint: Eth1Endpoint, /// The address the `BlockCache` and `DepositCache` should assume is the canonical deposit contract. 
pub deposit_contract_address: String, /// The eth1 chain id where the deposit contract is deployed (Goerli/Mainnet). @@ -466,8 +330,10 @@ impl Config { impl Default for Config { fn default() -> Self { Self { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(DEFAULT_ETH1_ENDPOINT) - .expect("The default Eth1 endpoint must always be a valid URL.")]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(DEFAULT_ETH1_ENDPOINT) + .expect("The default Eth1 endpoint must always be a valid URL."), + ), deposit_contract_address: "0x0000000000000000000000000000000000000000".into(), chain_id: DEFAULT_CHAIN_ID, deposit_contract_deploy_block: 1, @@ -485,6 +351,24 @@ impl Default for Config { } } +pub fn endpoint_from_config(config: &Config) -> Result { + match config.endpoint.clone() { + Eth1Endpoint::Auth { + endpoint, + jwt_path, + jwt_id, + jwt_version, + } => { + let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version) + .map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?; + HttpJsonRpc::new_with_auth(endpoint, auth) + .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) + } + Eth1Endpoint::NoAuth(endpoint) => HttpJsonRpc::new(endpoint) + .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)), + } +} + /// Provides a set of Eth1 caches and async functions to update them. /// /// Stores the following caches: @@ -499,20 +383,24 @@ pub struct Service { impl Service { /// Creates a new service. Does not attempt to connect to the eth1 node. 
- pub fn new(config: Config, log: Logger, spec: ChainSpec) -> Self { - Self { + pub fn new(config: Config, log: Logger, spec: ChainSpec) -> Result { + Ok(Self { inner: Arc::new(Inner { block_cache: <_>::default(), deposit_cache: RwLock::new(DepositUpdater::new( config.deposit_contract_deploy_block, )), - endpoints_cache: RwLock::new(None), + endpoint: endpoint_from_config(&config)?, remote_head_block: RwLock::new(None), config: RwLock::new(config), spec, }), log, - } + }) + } + + pub fn client(&self) -> &HttpJsonRpc { + &self.inner.endpoint } /// Returns the follow distance that has been shortened to accommodate for differences in the @@ -676,52 +564,6 @@ impl Service { self.inner.config.write().lowest_cached_block_number = block_number; } - /// Builds a new `EndpointsCache` with empty states. - pub fn init_endpoints(&self) -> Result, String> { - let endpoints = self.config().endpoints.clone(); - let config_chain_id = self.config().chain_id.clone(); - - let servers = match endpoints { - Eth1Endpoint::Auth { - jwt_path, - endpoint, - jwt_id, - jwt_version, - } => { - let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version) - .map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?; - vec![HttpJsonRpc::new_with_auth(endpoint, auth) - .map_err(|e| format!("Failed to build auth enabled json rpc {:?}", e))?] - } - Eth1Endpoint::NoAuth(urls) => urls - .into_iter() - .map(|url| { - HttpJsonRpc::new(url).map_err(|e| format!("Failed to build json rpc {:?}", e)) - }) - .collect::>()?, - }; - let new_cache = Arc::new(EndpointsCache { - fallback: Fallback::new(servers.into_iter().map(EndpointWithState::new).collect()), - config_chain_id, - log: self.log.clone(), - }); - - let mut endpoints_cache = self.inner.endpoints_cache.write(); - *endpoints_cache = Some(new_cache.clone()); - Ok(new_cache) - } - - /// Returns the cached `EndpointsCache` if it exists or builds a new one. 
- pub fn get_endpoints(&self) -> Result, String> { - let endpoints_cache = self.inner.endpoints_cache.read(); - if let Some(cache) = endpoints_cache.clone() { - Ok(cache) - } else { - drop(endpoints_cache); - self.init_endpoints() - } - } - /// Update the deposit and block cache, returning an error if either fail. /// /// ## Returns @@ -733,56 +575,28 @@ impl Service { pub async fn update( &self, ) -> Result<(DepositCacheUpdateOutcome, BlockCacheUpdateOutcome), String> { - let endpoints = self.get_endpoints()?; - - // Reset the state of any endpoints which have errored so their state can be redetermined. - endpoints.reset_errorred_endpoints().await; - + let client = self.client(); + let log = self.log.clone(); + let chain_id = self.config().chain_id.clone(); let node_far_behind_seconds = self.inner.config.read().node_far_behind_seconds; - let process_single_err = |e: &FallbackError| { - match e { - FallbackError::AllErrored(errors) => { - if errors - .iter() - .all(|error| matches!(error, SingleEndpointError::EndpointError(_))) - { - error!( - self.log, - "No synced execution endpoint"; - "advice" => "ensure you have an execution node configured via \ - --execution-endpoint or if pre-merge, --eth1-endpoints" - ); - } - } + match endpoint_state(client, &chain_id, &log).await { + Ok(()) => crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 1), + Err(e) => { + crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 0); + return Err(format!("Invalid endpoint state: {:?}", e)); } - endpoints.fallback.map_format_error(|s| &s.client, e) - }; - - let process_err = |e: Error| match &e { - Error::FallbackError(f) => process_single_err(f), - e => format!("{:?}", e), - }; - - let ( - (remote_head_block, new_block_numbers_deposit, new_block_numbers_block_cache), - num_errors, - ) = endpoints - .first_success(|e| async move { - get_remote_head_and_new_block_ranges(e, self, node_far_behind_seconds).await - }) - .await - .map_err(|e| format!("{:?}", process_single_err(&e)))?; - - if 
num_errors > 0 { - info!(self.log, "Fetched data from fallback"; "fallback_number" => num_errors); } + let (remote_head_block, new_block_numbers_deposit, new_block_numbers_block_cache) = + get_remote_head_and_new_block_ranges(client, self, node_far_behind_seconds) + .await + .map_err(|e| format!("Failed to get remote head and new block ranges: {:?}", e))?; *self.inner.remote_head_block.write() = Some(remote_head_block); let update_deposit_cache = async { let outcome_result = self - .update_deposit_cache(Some(new_block_numbers_deposit), &endpoints) + .update_deposit_cache(Some(new_block_numbers_deposit)) .await; // Reset the `last_procesed block` to the last valid deposit's block number. @@ -804,8 +618,8 @@ impl Service { deposit_cache.last_processed_block = deposit_cache.cache.latest_block_number(); } - let outcome = outcome_result - .map_err(|e| format!("Failed to update deposit cache: {:?}", process_err(e)))?; + let outcome = + outcome_result.map_err(|e| format!("Failed to update deposit cache: {:?}", e))?; trace!( self.log, @@ -819,14 +633,9 @@ impl Service { let update_block_cache = async { let outcome = self - .update_block_cache(Some(new_block_numbers_block_cache), &endpoints) + .update_block_cache(Some(new_block_numbers_block_cache)) .await - .map_err(|e| { - format!( - "Failed to update deposit contract block cache: {:?}", - process_err(e) - ) - })?; + .map_err(|e| format!("Failed to update deposit contract block cache: {:?}", e))?; trace!( self.log, @@ -858,7 +667,6 @@ impl Service { let mut interval = interval_at(Instant::now(), update_interval); - let num_fallbacks = self.config().endpoints.len() - 1; let update_future = async move { loop { interval.tick().await; @@ -866,15 +674,6 @@ impl Service { } }; - // Set the number of configured eth1 servers - metrics::set_gauge(&metrics::ETH1_FALLBACK_CONFIGURED, num_fallbacks as i64); - // Since we lazily update eth1 fallbacks, it's not possible to know connection status of fallback. 
- // Hence, we set it to 1 if we have atleast one configured fallback. - if num_fallbacks > 0 { - metrics::set_gauge(&metrics::ETH1_FALLBACK_CONNECTED, 1); - } else { - metrics::set_gauge(&metrics::ETH1_FALLBACK_CONNECTED, 0); - } handle.spawn(update_future, "eth1"); } @@ -904,7 +703,7 @@ impl Service { remote_highest_block_number: u64, remote_highest_block_timestamp: Option, head_type: HeadType, - ) -> Result>, SingleEndpointError> { + ) -> Result>, Error> { let follow_distance = self.cache_follow_distance(); let latest_cached_block = self.latest_cached_block(); let next_required_block = match head_type { @@ -948,8 +747,8 @@ impl Service { pub async fn update_deposit_cache( &self, new_block_numbers: Option>>, - endpoints: &EndpointsCache, ) -> Result { + let client = self.client(); let deposit_contract_address = self.config().deposit_contract_address.clone(); let blocks_per_log_query = self.config().blocks_per_log_query; @@ -961,13 +760,10 @@ impl Service { let range = { match new_block_numbers { Some(range) => range, - None => endpoints - .first_success(|e| async move { - relevant_new_block_numbers_from_endpoint(e, self, HeadType::Deposit).await - }) - .await - .map(|(res, _)| res) - .map_err(Error::FallbackError)?, + None => { + relevant_new_block_numbers_from_endpoint(client, self, HeadType::Deposit) + .await? + } } }; @@ -1001,20 +797,14 @@ impl Service { * Step 1. Download logs. 
*/ let block_range_ref = &block_range; - let logs = endpoints - .first_success(|endpoint| async move { - endpoint - .get_deposit_logs_in_range( - deposit_contract_address_ref, - block_range_ref.clone(), - Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS), - ) - .await - .map_err(SingleEndpointError::GetDepositLogsFailed) - }) + let logs = client + .get_deposit_logs_in_range( + deposit_contract_address_ref, + block_range_ref.clone(), + Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS), + ) .await - .map(|(res, _)| res) - .map_err(Error::FallbackError)?; + .map_err(Error::GetDepositLogsFailed)?; /* * Step 2. Import logs to cache. @@ -1050,7 +840,7 @@ impl Service { logs_imported += 1; } - Ok(()) + Ok::<_, Error>(()) })?; debug!( @@ -1105,8 +895,8 @@ impl Service { pub async fn update_block_cache( &self, new_block_numbers: Option>>, - endpoints: &EndpointsCache, ) -> Result { + let client = self.client(); let block_cache_truncation = self.config().block_cache_truncation; let max_blocks_per_update = self .config() @@ -1116,14 +906,10 @@ impl Service { let range = { match new_block_numbers { Some(range) => range, - None => endpoints - .first_success(|e| async move { - relevant_new_block_numbers_from_endpoint(e, self, HeadType::BlockCache) - .await - }) - .await - .map(|(res, _)| res) - .map_err(Error::FallbackError)?, + None => { + relevant_new_block_numbers_from_endpoint(client, self, HeadType::BlockCache) + .await? 
+ } } }; @@ -1183,13 +969,8 @@ impl Service { let mut blocks_imported = 0; for block_number in required_block_numbers { - let eth1_block = endpoints - .first_success(|e| async move { - download_eth1_block(e, self.inner.clone(), Some(block_number)).await - }) - .await - .map(|(res, _)| res) - .map_err(Error::FallbackError)?; + let eth1_block = + download_eth1_block(client, self.inner.clone(), Some(block_number)).await?; self.inner .block_cache @@ -1269,7 +1050,7 @@ fn relevant_block_range( cache_follow_distance: u64, latest_cached_block: Option<&Eth1Block>, spec: &ChainSpec, -) -> Result>, SingleEndpointError> { +) -> Result>, Error> { // If the latest cached block is lagging the head block by more than `cache_follow_distance` // times the expected block time then the eth1 block time is likely quite different from what we // assumed. @@ -1304,7 +1085,7 @@ fn relevant_block_range( // // We assume that the `cache_follow_distance` should be sufficient to ensure this never // happens, otherwise it is an error. 
- Err(SingleEndpointError::RemoteNotSynced { + Err(Error::RemoteNotSynced { next_required_block, remote_highest_block: remote_highest_block_number, cache_follow_distance, @@ -1325,7 +1106,7 @@ async fn download_eth1_block( endpoint: &HttpJsonRpc, cache: Arc, block_number_opt: Option, -) -> Result { +) -> Result { let deposit_root = block_number_opt.and_then(|block_number| { cache .deposit_cache @@ -1350,7 +1131,7 @@ async fn download_eth1_block( .unwrap_or_else(|| BlockQuery::Latest), Duration::from_millis(GET_BLOCK_TIMEOUT_MILLIS), ) - .map_err(SingleEndpointError::BlockDownloadFailed) + .map_err(Error::BlockDownloadFailed) .await?; Ok(Eth1Block { diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index f7f3b6e703..9f81f91e19 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -117,10 +117,9 @@ mod eth1_cache { let initial_block_number = get_block_number(&web3).await; let config = Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: initial_block_number, follow_distance, @@ -128,7 +127,8 @@ mod eth1_cache { }; let cache_follow_distance = config.cache_follow_distance(); - let service = Service::new(config, log.clone(), MainnetEthSpec::default_spec()); + let service = + Service::new(config, log.clone(), MainnetEthSpec::default_spec()).unwrap(); // Create some blocks and then consume them, performing the test `rounds` times. 
for round in 0..2 { @@ -149,19 +149,17 @@ mod eth1_cache { eth1.ganache.evm_mine().await.expect("should mine block"); } - let endpoints = service.init_endpoints().unwrap(); - service - .update_deposit_cache(None, &endpoints) + .update_deposit_cache(None) .await .expect("should update deposit cache"); service - .update_block_cache(None, &endpoints) + .update_block_cache(None) .await .expect("should update block cache"); service - .update_block_cache(None, &endpoints) + .update_block_cache(None) .await .expect("should update cache when nothing has changed"); @@ -201,10 +199,9 @@ mod eth1_cache { let service = Service::new( Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: get_block_number(&web3).await, follow_distance: 0, @@ -213,7 +210,8 @@ mod eth1_cache { }, log, MainnetEthSpec::default_spec(), - ); + ) + .unwrap(); let blocks = cache_len * 2; @@ -221,14 +219,12 @@ mod eth1_cache { eth1.ganache.evm_mine().await.expect("should mine block") } - let endpoints = service.init_endpoints().unwrap(); - service - .update_deposit_cache(None, &endpoints) + .update_deposit_cache(None) .await .expect("should update deposit cache"); service - .update_block_cache(None, &endpoints) + .update_block_cache(None) .await .expect("should update block cache"); @@ -258,10 +254,9 @@ mod eth1_cache { let service = Service::new( Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: get_block_number(&web3).await, follow_distance: 0, @@ -270,19 +265,19 @@ mod eth1_cache { }, log, MainnetEthSpec::default_spec(), - ); 
+ ) + .unwrap(); for _ in 0..4u8 { for _ in 0..cache_len / 2 { eth1.ganache.evm_mine().await.expect("should mine block") } - let endpoints = service.init_endpoints().unwrap(); service - .update_deposit_cache(None, &endpoints) + .update_deposit_cache(None) .await .expect("should update deposit cache"); service - .update_block_cache(None, &endpoints) + .update_block_cache(None) .await .expect("should update block cache"); } @@ -311,10 +306,9 @@ mod eth1_cache { let service = Service::new( Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: get_block_number(&web3).await, follow_distance: 0, @@ -322,21 +316,21 @@ mod eth1_cache { }, log, MainnetEthSpec::default_spec(), - ); + ) + .unwrap(); for _ in 0..n { eth1.ganache.evm_mine().await.expect("should mine block") } - let endpoints = service.init_endpoints().unwrap(); futures::try_join!( - service.update_deposit_cache(None, &endpoints), - service.update_deposit_cache(None, &endpoints) + service.update_deposit_cache(None), + service.update_deposit_cache(None) ) .expect("should perform two simultaneous updates of deposit cache"); futures::try_join!( - service.update_block_cache(None, &endpoints), - service.update_block_cache(None, &endpoints) + service.update_block_cache(None), + service.update_block_cache(None) ) .expect("should perform two simultaneous updates of block cache"); @@ -366,10 +360,9 @@ mod deposit_tree { let service = Service::new( Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: start_block, follow_distance: 0, @@ -377,7 
+370,8 @@ mod deposit_tree { }, log, MainnetEthSpec::default_spec(), - ); + ) + .unwrap(); for round in 0..3 { let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); @@ -389,15 +383,13 @@ mod deposit_tree { .expect("should perform a deposit"); } - let endpoints = service.init_endpoints().unwrap(); - service - .update_deposit_cache(None, &endpoints) + .update_deposit_cache(None) .await .expect("should perform update"); service - .update_deposit_cache(None, &endpoints) + .update_deposit_cache(None) .await .expect("should perform update when nothing has changed"); @@ -449,10 +441,9 @@ mod deposit_tree { let service = Service::new( Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: start_block, lowest_cached_block_number: start_block, @@ -461,7 +452,8 @@ mod deposit_tree { }, log, MainnetEthSpec::default_spec(), - ); + ) + .unwrap(); let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); @@ -472,10 +464,9 @@ mod deposit_tree { .expect("should perform a deposit"); } - let endpoints = service.init_endpoints().unwrap(); futures::try_join!( - service.update_deposit_cache(None, &endpoints), - service.update_deposit_cache(None, &endpoints) + service.update_deposit_cache(None), + service.update_deposit_cache(None) ) .expect("should perform two updates concurrently"); @@ -706,10 +697,9 @@ mod fast { let now = get_block_number(&web3).await; let service = Service::new( Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: now, lowest_cached_block_number: now, @@ 
-719,7 +709,8 @@ mod fast { }, log, MainnetEthSpec::default_spec(), - ); + ) + .unwrap(); let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap()).unwrap(); let n = 10; let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); @@ -732,9 +723,8 @@ mod fast { eth1.ganache.evm_mine().await.expect("should mine block"); } - let endpoints = service.init_endpoints().unwrap(); service - .update_deposit_cache(None, &endpoints) + .update_deposit_cache(None) .await .expect("should perform update"); @@ -787,10 +777,9 @@ mod persist { let now = get_block_number(&web3).await; let config = Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: now, lowest_cached_block_number: now, @@ -798,7 +787,8 @@ mod persist { block_cache_truncation: None, ..Config::default() }; - let service = Service::new(config.clone(), log.clone(), MainnetEthSpec::default_spec()); + let service = + Service::new(config.clone(), log.clone(), MainnetEthSpec::default_spec()).unwrap(); let n = 10; let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); for deposit in &deposits { @@ -808,9 +798,8 @@ mod persist { .expect("should perform a deposit"); } - let endpoints = service.init_endpoints().unwrap(); service - .update_deposit_cache(None, &endpoints) + .update_deposit_cache(None) .await .expect("should perform update"); @@ -822,7 +811,7 @@ mod persist { let deposit_count = service.deposit_cache_len(); service - .update_block_cache(None, &endpoints) + .update_block_cache(None) .await .expect("should perform update"); @@ -855,228 +844,3 @@ mod persist { .await; } } - -/// Tests for eth1 fallback -mod fallbacks { - use super::*; - use tokio::time::sleep; - - #[tokio::test] - async fn test_fallback_when_offline() { - async { - 
let log = null_logger(); - let endpoint2 = new_ganache_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = &endpoint2.deposit_contract; - - let initial_block_number = get_block_number(&endpoint2.web3()).await; - - // Create some blocks and then consume them, performing the test `rounds` times. - let new_blocks = 4; - - for _ in 0..new_blocks { - endpoint2 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - let endpoint1 = endpoint2 - .ganache - .fork() - .expect("should start eth1 environment"); - - //mine additional blocks on top of the original endpoint - for _ in 0..new_blocks { - endpoint2 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - let service = Service::new( - Config { - endpoints: Eth1Endpoint::NoAuth(vec![ - SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), - SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), - ]), - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: initial_block_number, - follow_distance: 0, - ..Config::default() - }, - log.clone(), - MainnetEthSpec::default_spec(), - ); - - let endpoint1_block_number = get_block_number(&endpoint1.web3).await; - //the first call will only query endpoint1 - service.update().await.expect("should update deposit cache"); - assert_eq!( - service.deposits().read().last_processed_block.unwrap(), - endpoint1_block_number - ); - - drop(endpoint1); - - let endpoint2_block_number = get_block_number(&endpoint2.web3()).await; - assert!(endpoint1_block_number < endpoint2_block_number); - //endpoint1 is offline => query will import blocks from endpoint2 - service.update().await.expect("should update deposit cache"); - assert_eq!( - service.deposits().read().last_processed_block.unwrap(), - endpoint2_block_number - ); - } - .await; - } - - #[tokio::test] - async fn test_fallback_when_wrong_chain_id() { - async { - let log = null_logger(); - let correct_chain_id: u64 = 
DEFAULT_CHAIN_ID.into(); - let wrong_chain_id = correct_chain_id + 1; - let endpoint1 = GanacheEth1Instance::new(wrong_chain_id) - .await - .expect("should start eth1 environment"); - let endpoint2 = new_ganache_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = &endpoint2.deposit_contract; - - let initial_block_number = get_block_number(&endpoint2.web3()).await; - - // Create some blocks and then consume them, performing the test `rounds` times. - let new_blocks = 4; - - for _ in 0..new_blocks { - endpoint1 - .ganache - .evm_mine() - .await - .expect("should mine block"); - endpoint2 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - //additional blocks for endpoint1 to be able to distinguish - for _ in 0..new_blocks { - endpoint1 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - let service = Service::new( - Config { - endpoints: Eth1Endpoint::NoAuth(vec![ - SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), - SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), - ]), - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: initial_block_number, - follow_distance: 0, - ..Config::default() - }, - log.clone(), - MainnetEthSpec::default_spec(), - ); - - let endpoint1_block_number = get_block_number(&endpoint1.web3()).await; - let endpoint2_block_number = get_block_number(&endpoint2.web3()).await; - assert!(endpoint2_block_number < endpoint1_block_number); - //the call will fallback to endpoint2 - service.update().await.expect("should update deposit cache"); - assert_eq!( - service.deposits().read().last_processed_block.unwrap(), - endpoint2_block_number - ); - } - .await; - } - - #[tokio::test] - async fn test_fallback_when_node_far_behind() { - async { - let log = null_logger(); - let endpoint2 = new_ganache_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = &endpoint2.deposit_contract; - - let 
initial_block_number = get_block_number(&endpoint2.web3()).await; - - // Create some blocks and then consume them, performing the test `rounds` times. - let new_blocks = 4; - - for _ in 0..new_blocks { - endpoint2 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - let endpoint1 = endpoint2 - .ganache - .fork() - .expect("should start eth1 environment"); - - let service = Service::new( - Config { - endpoints: Eth1Endpoint::NoAuth(vec![ - SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), - SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), - ]), - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: initial_block_number, - follow_distance: 0, - node_far_behind_seconds: 5, - ..Config::default() - }, - log.clone(), - MainnetEthSpec::default_spec(), - ); - - let endpoint1_block_number = get_block_number(&endpoint1.web3).await; - //the first call will only query endpoint1 - service.update().await.expect("should update deposit cache"); - assert_eq!( - service.deposits().read().last_processed_block.unwrap(), - endpoint1_block_number - ); - - sleep(Duration::from_secs(7)).await; - - //both endpoints don't have recent blocks => should return error - assert!(service.update().await.is_err()); - - //produce some new blocks on endpoint2 - for _ in 0..new_blocks { - endpoint2 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - let endpoint2_block_number = get_block_number(&endpoint2.web3()).await; - - //endpoint1 is far behind + endpoint2 not => update will import blocks from endpoint2 - service.update().await.expect("should update deposit cache"); - assert_eq!( - service.deposits().read().last_processed_block.unwrap(), - endpoint2_block_number - ); - } - .await; - } -} diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index 089f79aa11..5614e237ff 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ 
b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -43,7 +43,7 @@ impl Eth1GenesisService { /// Creates a new service. Does not attempt to connect to the Eth1 node. /// /// Modifies the given `config` to make it more suitable to the task of listening to genesis. - pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Self { + pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Result { let config = Eth1Config { // Truncating the block cache makes searching for genesis more // complicated. @@ -64,15 +64,16 @@ impl Eth1GenesisService { ..config }; - Self { - eth1_service: Eth1Service::new(config, log, spec), + Ok(Self { + eth1_service: Eth1Service::new(config, log, spec) + .map_err(|e| format!("Failed to create eth1 service: {:?}", e))?, stats: Arc::new(Statistics { highest_processed_block: AtomicU64::new(0), active_validator_count: AtomicUsize::new(0), total_deposit_count: AtomicUsize::new(0), latest_timestamp: AtomicU64::new(0), }), - } + }) } /// Returns the first eth1 block that has enough deposits that it's a (potentially invalid) @@ -112,11 +113,9 @@ impl Eth1GenesisService { "Importing eth1 deposit logs"; ); - let endpoints = eth1_service.init_endpoints()?; - loop { let update_result = eth1_service - .update_deposit_cache(None, &endpoints) + .update_deposit_cache(None) .await .map_err(|e| format!("{:?}", e)); @@ -158,7 +157,7 @@ impl Eth1GenesisService { } // Download new eth1 blocks into the cache. 
- let blocks_imported = match eth1_service.update_block_cache(None, &endpoints).await { + let blocks_imported = match eth1_service.update_block_cache(None).await { Ok(outcome) => { debug!( log, diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index 74a054fcc0..58f28702b0 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -44,10 +44,9 @@ fn basic() { let service = Eth1GenesisService::new( Eth1Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: now, lowest_cached_block_number: now, @@ -57,7 +56,8 @@ fn basic() { }, log, spec.clone(), - ); + ) + .unwrap(); // NOTE: this test is sensitive to the response speed of the external web3 server. If // you're experiencing failures, try increasing the update_interval. 
diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index a0dbf40b29..eaf91ce9df 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -131,7 +131,8 @@ pub async fn create_api_server_on_port( pm.inject_connection_established(&peer_id, &con_id, &connected_point, None, 0); *network_globals.sync_state.write() = SyncState::Synced; - let eth1_service = eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()); + let eth1_service = + eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap(); let context = Arc::new(Context { config: Config { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 9b5f65622a..51e8762f1c 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -372,9 +372,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long("eth1-endpoints") .value_name("HTTP-ENDPOINTS") .conflicts_with("eth1-endpoint") - .help("One or more comma-delimited server endpoints for web3 connection. \ - If multiple endpoints are given the endpoints are used as fallback in the \ - given order. Also enables the --eth1 flag. \ + .help("One http endpoint for a web3 connection to an execution node. \ + Note: This flag is now only useful for testing, use `--execution-endpoint` \ + flag to connect to an execution node on mainnet and testnets. 
Defaults to http://127.0.0.1:8545.") .takes_value(true) ) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 982cb82ed4..f1d0fb35a3 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -230,17 +230,14 @@ pub fn get_config( ); client_config.sync_eth1_chain = true; - let endpoints = vec![SensitiveUrl::parse(endpoint) - .map_err(|e| format!("eth1-endpoint was an invalid URL: {:?}", e))?]; - client_config.eth1.endpoints = Eth1Endpoint::NoAuth(endpoints); - } else if let Some(endpoints) = cli_args.value_of("eth1-endpoints") { + let endpoint = SensitiveUrl::parse(endpoint) + .map_err(|e| format!("eth1-endpoint was an invalid URL: {:?}", e))?; + client_config.eth1.endpoint = Eth1Endpoint::NoAuth(endpoint); + } else if let Some(endpoint) = cli_args.value_of("eth1-endpoints") { client_config.sync_eth1_chain = true; - let endpoints = endpoints - .split(',') - .map(SensitiveUrl::parse) - .collect::>() + let endpoint = SensitiveUrl::parse(endpoint) .map_err(|e| format!("eth1-endpoints contains an invalid URL {:?}", e))?; - client_config.eth1.endpoints = Eth1Endpoint::NoAuth(endpoints); + client_config.eth1.endpoint = Eth1Endpoint::NoAuth(endpoint); } if let Some(val) = cli_args.value_of("eth1-blocks-per-log-query") { @@ -326,7 +323,7 @@ pub fn get_config( --eth1-endpoints has been deprecated for post-merge configurations" ); } - client_config.eth1.endpoints = Eth1Endpoint::Auth { + client_config.eth1.endpoint = Eth1Endpoint::Auth { endpoint: execution_endpoint, jwt_path: secret_file, jwt_id: el_config.jwt_id.clone(), diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 690271022a..9fd6882202 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -117,7 +117,7 @@ impl ProductionBeaconNode { info!( log, "Block production enabled"; - "endpoints" => format!("{:?}", &client_config.eth1.endpoints), + "endpoint" => format!("{:?}", &client_config.eth1.endpoint), "method" => "json rpc via http" ); builder diff --git 
a/common/fallback/Cargo.toml b/common/fallback/Cargo.toml deleted file mode 100644 index 0d71bbbd27..0000000000 --- a/common/fallback/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "fallback" -version = "0.1.0" -authors = ["blacktemplar "] -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -itertools = "0.10.0" diff --git a/common/fallback/src/lib.rs b/common/fallback/src/lib.rs deleted file mode 100644 index 70f327d204..0000000000 --- a/common/fallback/src/lib.rs +++ /dev/null @@ -1,63 +0,0 @@ -use itertools::{join, zip}; -use std::fmt::{Debug, Display}; -use std::future::Future; - -#[derive(Clone)] -pub struct Fallback { - pub servers: Vec, -} - -#[derive(Debug, PartialEq)] -pub enum FallbackError { - AllErrored(Vec), -} - -impl Fallback { - pub fn new(servers: Vec) -> Self { - Self { servers } - } - - /// Return the first successful result along with number of previous errors encountered - /// or all the errors encountered if every server fails. 
- pub async fn first_success<'a, F, O, E, R>( - &'a self, - func: F, - ) -> Result<(O, usize), FallbackError> - where - F: Fn(&'a T) -> R, - R: Future>, - { - let mut errors = vec![]; - for server in &self.servers { - match func(server).await { - Ok(val) => return Ok((val, errors.len())), - Err(e) => errors.push(e), - } - } - Err(FallbackError::AllErrored(errors)) - } - - pub fn map_format_error<'a, E, F, S>(&'a self, f: F, error: &FallbackError) -> String - where - F: FnMut(&'a T) -> &'a S, - S: Display + 'a, - E: Debug, - { - match error { - FallbackError::AllErrored(v) => format!( - "All fallbacks errored: {}", - join( - zip(self.servers.iter().map(f), v.iter()) - .map(|(server, error)| format!("{} => {:?}", server, error)), - ", " - ) - ), - } - } -} - -impl Fallback { - pub fn format_error(&self, error: &FallbackError) -> String { - self.map_format_error(|s| s, error) - } -} diff --git a/common/monitoring_api/src/gather.rs b/common/monitoring_api/src/gather.rs index 8699a8cf2c..b59a6dfb89 100644 --- a/common/monitoring_api/src/gather.rs +++ b/common/monitoring_api/src/gather.rs @@ -43,6 +43,16 @@ impl JsonMetric { } } } + + /// Return a default json value given given the metric type. + fn get_typed_value_default(&self) -> serde_json::Value { + match self.ty { + JsonType::Integer => json!(0), + JsonType::Boolean => { + json!(false) + } + } + } } /// The required metrics for the beacon and validator processes. @@ -155,6 +165,16 @@ pub fn gather_metrics(metrics_map: &HashMap) -> Option( .value_of("eth1-endpoint") .map(|e| { warn!("The --eth1-endpoint flag is deprecated. 
Please use --eth1-endpoints instead"); - vec![String::from(e)] + String::from(e) }) - .or_else(|| { - matches - .value_of("eth1-endpoints") - .map(|s| s.split(',').map(String::from).collect()) - }); + .or_else(|| matches.value_of("eth1-endpoints").map(String::from)); let mut eth2_network_config = Eth2NetworkConfig::load(testnet_dir.clone())?; @@ -35,12 +31,9 @@ pub fn run( let mut config = Eth1Config::default(); if let Some(v) = endpoints.clone() { - let endpoints = v - .iter() - .map(|s| SensitiveUrl::parse(s)) - .collect::>() + let endpoint = SensitiveUrl::parse(&v) .map_err(|e| format!("Unable to parse eth1 endpoint URL: {:?}", e))?; - config.endpoints = Eth1Endpoint::NoAuth(endpoints); + config.endpoint = Eth1Endpoint::NoAuth(endpoint); } config.deposit_contract_address = format!("{:?}", spec.deposit_contract_address); config.deposit_contract_deploy_block = eth2_network_config.deposit_contract_deploy_block; @@ -49,7 +42,7 @@ pub fn run( config.node_far_behind_seconds = max(5, config.follow_distance) * spec.seconds_per_eth1_block; let genesis_service = - Eth1GenesisService::new(config, env.core_context().log().clone(), spec.clone()); + Eth1GenesisService::new(config, env.core_context().log().clone(), spec.clone())?; env.runtime().block_on(async { let _ = genesis_service diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 661bbcdb0c..288d18c1fa 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -68,7 +68,7 @@ fn staking_flag() { assert!(config.http_api.enabled); assert!(config.sync_eth1_chain); assert_eq!( - config.eth1.endpoints.get_endpoints()[0].to_string(), + config.eth1.endpoint.get_endpoint().to_string(), DEFAULT_ETH1_ENDPOINT ); }); @@ -293,28 +293,17 @@ fn eth1_flag() { #[test] fn eth1_endpoints_flag() { CommandLineTest::new() - .flag( - "eth1-endpoints", - Some("http://localhost:9545,https://infura.io/secret"), - ) + .flag("eth1-endpoints", Some("http://localhost:9545")) 
.run_with_zero_port() .with_config(|config| { assert_eq!( - config.eth1.endpoints.get_endpoints()[0].full.to_string(), + config.eth1.endpoint.get_endpoint().full.to_string(), "http://localhost:9545/" ); assert_eq!( - config.eth1.endpoints.get_endpoints()[0].to_string(), + config.eth1.endpoint.get_endpoint().to_string(), "http://localhost:9545/" ); - assert_eq!( - config.eth1.endpoints.get_endpoints()[1].full.to_string(), - "https://infura.io/secret" - ); - assert_eq!( - config.eth1.endpoints.get_endpoints()[1].to_string(), - "https://infura.io/" - ); assert!(config.sync_eth1_chain); }); } @@ -429,7 +418,7 @@ fn run_execution_endpoints_overrides_eth1_endpoints_test(eth1_flag: &str, execut // The eth1 endpoint should have been set to the --execution-endpoint value in defiance // of --eth1-endpoints. assert_eq!( - config.eth1.endpoints, + config.eth1.endpoint, Eth1Endpoint::Auth { endpoint: SensitiveUrl::parse(execution_endpoint).unwrap(), jwt_path: jwt_path.clone(), @@ -624,7 +613,7 @@ fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_fl assert_eq!(el_config.jwt_id, Some(id.to_string())); assert_eq!(el_config.jwt_version, Some(version.to_string())); assert_eq!( - config.eth1.endpoints, + config.eth1.endpoint, Eth1Endpoint::Auth { endpoint: SensitiveUrl::parse(execution_endpoint).unwrap(), jwt_path: dir.path().join(jwt_file), diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 5e346d5466..182a66b498 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -1,4 +1,4 @@ -use crate::local_network::{EXECUTION_PORT, INVALID_ADDRESS, TERMINAL_BLOCK, TERMINAL_DIFFICULTY}; +use crate::local_network::{EXECUTION_PORT, TERMINAL_BLOCK, TERMINAL_DIFFICULTY}; use crate::{checks, LocalNetwork, E}; use clap::ArgMatches; use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; @@ -138,7 +138,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let mut beacon_config = 
testing_client_config(); beacon_config.genesis = ClientGenesis::DepositContract; - beacon_config.eth1.endpoints = Eth1Endpoint::NoAuth(vec![eth1_endpoint]); + beacon_config.eth1.endpoint = Eth1Endpoint::NoAuth(eth1_endpoint); beacon_config.eth1.deposit_contract_address = deposit_contract_address; beacon_config.eth1.deposit_contract_deploy_block = 0; beacon_config.eth1.lowest_cached_block_number = 0; @@ -173,18 +173,8 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { /* * One by one, add beacon nodes to the network. */ - for i in 0..node_count - 1 { - let mut config = beacon_config.clone(); - if i % 2 == 0 { - if let Eth1Endpoint::NoAuth(endpoints) = &mut config.eth1.endpoints { - endpoints.insert( - 0, - SensitiveUrl::parse(INVALID_ADDRESS) - .expect("Unable to parse invalid address"), - ) - } - } - network.add_beacon_node(config).await?; + for _ in 0..node_count - 1 { + network.add_beacon_node(beacon_config.clone()).await?; } /* From 6a92bf70e478c627b981629c98031059f11125c7 Mon Sep 17 00:00:00 2001 From: GeemoCandama Date: Tue, 4 Oct 2022 08:33:40 +0000 Subject: [PATCH 19/27] CLI tests for logging flags (#3609) ## Issue Addressed Adding CLI tests for logging flags: log-color and disable-log-timestamp Which issue # does this PR address? #3588 ## Proposed Changes Add CLI tests for logging flags as described in #3588 Please list or describe the changes introduced by this PR. Added logger_config to client::Config as suggested. Implemented Default for LoggerConfig based on what was being done elsewhere in the repo. Created 2 tests for each flag addressed. ## Additional Info Please provide any additional information. For example, future considerations or information useful for reviewers. 
--- Cargo.lock | 2 ++ beacon_node/client/src/config.rs | 4 +++- lcli/src/main.rs | 4 ++-- lighthouse/environment/Cargo.toml | 2 ++ lighthouse/environment/src/lib.rs | 33 ++++++++++++++++++------- lighthouse/src/main.rs | 11 +++++---- lighthouse/tests/beacon_node.rs | 36 ++++++++++++++++++++++++++++ testing/simulator/src/eth1_sim.rs | 9 +++---- testing/simulator/src/no_eth1_sim.rs | 9 +++---- testing/simulator/src/sync_sim.rs | 6 ++--- 10 files changed, 85 insertions(+), 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8fb8c54929..da07bf4250 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1631,6 +1631,8 @@ dependencies = [ "exit-future", "futures", "logging", + "serde", + "serde_derive", "slog", "slog-async", "slog-json", diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index a5d5b37c7a..5e43c1eaad 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,11 +1,11 @@ use directory::DEFAULT_ROOT_DIR; +use environment::LoggerConfig; use network::NetworkConfig; use sensitive_url::SensitiveUrl; use serde_derive::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; use types::{Graffiti, PublicKeyBytes}; - /// Default directory name for the freezer database under the top-level data dir. const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db"; @@ -72,6 +72,7 @@ pub struct Config { pub http_metrics: http_metrics::Config, pub monitoring_api: Option, pub slasher: Option, + pub logger_config: LoggerConfig, } impl Default for Config { @@ -96,6 +97,7 @@ impl Default for Config { slasher: None, validator_monitor_auto: false, validator_monitor_pubkeys: vec![], + logger_config: LoggerConfig::default(), } } } diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 11a23fe0b4..8b233d847b 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -781,8 +781,8 @@ fn run( .map_err(|e| format!("should start tokio runtime: {:?}", e))? 
.initialize_logger(LoggerConfig { path: None, - debug_level: "trace", - logfile_debug_level: "trace", + debug_level: String::from("trace"), + logfile_debug_level: String::from("trace"), log_format: None, log_color: false, disable_log_timestamp: false, diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index 7dc31e06bf..1ba0bb267c 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -18,6 +18,8 @@ slog-async = "2.5.0" futures = "0.3.7" slog-json = "2.3.0" exit-future = "0.2.0" +serde = "1.0.116" +serde_derive = "1.0.116" [target.'cfg(not(target_family = "unix"))'.dependencies] ctrlc = { version = "3.1.6", features = ["termination"] } diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 46348e63ba..49163b96f4 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -12,6 +12,7 @@ use eth2_network_config::Eth2NetworkConfig; use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::{future, StreamExt}; +use serde_derive::{Deserialize, Serialize}; use slog::{error, info, o, warn, Drain, Duplicate, Level, Logger}; use sloggers::{file::FileLoggerBuilder, types::Format, types::Severity, Build}; use std::fs::create_dir_all; @@ -43,17 +44,33 @@ const MAXIMUM_SHUTDOWN_TIME: u64 = 15; /// - `path` == None, /// - `max_log_size` == 0, /// - `max_log_number` == 0, -pub struct LoggerConfig<'a> { +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct LoggerConfig { pub path: Option, - pub debug_level: &'a str, - pub logfile_debug_level: &'a str, - pub log_format: Option<&'a str>, + pub debug_level: String, + pub logfile_debug_level: String, + pub log_format: Option, pub log_color: bool, pub disable_log_timestamp: bool, pub max_log_size: u64, pub max_log_number: usize, pub compression: bool, } +impl Default for LoggerConfig { + fn default() -> Self { + LoggerConfig { + path: None, + debug_level: String::from("info"), + 
logfile_debug_level: String::from("debug"), + log_format: None, + log_color: false, + disable_log_timestamp: false, + max_log_size: 200, + max_log_number: 5, + compression: false, + } + } +} /// Builds an `Environment`. pub struct EnvironmentBuilder { @@ -135,7 +152,7 @@ impl EnvironmentBuilder { /// Note that background file logging will spawn a new thread. pub fn initialize_logger(mut self, config: LoggerConfig) -> Result { // Setting up the initial logger format and build it. - let stdout_drain = if let Some(format) = config.log_format { + let stdout_drain = if let Some(ref format) = config.log_format { match format.to_uppercase().as_str() { "JSON" => { let stdout_drain = slog_json::Json::default(std::io::stdout()).fuse(); @@ -168,7 +185,7 @@ impl EnvironmentBuilder { .build() }; - let stdout_drain = match config.debug_level { + let stdout_drain = match config.debug_level.as_str() { "info" => stdout_drain.filter_level(Level::Info), "debug" => stdout_drain.filter_level(Level::Debug), "trace" => stdout_drain.filter_level(Level::Trace), @@ -220,7 +237,7 @@ impl EnvironmentBuilder { } } - let logfile_level = match config.logfile_debug_level { + let logfile_level = match config.logfile_debug_level.as_str() { "info" => Severity::Info, "debug" => Severity::Debug, "trace" => Severity::Trace, @@ -233,7 +250,7 @@ impl EnvironmentBuilder { let file_logger = FileLoggerBuilder::new(&path) .level(logfile_level) .channel_size(LOG_CHANNEL_SIZE) - .format(match config.log_format { + .format(match config.log_format.as_deref() { Some("JSON") => Format::Json, _ => Format::default(), }) diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 341e1a91d5..9dc0902e06 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -438,9 +438,9 @@ fn run( let logger_config = LoggerConfig { path: log_path, - debug_level, - logfile_debug_level, - log_format, + debug_level: String::from(debug_level), + logfile_debug_level: String::from(logfile_debug_level), + log_format: 
log_format.map(String::from), log_color, disable_log_timestamp, max_log_size: logfile_max_size * 1_024 * 1_024, @@ -448,7 +448,7 @@ fn run( compression: logfile_compress, }; - let builder = environment_builder.initialize_logger(logger_config)?; + let builder = environment_builder.initialize_logger(logger_config.clone())?; let mut environment = builder .multi_threaded_tokio_runtime()? @@ -528,7 +528,8 @@ fn run( let context = environment.core_context(); let log = context.log().clone(); let executor = context.executor.clone(); - let config = beacon_node::get_config::(matches, &context)?; + let mut config = beacon_node::get_config::(matches, &context)?; + config.logger_config = logger_config; let shutdown_flag = matches.is_present("immediate-shutdown"); // Dump configs if `dump-config` or `dump-chain-config` flags are set clap_utils::check_dump_configs::<_, E>(matches, &config, &context.eth2_config.spec)?; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 288d18c1fa..2e76d832ce 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1454,3 +1454,39 @@ fn monitoring_endpoint() { assert_eq!(api_conf.update_period_secs, Some(30)); }); } + +// Tests for Logger flags. 
+#[test] +fn default_log_color_flag() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert!(!config.logger_config.log_color); + }); +} +#[test] +fn enabled_log_color_flag() { + CommandLineTest::new() + .flag("log-color", None) + .run_with_zero_port() + .with_config(|config| { + assert!(config.logger_config.log_color); + }); +} +#[test] +fn default_disable_log_timestamp_flag() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert!(!config.logger_config.disable_log_timestamp); + }); +} +#[test] +fn enabled_disable_log_timestamp_flag() { + CommandLineTest::new() + .flag("disable-log-timestamp", None) + .run_with_zero_port() + .with_config(|config| { + assert!(config.logger_config.disable_log_timestamp); + }); +} diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 182a66b498..3d59013f2a 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -56,15 +56,12 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { }) .collect::>(); - let log_level = "debug"; - let log_format = None; - let mut env = EnvironmentBuilder::minimal() .initialize_logger(LoggerConfig { path: None, - debug_level: log_level, - logfile_debug_level: "debug", - log_format, + debug_level: String::from("debug"), + logfile_debug_level: String::from("debug"), + log_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index 57e2e01eb6..06f9e9a4f3 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -41,15 +41,12 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { }) .collect::>(); - let log_level = "debug"; - let log_format = None; - let mut env = EnvironmentBuilder::mainnet() .initialize_logger(LoggerConfig { path: None, - debug_level: log_level, - logfile_debug_level: "debug", - 
log_format, + debug_level: String::from("debug"), + logfile_debug_level: String::from("debug"), + log_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index af5ba95e01..00e439e4c9 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -48,9 +48,9 @@ fn syncing_sim( let mut env = EnvironmentBuilder::minimal() .initialize_logger(LoggerConfig { path: None, - debug_level: log_level, - logfile_debug_level: "debug", - log_format, + debug_level: String::from(log_level), + logfile_debug_level: String::from("debug"), + log_format: log_format.map(String::from), log_color: false, disable_log_timestamp: false, max_log_size: 0, From 4926e3967fe3192fa019942e7450de6cf6ed607c Mon Sep 17 00:00:00 2001 From: Divma Date: Tue, 4 Oct 2022 10:37:48 +0000 Subject: [PATCH 20/27] [DEV FEATURE] Deterministic long lived subnets (#3453) ## Issue Addressed #2847 ## Proposed Changes Add under a feature flag the required changes to subscribe to long lived subnets in a deterministic way ## Additional Info There is an additional required change that is actually searching for peers using the prefix, but I find that it's best to make this change in the future --- Cargo.lock | 1 + beacon_node/network/Cargo.toml | 5 + beacon_node/network/src/service.rs | 10 +- .../src/subnet_service/attestation_subnets.rs | 231 +++++++++++++++++- .../network/src/subnet_service/tests/mod.rs | 94 +++++-- consensus/types/src/chain_spec.rs | 25 ++ consensus/types/src/subnet_id.rs | 42 +++- 7 files changed, 371 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index da07bf4250..d0e3622e77 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4150,6 +4150,7 @@ dependencies = [ "error-chain", "eth2_ssz", "eth2_ssz_types", + "ethereum-types 0.12.1", "exit-future", "fnv", "futures", diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 
87c7650fb5..2e7b2227b2 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -44,3 +44,8 @@ strum = "0.24.0" tokio-util = { version = "0.6.3", features = ["time"] } derivative = "2.2.0" delay_map = "0.1.1" +ethereum-types = { version = "0.12.1", optional = true } + +[features] +deterministic_long_lived_attnets = [ "ethereum-types" ] +# default = ["deterministic_long_lived_attnets"] diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index ec8573ea1f..31c42b860d 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -299,9 +299,13 @@ impl NetworkService { )?; // attestation subnet service - let attestation_service = - AttestationService::new(beacon_chain.clone(), config, &network_log); - + let attestation_service = AttestationService::new( + beacon_chain.clone(), + #[cfg(feature = "deterministic_long_lived_attnets")] + network_globals.local_enr().node_id().raw().into(), + config, + &network_log, + ); // sync committee subnet service let sync_committee_service = SyncCommitteeService::new(beacon_chain.clone(), config, &network_log); diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index ee8ba24fc3..70ba1c8170 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -3,7 +3,7 @@ //! determines whether attestations should be aggregated and/or passed to the beacon node. 
use super::SubnetServiceMessage; -#[cfg(test)] +#[cfg(any(test, feature = "deterministic_long_lived_attnets"))] use std::collections::HashSet; use std::collections::{HashMap, VecDeque}; use std::pin::Pin; @@ -15,6 +15,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use delay_map::{HashMapDelay, HashSetDelay}; use futures::prelude::*; use lighthouse_network::{NetworkConfig, Subnet, SubnetDiscovery}; +#[cfg(not(feature = "deterministic_long_lived_attnets"))] use rand::seq::SliceRandom; use slog::{debug, error, o, trace, warn}; use slot_clock::SlotClock; @@ -28,6 +29,7 @@ use crate::metrics; pub(crate) const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; /// The time (in slots) before a last seen validator is considered absent and we unsubscribe from /// the random gossip topics that we subscribed to due to the validator connection. +#[cfg(not(feature = "deterministic_long_lived_attnets"))] const LAST_SEEN_VALIDATOR_TIMEOUT_SLOTS: u32 = 150; /// The fraction of a slot that we subscribe to a subnet before the required slot. /// @@ -70,6 +72,9 @@ pub struct AttestationService { /// Subnets we are currently subscribed to as long lived subscriptions. /// /// We advertise these in our ENR. When these expire, the subnet is removed from our ENR. + #[cfg(feature = "deterministic_long_lived_attnets")] + long_lived_subscriptions: HashSet, + #[cfg(not(feature = "deterministic_long_lived_attnets"))] long_lived_subscriptions: HashMapDelay, /// Short lived subscriptions that need to be done in the future. @@ -83,6 +88,7 @@ pub struct AttestationService { /// subscribed to. As these time out, we unsubscribe for the required random subnets and update /// our ENR. /// This is a set of validator indices. + #[cfg(not(feature = "deterministic_long_lived_attnets"))] known_validators: HashSetDelay, /// The waker for the current thread. @@ -95,8 +101,17 @@ pub struct AttestationService { subscribe_all_subnets: bool, /// For how many slots we subscribe to long lived subnets. 
+ #[cfg(not(feature = "deterministic_long_lived_attnets"))] long_lived_subnet_subscription_slots: u64, + /// Our Discv5 node_id. + #[cfg(feature = "deterministic_long_lived_attnets")] + node_id: ethereum_types::U256, + + /// Future used to manage subscribing and unsubscribing from long lived subnets. + #[cfg(feature = "deterministic_long_lived_attnets")] + next_long_lived_subscription_event: Pin>, + /// The logger for the attestation service. log: slog::Logger, } @@ -104,6 +119,7 @@ pub struct AttestationService { impl AttestationService { /* Public functions */ + #[cfg(not(feature = "deterministic_long_lived_attnets"))] pub fn new( beacon_chain: Arc>, config: &NetworkConfig, @@ -145,31 +161,85 @@ impl AttestationService { } } + #[cfg(feature = "deterministic_long_lived_attnets")] + pub fn new( + beacon_chain: Arc>, + node_id: ethereum_types::U256, + config: &NetworkConfig, + log: &slog::Logger, + ) -> Self { + let log = log.new(o!("service" => "attestation_service")); + + // Calculate the random subnet duration from the spec constants. + let slot_duration = beacon_chain.slot_clock.slot_duration(); + + slog::info!(log, "Deterministic long lived subnets enabled"; "subnets_per_node" => beacon_chain.spec.subnets_per_node); + + let track_validators = !config.import_all_attestations; + let aggregate_validators_on_subnet = + track_validators.then(|| HashSetDelay::new(slot_duration)); + let mut service = AttestationService { + events: VecDeque::with_capacity(10), + beacon_chain, + short_lived_subscriptions: HashMapDelay::new(slot_duration), + long_lived_subscriptions: HashSet::default(), + scheduled_short_lived_subscriptions: HashSetDelay::default(), + aggregate_validators_on_subnet, + waker: None, + discovery_disabled: config.disable_discovery, + subscribe_all_subnets: config.subscribe_all_subnets, + node_id, + next_long_lived_subscription_event: { + // Set a dummy sleep. 
Calculating the current subnet subscriptions will update this + // value with a smarter timing + Box::pin(tokio::time::sleep(Duration::from_secs(1))) + }, + log, + }; + service.recompute_long_lived_subnets(); + service + } + /// Return count of all currently subscribed subnets (long-lived **and** short-lived). #[cfg(test)] pub fn subscription_count(&self) -> usize { if self.subscribe_all_subnets { self.beacon_chain.spec.attestation_subnet_count as usize } else { - self.short_lived_subscriptions + #[cfg(feature = "deterministic_long_lived_attnets")] + let count = self + .short_lived_subscriptions + .keys() + .chain(self.long_lived_subscriptions.iter()) + .collect::>() + .len(); + #[cfg(not(feature = "deterministic_long_lived_attnets"))] + let count = self + .short_lived_subscriptions .keys() .chain(self.long_lived_subscriptions.keys()) .collect::>() - .len() + .len(); + count } } - /// Give access to the current subscriptions for testing purposes. + /// Returns whether we are subscribed to a subnet for testing purposes. #[cfg(test)] - pub(crate) fn subscriptions( + pub(crate) fn is_subscribed( &self, + subnet_id: &SubnetId, subscription_kind: SubscriptionKind, - ) -> &HashMapDelay { + ) -> bool { match subscription_kind { - SubscriptionKind::LongLived => &self.long_lived_subscriptions, - SubscriptionKind::ShortLived => &self.short_lived_subscriptions, + #[cfg(feature = "deterministic_long_lived_attnets")] + SubscriptionKind::LongLived => self.long_lived_subscriptions.contains(subnet_id), + #[cfg(not(feature = "deterministic_long_lived_attnets"))] + SubscriptionKind::LongLived => self.long_lived_subscriptions.contains_key(subnet_id), + SubscriptionKind::ShortLived => self.short_lived_subscriptions.contains_key(subnet_id), } } + /// Processes a list of validator subscriptions. 
/// /// This will: @@ -197,6 +267,7 @@ impl AttestationService { "Validator subscription"; "subscription" => ?subscription, ); + #[cfg(not(feature = "deterministic_long_lived_attnets"))] self.add_known_validator(subscription.validator_index); let subnet_id = match SubnetId::compute_subnet::( @@ -267,6 +338,111 @@ impl AttestationService { Ok(()) } + #[cfg(feature = "deterministic_long_lived_attnets")] + fn recompute_long_lived_subnets(&mut self) { + // Ensure the next computation is scheduled even if assigning subnets fails. + let next_subscription_event = self + .recompute_long_lived_subnets_inner() + .unwrap_or_else(|_| self.beacon_chain.slot_clock.slot_duration()); + + debug!(self.log, "Recomputing deterministic long lived attnets"); + self.next_long_lived_subscription_event = + Box::pin(tokio::time::sleep(next_subscription_event)); + + if let Some(waker) = self.waker.as_ref() { + waker.wake_by_ref(); + } + } + + /// Gets the long lived subnets the node should be subscribed to during the current epoch and + /// the remaining duration for which they remain valid. 
+ #[cfg(feature = "deterministic_long_lived_attnets")] + fn recompute_long_lived_subnets_inner(&mut self) -> Result { + let current_epoch = self.beacon_chain.epoch().map_err( + |e| error!(self.log, "Failed to get the current epoch from clock"; "err" => ?e), + )?; + + let (subnets, next_subscription_epoch) = SubnetId::compute_subnets_for_epoch::( + self.node_id, + current_epoch, + &self.beacon_chain.spec, + ) + .map_err(|e| error!(self.log, "Could not compute subnets for current epoch"; "err" => e))?; + + let next_subscription_slot = + next_subscription_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let next_subscription_event = self + .beacon_chain + .slot_clock + .duration_to_slot(next_subscription_slot) + .ok_or_else(|| { + error!( + self.log, + "Failed to compute duration to next to long lived subscription event" + ) + })?; + + self.update_long_lived_subnets(subnets.collect()); + + Ok(next_subscription_event) + } + + #[cfg(all(test, feature = "deterministic_long_lived_attnets"))] + pub fn update_long_lived_subnets_testing(&mut self, subnets: HashSet) { + self.update_long_lived_subnets(subnets) + } + + /// Updates the long lived subnets. + /// + /// New subnets are registered as subscribed, removed subnets as unsubscribed and the Enr + /// updated accordingly. + #[cfg(feature = "deterministic_long_lived_attnets")] + fn update_long_lived_subnets(&mut self, mut subnets: HashSet) { + for subnet in &subnets { + // Add the events for those subnets that are new as long lived subscriptions. + if !self.long_lived_subscriptions.contains(subnet) { + // Check if this subnet is new and send the subscription event if needed. 
+ if !self.short_lived_subscriptions.contains_key(subnet) { + debug!(self.log, "Subscribing to subnet"; + "subnet" => ?subnet, + "subscription_kind" => ?SubscriptionKind::LongLived, + ); + self.queue_event(SubnetServiceMessage::Subscribe(Subnet::Attestation( + *subnet, + ))); + } + self.queue_event(SubnetServiceMessage::EnrAdd(Subnet::Attestation(*subnet))); + if !self.discovery_disabled { + self.queue_event(SubnetServiceMessage::DiscoverPeers(vec![SubnetDiscovery { + subnet: Subnet::Attestation(*subnet), + min_ttl: None, + }])) + } + } + } + + // Check for subnets that are being removed + std::mem::swap(&mut self.long_lived_subscriptions, &mut subnets); + for subnet in subnets { + if !self.long_lived_subscriptions.contains(&subnet) { + if !self.short_lived_subscriptions.contains_key(&subnet) { + debug!(self.log, "Unsubscribing from subnet"; "subnet" => ?subnet, "subscription_kind" => ?SubscriptionKind::LongLived); + self.queue_event(SubnetServiceMessage::Unsubscribe(Subnet::Attestation( + subnet, + ))); + } + + self.queue_event(SubnetServiceMessage::EnrRemove(Subnet::Attestation(subnet))); + } + } + } + + /// Overwrites the long lived subscriptions for testing. + #[cfg(all(test, feature = "deterministic_long_lived_attnets"))] + pub fn set_long_lived_subscriptions(&mut self, subnets: HashSet) { + self.long_lived_subscriptions = subnets + } + /// Checks if we have subscribed aggregate validators for the subnet. If not, checks the gossip /// verification, re-propagates and returns false. pub fn should_process_attestation( @@ -377,6 +553,7 @@ impl AttestationService { // This is a current or past slot, we subscribe immediately. self.subscribe_to_subnet_immediately( subnet_id, + #[cfg(not(feature = "deterministic_long_lived_attnets"))] SubscriptionKind::ShortLived, slot + 1, )?; @@ -391,6 +568,7 @@ impl AttestationService { } /// Updates the `known_validators` mapping and subscribes to long lived subnets if required. 
+ #[cfg(not(feature = "deterministic_long_lived_attnets"))] fn add_known_validator(&mut self, validator_index: u64) { let previously_known = self.known_validators.contains_key(&validator_index); // Add the new validator or update the current timeout for a known validator. @@ -405,6 +583,7 @@ impl AttestationService { /// Subscribe to long-lived random subnets and update the local ENR bitfield. /// The number of subnets to subscribe depends on the number of active validators and number of /// current subscriptions. + #[cfg(not(feature = "deterministic_long_lived_attnets"))] fn subscribe_to_random_subnets(&mut self) { if self.subscribe_all_subnets { // This case is not handled by this service. @@ -468,9 +647,12 @@ impl AttestationService { /// Checks that the time in which the subscription would end is not in the past. If we are /// already subscribed, extends the timeout if necessary. If this is a new subscription, we send /// out the appropriate events. + /// + /// On determinist long lived subnets, this is only used for short lived subscriptions. fn subscribe_to_subnet_immediately( &mut self, subnet_id: SubnetId, + #[cfg(not(feature = "deterministic_long_lived_attnets"))] subscription_kind: SubscriptionKind, end_slot: Slot, ) -> Result<(), &'static str> { @@ -490,9 +672,13 @@ impl AttestationService { return Err("Time when subscription would end has already passed."); } + #[cfg(feature = "deterministic_long_lived_attnets")] + let subscription_kind = SubscriptionKind::ShortLived; + // We need to check and add a subscription for the right kind, regardless of the presence // of the subnet as a subscription of the other kind. This is mainly since long lived // subscriptions can be removed at any time when a validator goes offline. 
+ #[cfg(not(feature = "deterministic_long_lived_attnets"))] let (subscriptions, already_subscribed_as_other_kind) = match subscription_kind { SubscriptionKind::ShortLived => ( &mut self.short_lived_subscriptions, @@ -504,6 +690,12 @@ impl AttestationService { ), }; + #[cfg(feature = "deterministic_long_lived_attnets")] + let (subscriptions, already_subscribed_as_other_kind) = ( + &mut self.short_lived_subscriptions, + self.long_lived_subscriptions.contains(&subnet_id), + ); + match subscriptions.get(&subnet_id) { Some(current_end_slot) => { // We are already subscribed. Check if we need to extend the subscription. @@ -535,6 +727,7 @@ impl AttestationService { } // If this is a new long lived subscription, send out the appropriate events. + #[cfg(not(feature = "deterministic_long_lived_attnets"))] if SubscriptionKind::LongLived == subscription_kind { let subnet = Subnet::Attestation(subnet_id); // Advertise this subnet in our ENR. @@ -564,6 +757,7 @@ impl AttestationService { /// /// This function selects a new subnet to join, or extends the expiry if there are no more /// available subnets to choose from. + #[cfg(not(feature = "deterministic_long_lived_attnets"))] fn handle_random_subnet_expiry(&mut self, subnet_id: SubnetId) { self.handle_removed_subnet(subnet_id, SubscriptionKind::LongLived); @@ -576,12 +770,15 @@ impl AttestationService { // subscription of the other kind. For long lived subscriptions, it also removes the // advertisement from our ENR. 
fn handle_removed_subnet(&mut self, subnet_id: SubnetId, subscription_kind: SubscriptionKind) { - let other_subscriptions = match subscription_kind { - SubscriptionKind::LongLived => &self.short_lived_subscriptions, - SubscriptionKind::ShortLived => &self.long_lived_subscriptions, + let exists_in_other_subscriptions = match subscription_kind { + SubscriptionKind::LongLived => self.short_lived_subscriptions.contains_key(&subnet_id), + #[cfg(feature = "deterministic_long_lived_attnets")] + SubscriptionKind::ShortLived => self.long_lived_subscriptions.contains(&subnet_id), + #[cfg(not(feature = "deterministic_long_lived_attnets"))] + SubscriptionKind::ShortLived => self.long_lived_subscriptions.contains_key(&subnet_id), }; - if !other_subscriptions.contains_key(&subnet_id) { + if !exists_in_other_subscriptions { // Subscription no longer exists as short lived or long lived. debug!(self.log, "Unsubscribing from subnet"; "subnet" => ?subnet_id, "subscription_kind" => ?subscription_kind); self.queue_event(SubnetServiceMessage::Unsubscribe(Subnet::Attestation( @@ -603,6 +800,7 @@ impl AttestationService { /// We don't keep track of a specific validator to random subnet, rather the ratio of active /// validators to random subnets. So when a validator goes offline, we can simply remove the /// allocated amount of random subnets. + #[cfg(not(feature = "deterministic_long_lived_attnets"))] fn handle_known_validator_expiry(&mut self) { // Calculate how many subnets should we remove. let extra_subnet_count = { @@ -659,6 +857,7 @@ impl Stream for AttestationService { // Process first any known validator expiries, since these affect how many long lived // subnets we need. 
+ #[cfg(not(feature = "deterministic_long_lived_attnets"))] match self.known_validators.poll_next_unpin(cx) { Poll::Ready(Some(Ok(_validator_index))) => { self.handle_known_validator_expiry(); @@ -669,12 +868,19 @@ impl Stream for AttestationService { Poll::Ready(None) | Poll::Pending => {} } + #[cfg(feature = "deterministic_long_lived_attnets")] + match self.next_long_lived_subscription_event.as_mut().poll(cx) { + Poll::Ready(_) => self.recompute_long_lived_subnets(), + Poll::Pending => {} + } + // Process scheduled subscriptions that might be ready, since those can extend a soon to // expire subscription. match self.scheduled_short_lived_subscriptions.poll_next_unpin(cx) { Poll::Ready(Some(Ok(ExactSubnet { subnet_id, slot }))) => { if let Err(e) = self.subscribe_to_subnet_immediately( subnet_id, + #[cfg(not(feature = "deterministic_long_lived_attnets"))] SubscriptionKind::ShortLived, slot + 1, ) { @@ -699,6 +905,7 @@ impl Stream for AttestationService { } // Process any random subnet expiries. 
+ #[cfg(not(feature = "deterministic_long_lived_attnets"))] match self.long_lived_subscriptions.poll_next_unpin(cx) { Poll::Ready(Some(Ok((subnet_id, _end_slot)))) => { self.handle_random_subnet_expiry(subnet_id) diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 65ca9f2194..30f030eba7 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -123,7 +123,15 @@ fn get_attestation_service( let beacon_chain = CHAIN.chain.clone(); - AttestationService::new(beacon_chain, &config, &log) + AttestationService::new( + beacon_chain, + #[cfg(feature = "deterministic_long_lived_attnets")] + lighthouse_network::discv5::enr::NodeId::random() + .raw() + .into(), + &config, + &log, + ) } fn get_sync_committee_service() -> SyncCommitteeService { @@ -170,6 +178,9 @@ async fn get_events + Unpin>( mod attestation_service { + #[cfg(feature = "deterministic_long_lived_attnets")] + use std::collections::HashSet; + use crate::subnet_service::attestation_subnets::MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD; use super::*; @@ -190,6 +201,7 @@ mod attestation_service { } } + #[cfg(not(feature = "deterministic_long_lived_attnets"))] fn get_subscriptions( validator_count: u64, slot: Slot, @@ -268,8 +280,7 @@ mod attestation_service { // If the long lived and short lived subnets are the same, there should be no more events // as we don't resubscribe already subscribed subnets. if !attestation_service - .subscriptions(attestation_subnets::SubscriptionKind::LongLived) - .contains_key(&subnet_id) + .is_subscribed(&subnet_id, attestation_subnets::SubscriptionKind::LongLived) { assert_eq!(expected[..], events[3..]); } @@ -352,11 +363,12 @@ mod attestation_service { let expected = SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1)); - // Should be still subscribed to 1 long lived and 1 short lived subnet if both are different. 
- if !attestation_service - .subscriptions(attestation_subnets::SubscriptionKind::LongLived) - .contains_key(&subnet_id1) - { + // Should be still subscribed to 1 long lived and 1 short lived subnet if both are + // different. + if !attestation_service.is_subscribed( + &subnet_id1, + attestation_subnets::SubscriptionKind::LongLived, + ) { assert_eq!(expected, events[3]); assert_eq!(attestation_service.subscription_count(), 2); } else { @@ -366,11 +378,12 @@ mod attestation_service { // Get event for 1 more slot duration, we should get the unsubscribe event now. let unsubscribe_event = get_events(&mut attestation_service, None, 1).await; - // If the long lived and short lived subnets are different, we should get an unsubscription event. - if !attestation_service - .subscriptions(attestation_subnets::SubscriptionKind::LongLived) - .contains_key(&subnet_id1) - { + // If the long lived and short lived subnets are different, we should get an unsubscription + // event. + if !attestation_service.is_subscribed( + &subnet_id1, + attestation_subnets::SubscriptionKind::LongLived, + ) { assert_eq!( [SubnetServiceMessage::Unsubscribe(Subnet::Attestation( subnet_id1 @@ -383,6 +396,7 @@ mod attestation_service { assert_eq!(attestation_service.subscription_count(), 1); } + #[cfg(not(feature = "deterministic_long_lived_attnets"))] #[tokio::test] async fn subscribe_all_random_subnets() { let attestation_subnet_count = MainnetEthSpec::default_spec().attestation_subnet_count; @@ -440,6 +454,7 @@ mod attestation_service { // test completed successfully } + #[cfg(not(feature = "deterministic_long_lived_attnets"))] #[tokio::test] async fn subscribe_all_random_subnets_plus_one() { let attestation_subnet_count = MainnetEthSpec::default_spec().attestation_subnet_count; @@ -573,10 +588,10 @@ mod attestation_service { let expected_unsubscription = SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id1)); - if !attestation_service - 
.subscriptions(attestation_subnets::SubscriptionKind::LongLived) - .contains_key(&subnet_id1) - { + if !attestation_service.is_subscribed( + &subnet_id1, + attestation_subnets::SubscriptionKind::LongLived, + ) { assert_eq!(expected_subscription, events[3]); // fourth is a discovery event assert_eq!(expected_unsubscription, events[5]); @@ -600,10 +615,10 @@ mod attestation_service { let second_subscribe_event = get_events(&mut attestation_service, None, 2).await; // If the long lived and short lived subnets are different, we should get an unsubscription event. - if !attestation_service - .subscriptions(attestation_subnets::SubscriptionKind::LongLived) - .contains_key(&subnet_id1) - { + if !attestation_service.is_subscribed( + &subnet_id1, + attestation_subnets::SubscriptionKind::LongLived, + ) { assert_eq!( [SubnetServiceMessage::Subscribe(Subnet::Attestation( subnet_id1 @@ -612,6 +627,43 @@ mod attestation_service { ); } } + + #[tokio::test] + #[cfg(feature = "deterministic_long_lived_attnets")] + async fn test_update_deterministic_long_lived_subnets() { + let mut attestation_service = get_attestation_service(None); + let new_subnet = SubnetId::new(1); + let maintained_subnet = SubnetId::new(2); + let removed_subnet = SubnetId::new(3); + + attestation_service + .set_long_lived_subscriptions(HashSet::from([removed_subnet, maintained_subnet])); + // clear initial events + let _events = get_events(&mut attestation_service, None, 1).await; + + attestation_service + .update_long_lived_subnets_testing(HashSet::from([maintained_subnet, new_subnet])); + + let events = get_events(&mut attestation_service, None, 1).await; + let new_subnet = Subnet::Attestation(new_subnet); + let removed_subnet = Subnet::Attestation(removed_subnet); + assert_eq!( + events, + [ + // events for the new subnet + SubnetServiceMessage::Subscribe(new_subnet), + SubnetServiceMessage::EnrAdd(new_subnet), + SubnetServiceMessage::DiscoverPeers(vec![SubnetDiscovery { + subnet: new_subnet, + min_ttl: 
None + }]), + // events for the removed subnet + SubnetServiceMessage::Unsubscribe(removed_subnet), + SubnetServiceMessage::EnrRemove(removed_subnet), + ] + ); + println!("{events:?}") + } } mod sync_committee_service { diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index b2ba24ac3e..f68e65d7d5 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -161,6 +161,9 @@ pub struct ChainSpec { pub attestation_subnet_count: u64, pub random_subnets_per_validator: u64, pub epochs_per_random_subnet_subscription: u64, + pub subnets_per_node: u8, + pub epochs_per_subnet_subscription: u64, + attestation_subnet_extra_bits: u8, /* * Application params @@ -427,6 +430,22 @@ impl ChainSpec { Hash256::from(domain) } + #[allow(clippy::integer_arithmetic)] + pub const fn attestation_subnet_prefix_bits(&self) -> u32 { + // maybe use log2 when stable https://github.com/rust-lang/rust/issues/70887 + + // NOTE: this line is here simply to guarantee that if self.attestation_subnet_count type + // is changed, a compiler warning will be raised. This code depends on the type being u64. + let attestation_subnet_count: u64 = self.attestation_subnet_count; + let attestation_subnet_count_bits = if attestation_subnet_count == 0 { + 0 + } else { + 63 - attestation_subnet_count.leading_zeros() + }; + + self.attestation_subnet_extra_bits as u32 + attestation_subnet_count_bits + } + /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. 
pub fn mainnet() -> Self { Self { @@ -576,9 +595,12 @@ impl ChainSpec { attestation_propagation_slot_range: 32, attestation_subnet_count: 64, random_subnets_per_validator: 1, + subnets_per_node: 1, maximum_gossip_clock_disparity_millis: 500, target_aggregators_per_committee: 16, epochs_per_random_subnet_subscription: 256, + epochs_per_subnet_subscription: 256, + attestation_subnet_extra_bits: 6, /* * Application specific @@ -786,9 +808,12 @@ impl ChainSpec { attestation_propagation_slot_range: 32, attestation_subnet_count: 64, random_subnets_per_validator: 1, + subnets_per_node: 1, maximum_gossip_clock_disparity_millis: 500, target_aggregators_per_committee: 16, epochs_per_random_subnet_subscription: 256, + epochs_per_subnet_subscription: 256, + attestation_subnet_extra_bits: 6, /* * Application specific diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 046ed8f33e..e1de277615 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -1,8 +1,9 @@ //! Identifies each shard by an integer identifier. -use crate::{AttestationData, ChainSpec, CommitteeIndex, EthSpec, Slot}; +use crate::{AttestationData, ChainSpec, CommitteeIndex, Epoch, EthSpec, Slot}; use safe_arith::{ArithError, SafeArith}; use serde_derive::{Deserialize, Serialize}; use std::ops::{Deref, DerefMut}; +use swap_or_not_shuffle::compute_shuffled_index; const MAX_SUBNET_ID: usize = 64; @@ -71,6 +72,45 @@ impl SubnetId { .safe_rem(spec.attestation_subnet_count)? .into()) } + + #[allow(clippy::integer_arithmetic)] + /// Computes the set of subnets the node should be subscribed to during the current epoch, + /// along with the first epoch in which these subscriptions are no longer valid. 
+ pub fn compute_subnets_for_epoch( + node_id: ethereum_types::U256, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result<(impl Iterator, Epoch), &'static str> { + let node_id_prefix = + (node_id >> (256 - spec.attestation_subnet_prefix_bits() as usize)).as_usize(); + + let subscription_event_idx = epoch.as_u64() / spec.epochs_per_subnet_subscription; + let permutation_seed = + eth2_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx)); + + let num_subnets = 1 << spec.attestation_subnet_prefix_bits(); + + let permutated_prefix = compute_shuffled_index( + node_id_prefix, + num_subnets, + &permutation_seed, + spec.shuffle_round_count, + ) + .ok_or("Unable to shuffle")? as u64; + + // Get the constants we need to avoid holding a reference to the spec + let &ChainSpec { + subnets_per_node, + attestation_subnet_count, + .. + } = spec; + + let subnet_set_generator = (0..subnets_per_node).map(move |idx| { + SubnetId::new((permutated_prefix + idx as u64) % attestation_subnet_count) + }); + let valid_until_epoch = (subscription_event_idx + 1) * spec.epochs_per_subnet_subscription; + Ok((subnet_set_generator, valid_until_epoch.into())) + } } impl Deref for SubnetId { From 242ae21e5df875f04c8b9182d108aedfa1f6ae9f Mon Sep 17 00:00:00 2001 From: mariuspod <14898268+mariuspod@users.noreply.github.com> Date: Tue, 4 Oct 2022 12:41:03 +0000 Subject: [PATCH 21/27] Pass EL JWT secret key via cli flag (#3568) ## Proposed Changes In this change I've added a new beacon_node cli flag `--execution-jwt-secret-key` for passing the JWT secret directly as string. Without this flag, it was non-trivial to pass a secrets file containing a JWT secret key without compromising its contents into some management repo or fiddling around with manual file mounts for cloud-based deployments. When used in combination with environment variables, the secret can be injected into container-based systems like docker & friends quite easily. 
It's both possible to either specify the file_path to the JWT secret or pass the JWT secret directly. I've modified the docs and attached a test as well. ## Additional Info The logic has been adapted a bit so that either one of `--execution-jwt` or `--execution-jwt-secret-key` must be set when specifying `--execution-endpoint` so that it's still compatible with the semantics before this change and there's at least one secret provided. --- beacon_node/src/cli.rs | 12 ++++++++++- beacon_node/src/config.rs | 35 +++++++++++++++++++++++++++------ book/src/merge-migration.md | 4 ++++ lighthouse/tests/beacon_node.rs | 23 +++++++++++++++++++++- 4 files changed, 66 insertions(+), 8 deletions(-) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 51e8762f1c..1e51849876 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -440,7 +440,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { JSON-RPC connection. Uses the same endpoint to populate the \ deposit cache.") .takes_value(true) - .requires("execution-jwt") ) .arg( Arg::with_name("execution-jwt") @@ -452,6 +451,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .requires("execution-endpoint") .takes_value(true) ) + .arg( + Arg::with_name("execution-jwt-secret-key") + .long("execution-jwt-secret-key") + .value_name("EXECUTION-JWT-SECRET-KEY") + .alias("jwt-secret-key") + .help("Hex-encoded JWT secret for the \ + execution endpoint provided in the --execution-endpoint flag.") + .requires("execution-endpoint") + .conflicts_with("execution-jwt") + .takes_value(true) + ) .arg( Arg::with_name("execution-jwt-id") .long("execution-jwt-id") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index f1d0fb35a3..ecd4d736a6 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -3,6 +3,7 @@ use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; use client::{ClientConfig, ClientGenesis}; use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR}; use 
environment::RuntimeContext; +use execution_layer::DEFAULT_JWT_FILE; use genesis::Eth1Endpoint; use http_api::TlsConfig; use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; @@ -288,12 +289,34 @@ pub fn get_config( let execution_endpoint = parse_only_one_value(endpoints, SensitiveUrl::parse, "--execution-endpoint", log)?; - // Parse a single JWT secret, logging warnings if multiple are supplied. - // - // JWTs are required if `--execution-endpoint` is supplied. - let secret_files: String = clap_utils::parse_required(cli_args, "execution-jwt")?; - let secret_file = - parse_only_one_value(&secret_files, PathBuf::from_str, "--execution-jwt", log)?; + // JWTs are required if `--execution-endpoint` is supplied. They can be either passed via + // file_path or directly as string. + + let secret_file: PathBuf; + // Parse a single JWT secret from a given file_path, logging warnings if multiple are supplied. + if let Some(secret_files) = cli_args.value_of("execution-jwt") { + secret_file = + parse_only_one_value(secret_files, PathBuf::from_str, "--execution-jwt", log)?; + + // Check if the JWT secret key is passed directly via cli flag and persist it to the default + // file location. + } else if let Some(jwt_secret_key) = cli_args.value_of("execution-jwt-secret-key") { + use std::fs::File; + use std::io::Write; + secret_file = client_config.data_dir.join(DEFAULT_JWT_FILE); + let mut jwt_secret_key_file = File::create(secret_file.clone()) + .map_err(|e| format!("Error while creating jwt_secret_key file: {:?}", e))?; + jwt_secret_key_file + .write_all(jwt_secret_key.as_bytes()) + .map_err(|e| { + format!( + "Error occured while writing to jwt_secret_key file: {:?}", + e + ) + })?; + } else { + return Err("Error! Please set either --execution-jwt file_path or --execution-jwt-secret-key directly via cli when using --execution-endpoint".to_string()); + } // Parse and set the payload builder, if any. 
if let Some(endpoint) = cli_args.value_of("builder") { diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index 104a7ead6d..780be5836d 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -48,6 +48,10 @@ If you set up an execution engine with `--execution-endpoint` then you *must* pr using `--execution-jwt`. This is a mandatory form of authentication that ensures that Lighthouse has authority to control the execution engine. +> Tip: the --execution-jwt-secret-key flag can be used instead of --execution-jwt . +> This is useful, for example, for users who wish to inject the value into a Docker container without +> needing to pass a jwt secret file. + The execution engine connection must be **exclusive**, i.e. you must have one execution node per beacon node. The reason for this is that the beacon node _controls_ the execution node. Please see the [FAQ](#faq) for further information about why many:1 and 1:many configurations are not diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 2e76d832ce..a00fd7a822 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -4,7 +4,7 @@ use crate::exec::{CommandLineTestExec, CompletedTest}; use eth1::Eth1Endpoint; use lighthouse_network::PeerId; use std::fs::File; -use std::io::Write; +use std::io::{Read, Write}; use std::net::IpAddr; use std::path::PathBuf; use std::process::Command; @@ -386,6 +386,27 @@ fn run_merge_execution_endpoints_flag_test(flag: &str) { }); } #[test] +fn run_execution_jwt_secret_key_is_persisted() { + let jwt_secret_key = "0x3cbc11b0d8fa16f3344eacfd6ff6430b9d30734450e8adcf5400f88d327dcb33"; + CommandLineTest::new() + .flag("execution-endpoint", Some("http://localhost:8551/")) + .flag("execution-jwt-secret-key", Some(jwt_secret_key)) + .run_with_zero_port() + .with_config(|config| { + let config = config.execution_layer.as_ref().unwrap(); + assert_eq!( + config.execution_endpoints[0].full.to_string(), 
+ "http://localhost:8551/" + ); + let mut file_jwt_secret_key = String::new(); + File::open(config.secret_files[0].clone()) + .expect("could not open jwt_secret_key file") + .read_to_string(&mut file_jwt_secret_key) + .expect("could not read from file"); + assert_eq!(file_jwt_secret_key, jwt_secret_key); + }); +} +#[test] fn merge_execution_endpoints_flag() { run_merge_execution_endpoints_flag_test("execution-endpoints") } From 9f242137b0fe1968f8b57993efb0d806e8265d1d Mon Sep 17 00:00:00 2001 From: will Date: Wed, 12 Oct 2022 23:40:42 +0000 Subject: [PATCH 22/27] Add a new bls test (#3235) ## Issue Addressed Which issue # does this PR address? #2629 ## Proposed Changes Please list or describe the changes introduced by this PR. 1. ci would dowload the bls test cases from https://github.com/ethereum/bls12-381-tests/ 2. all the bls test cases(except eth ones) would use cases in the archive from step one 3. The bls test cases from https://github.com/ethereum/consensus-spec-tests would stay there and no use . For the future , these bls test cases would be remove suggested from https://github.com/ethereum/consensus-spec-tests/issues/25 . So it would do no harm and compatible for future cases. ## Additional Info Please provide any additional information. For example, future considerations or information useful for reviewers. Question: I am not sure if I should implement tests about `deserialization_G1`, `deserialization_G2` and `hash_to_G2` for the issue. 
--- crypto/bls/src/lib.rs | 8 +++ testing/ef_tests/.gitignore | 1 + testing/ef_tests/Makefile | 21 +++++- testing/ef_tests/check_all_files_accessed.py | 8 ++- testing/ef_tests/src/cases.rs | 2 + .../ef_tests/src/cases/bls_aggregate_sigs.rs | 8 +-- .../src/cases/bls_aggregate_verify.rs | 8 +-- .../ef_tests/src/cases/bls_batch_verify.rs | 67 +++++++++++++++++ .../src/cases/bls_eth_aggregate_pubkeys.rs | 4 +- .../cases/bls_eth_fast_aggregate_verify.rs | 4 +- .../src/cases/bls_fast_aggregate_verify.rs | 8 +-- testing/ef_tests/src/cases/bls_sign_msg.rs | 8 +-- testing/ef_tests/src/cases/bls_verify_msg.rs | 8 +-- testing/ef_tests/src/cases/common.rs | 34 +++++---- testing/ef_tests/src/handler.rs | 71 +++++++++++++++++-- testing/ef_tests/tests/tests.rs | 6 ++ 16 files changed, 211 insertions(+), 55 deletions(-) create mode 100644 testing/ef_tests/src/cases/bls_batch_verify.rs diff --git a/crypto/bls/src/lib.rs b/crypto/bls/src/lib.rs index eacbc2b268..750e1bd5b8 100644 --- a/crypto/bls/src/lib.rs +++ b/crypto/bls/src/lib.rs @@ -90,6 +90,7 @@ pub mod generics { pub use crate::generic_secret_key::GenericSecretKey; pub use crate::generic_signature::GenericSignature; pub use crate::generic_signature_bytes::GenericSignatureBytes; + pub use crate::generic_signature_set::WrappedSignature; } /// Defines all the fundamental BLS points which should be exported by this crate by making @@ -109,6 +110,13 @@ macro_rules! 
define_mod { pub type AggregatePublicKey = GenericAggregatePublicKey; pub type Signature = GenericSignature; + pub type BlsWrappedSignature<'a> = WrappedSignature< + 'a, + bls_variant::PublicKey, + bls_variant::AggregatePublicKey, + bls_variant::Signature, + bls_variant::AggregateSignature, + >; pub type AggregateSignature = GenericAggregateSignature< bls_variant::PublicKey, bls_variant::AggregatePublicKey, diff --git a/testing/ef_tests/.gitignore b/testing/ef_tests/.gitignore index f3638b7bff..6a2ca1fe75 100644 --- a/testing/ef_tests/.gitignore +++ b/testing/ef_tests/.gitignore @@ -1,2 +1,3 @@ /consensus-spec-tests .accessed_file_log.txt +/bls12-381-tests diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index dc89cb5d5f..fac1ab905a 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -4,23 +4,38 @@ TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) REPO_NAME := consensus-spec-tests OUTPUT_DIR := ./$(REPO_NAME) - BASE_URL := https://github.com/ethereum/$(REPO_NAME)/releases/download/$(TESTS_TAG) +BLS_TEST_REPO_NAME := bls12-381-tests +BLS_TEST_TAG := v0.1.1 +BLS_TEST = bls_tests_yaml +BLS_TARBALL = $(patsubst %,%-$(BLS_TEST_TAG).tar.gz,$(BLS_TEST)) +BLS_OUTPUT_DIR := $(OUTPUT_DIR)/$(BLS_TEST_REPO_NAME) +BLS_BASE_URL := https://github.com/ethereum/$(BLS_TEST_REPO_NAME)/releases/download/$(BLS_TEST_TAG) + +all: + make $(OUTPUT_DIR) + make $(BLS_OUTPUT_DIR) + $(OUTPUT_DIR): $(TARBALLS) mkdir $(OUTPUT_DIR) for test_tarball in $^; do \ tar -xzf $$test_tarball -C $(OUTPUT_DIR);\ done +$(BLS_OUTPUT_DIR): + mkdir $(BLS_OUTPUT_DIR) + wget $(BLS_BASE_URL)/$(BLS_TEST).tar.gz -O $(BLS_TARBALL) + tar -xzf $(BLS_TARBALL) -C $(BLS_OUTPUT_DIR) + %-$(TESTS_TAG).tar.gz: wget $(BASE_URL)/$*.tar.gz -O $@ clean-test-files: - rm -rf $(OUTPUT_DIR) + rm -rf $(OUTPUT_DIR) $(BLS_OUTPUT_DIR) clean-archives: - rm -f $(TARBALLS) + rm -f $(TARBALLS) $(BLS_TARBALL) clean: clean-test-files clean-archives diff --git 
a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index a10ccf1e6f..158e875810 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -46,7 +46,13 @@ excluded_paths = [ # One of the EF researchers likes to pack the tarballs on a Mac ".*\.DS_Store.*", # More Mac weirdness. - "tests/mainnet/bellatrix/operations/deposit/pyspec_tests/deposit_with_previous_fork_version__valid_ineffective/._meta.yaml" + "tests/mainnet/bellatrix/operations/deposit/pyspec_tests/deposit_with_previous_fork_version__valid_ineffective/._meta.yaml", + # bls tests are moved to bls12-381-tests directory + "tests/general/phase0/bls", + # some bls tests are not included now + "bls12-381-tests/deserialization_G1", + "bls12-381-tests/deserialization_G2", + "bls12-381-tests/hash_to_G2" ] def normalize_path(path): diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index 64f4aa7538..ae70f1e07e 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -6,6 +6,7 @@ use types::ForkName; mod bls_aggregate_sigs; mod bls_aggregate_verify; +mod bls_batch_verify; mod bls_eth_aggregate_pubkeys; mod bls_eth_fast_aggregate_verify; mod bls_fast_aggregate_verify; @@ -29,6 +30,7 @@ mod transition; pub use self::fork_choice::*; pub use bls_aggregate_sigs::*; pub use bls_aggregate_verify::*; +pub use bls_batch_verify::*; pub use bls_eth_aggregate_pubkeys::*; pub use bls_eth_fast_aggregate_verify::*; pub use bls_fast_aggregate_verify::*; diff --git a/testing/ef_tests/src/cases/bls_aggregate_sigs.rs b/testing/ef_tests/src/cases/bls_aggregate_sigs.rs index e0d0dd76ae..81e186a66b 100644 --- a/testing/ef_tests/src/cases/bls_aggregate_sigs.rs +++ b/testing/ef_tests/src/cases/bls_aggregate_sigs.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_result; -use crate::cases::common::BlsCase; +use crate::impl_bls_load_case; use bls::{AggregateSignature, Signature}; use 
serde_derive::Deserialize; @@ -10,13 +10,9 @@ pub struct BlsAggregateSigs { pub output: String, } -impl BlsCase for BlsAggregateSigs {} +impl_bls_load_case!(BlsAggregateSigs); impl Case for BlsAggregateSigs { - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name == ForkName::Base - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let mut aggregate_signature = AggregateSignature::infinity(); diff --git a/testing/ef_tests/src/cases/bls_aggregate_verify.rs b/testing/ef_tests/src/cases/bls_aggregate_verify.rs index ea7a7664fc..e9539dc15e 100644 --- a/testing/ef_tests/src/cases/bls_aggregate_verify.rs +++ b/testing/ef_tests/src/cases/bls_aggregate_verify.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_result; -use crate::cases::common::BlsCase; +use crate::impl_bls_load_case; use bls::{AggregateSignature, PublicKeyBytes}; use serde_derive::Deserialize; use types::Hash256; @@ -18,13 +18,9 @@ pub struct BlsAggregateVerify { pub output: bool, } -impl BlsCase for BlsAggregateVerify {} +impl_bls_load_case!(BlsAggregateVerify); impl Case for BlsAggregateVerify { - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name == ForkName::Base - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let messages = self .input diff --git a/testing/ef_tests/src/cases/bls_batch_verify.rs b/testing/ef_tests/src/cases/bls_batch_verify.rs new file mode 100644 index 0000000000..de8721d67d --- /dev/null +++ b/testing/ef_tests/src/cases/bls_batch_verify.rs @@ -0,0 +1,67 @@ +use super::*; +use crate::case_result::compare_result; +use crate::impl_bls_load_case; +use bls::{verify_signature_sets, BlsWrappedSignature, PublicKeyBytes, Signature, SignatureSet}; +use serde_derive::Deserialize; +use std::borrow::Cow; +use std::str::FromStr; +use types::Hash256; + +#[derive(Debug, Clone, Deserialize)] +pub struct BlsBatchVerifyInput { + pubkeys: Vec, + messages: Vec, + signatures: Vec, +} + 
+#[derive(Debug, Clone, Deserialize)] +pub struct BlsBatchVerify { + pub input: BlsBatchVerifyInput, + pub output: bool, +} + +impl_bls_load_case!(BlsBatchVerify); + +impl Case for BlsBatchVerify { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let messages = self + .input + .messages + .iter() + .map(|s| Hash256::from_str(s).map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))) + .collect::, _>>()?; + + let pubkeys = self + .input + .pubkeys + .iter() + .map(|pkb| { + pkb.decompress() + .map_err(|_| Error::FailedToParseTest("pubkeys parse error".to_string())) + }) + .collect::, _>>()?; + + let signatures = self + .input + .signatures + .iter() + .map(|s| { + Signature::from_str(s).map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) + }) + .collect::, _>>()?; + + let signature_set = messages + .iter() + .zip(pubkeys.iter()) + .zip(signatures.iter()) + .map(|((&message, pubkey), signature)| { + let wraped_signature = BlsWrappedSignature::from(signature); + SignatureSet::single_pubkey(wraped_signature, Cow::Borrowed(pubkey), message) + }) + .collect::>(); + + let signature_valid = verify_signature_sets(signature_set.iter()); + + compare_result::(&Ok(signature_valid), &Some(self.output)) + } +} diff --git a/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs b/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs index 2ecc3b603d..c41fbca393 100644 --- a/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs +++ b/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_result; -use crate::cases::common::BlsCase; +use crate::impl_bls_load_case; use bls::{AggregatePublicKey, PublicKeyBytes}; use serde_derive::Deserialize; @@ -10,7 +10,7 @@ pub struct BlsEthAggregatePubkeys { pub output: Option, } -impl BlsCase for BlsEthAggregatePubkeys {} +impl_bls_load_case!(BlsEthAggregatePubkeys, "data.yaml"); impl Case for BlsEthAggregatePubkeys { fn 
is_enabled_for_fork(fork_name: ForkName) -> bool { diff --git a/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs b/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs index 62f9eb30c3..80e018459b 100644 --- a/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs +++ b/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_result; -use crate::cases::common::BlsCase; +use crate::impl_bls_load_case; use bls::{AggregateSignature, PublicKeyBytes}; use serde_derive::Deserialize; use std::convert::TryInto; @@ -20,7 +20,7 @@ pub struct BlsEthFastAggregateVerify { pub output: bool, } -impl BlsCase for BlsEthFastAggregateVerify {} +impl_bls_load_case!(BlsEthFastAggregateVerify, "data.yaml"); impl Case for BlsEthFastAggregateVerify { fn is_enabled_for_fork(fork_name: ForkName) -> bool { diff --git a/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs b/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs index 9722c05dc8..608995db9d 100644 --- a/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs +++ b/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_result; -use crate::cases::common::BlsCase; +use crate::impl_bls_load_case; use bls::{AggregateSignature, PublicKeyBytes}; use serde_derive::Deserialize; use std::convert::TryInto; @@ -20,13 +20,9 @@ pub struct BlsFastAggregateVerify { pub output: bool, } -impl BlsCase for BlsFastAggregateVerify {} +impl_bls_load_case!(BlsFastAggregateVerify); impl Case for BlsFastAggregateVerify { - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name == ForkName::Base - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let message = Hash256::from_slice( &hex::decode(&self.input.message[2..]) diff --git a/testing/ef_tests/src/cases/bls_sign_msg.rs b/testing/ef_tests/src/cases/bls_sign_msg.rs index ad6b40cb77..53c13b569a 100644 
--- a/testing/ef_tests/src/cases/bls_sign_msg.rs +++ b/testing/ef_tests/src/cases/bls_sign_msg.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_result; -use crate::cases::common::BlsCase; +use crate::impl_bls_load_case; use bls::SecretKey; use serde_derive::Deserialize; use types::Hash256; @@ -17,13 +17,9 @@ pub struct BlsSign { pub output: Option, } -impl BlsCase for BlsSign {} +impl_bls_load_case!(BlsSign); impl Case for BlsSign { - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name == ForkName::Base - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { // Convert private_key and message to required types let sk = hex::decode(&self.input.privkey[2..]) diff --git a/testing/ef_tests/src/cases/bls_verify_msg.rs b/testing/ef_tests/src/cases/bls_verify_msg.rs index 190c09d52f..779b3cf75f 100644 --- a/testing/ef_tests/src/cases/bls_verify_msg.rs +++ b/testing/ef_tests/src/cases/bls_verify_msg.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_result; -use crate::cases::common::BlsCase; +use crate::impl_bls_load_case; use bls::{PublicKeyBytes, Signature, SignatureBytes}; use serde_derive::Deserialize; use std::convert::TryInto; @@ -19,13 +19,9 @@ pub struct BlsVerify { pub output: bool, } -impl BlsCase for BlsVerify {} +impl_bls_load_case!(BlsVerify); impl Case for BlsVerify { - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name == ForkName::Base - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let message = hex::decode(&self.input.message[2..]) .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index e77e561939..e15a2e2ca3 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -1,24 +1,11 @@ -use crate::cases::LoadCase; -use crate::decode::yaml_decode_file; -use crate::error::Error; use 
serde_derive::Deserialize; use ssz::Encode; use ssz_derive::{Decode, Encode}; use std::convert::TryFrom; use std::fmt::Debug; -use std::path::Path; use tree_hash::TreeHash; use types::ForkName; -/// Trait for all BLS cases to eliminate some boilerplate. -pub trait BlsCase: serde::de::DeserializeOwned {} - -impl LoadCase for T { - fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { - yaml_decode_file(&path.join("data.yaml")) - } -} - /// Macro to wrap U128 and U256 so they deserialize correctly. macro_rules! uint_wrapper { ($wrapper_name:ident, $wrapped_type:ty) => { @@ -80,3 +67,24 @@ pub fn previous_fork(fork_name: ForkName) -> ForkName { ForkName::Merge => ForkName::Altair, // TODO: Check this when tests are released.. } } + +#[macro_export] +macro_rules! impl_bls_load_case { + ($case_name:ident) => { + use $crate::decode::yaml_decode_file; + impl LoadCase for $case_name { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { + yaml_decode_file(&path) + } + } + }; + + ($case_name:ident, $sub_path_name:expr) => { + use $crate::decode::yaml_decode_file; + impl LoadCase for $case_name { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { + yaml_decode_file(&path.join($sub_path_name)) + } + } + }; +} diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 13c0a8c54a..92d5db7fde 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -49,8 +49,9 @@ pub trait Handler { let as_directory = |entry: Result| -> Option { entry .ok() - .filter(|e| e.file_type().map(|ty| ty.is_dir()).unwrap_or(false)) + .filter(|e| e.file_type().map(|ty| ty.is_dir()).unwrap()) }; + let test_cases = fs::read_dir(&handler_path) .unwrap_or_else(|e| panic!("handler dir {} exists: {:?}", handler_path.display(), e)) .filter_map(as_directory) @@ -58,6 +59,7 @@ pub trait Handler { .filter_map(as_directory) .map(|test_case_dir| { let path = test_case_dir.path(); + let case = Self::Case::load_from_dir(&path, 
fork_name).expect("test should load"); (path, case) }) @@ -75,7 +77,7 @@ pub trait Handler { } } -macro_rules! bls_handler { +macro_rules! bls_eth_handler { ($runner_name: ident, $case_name:ident, $handler_name:expr) => { #[derive(Derivative)] #[derivative(Default(bound = ""))] @@ -95,8 +97,69 @@ macro_rules! bls_handler { }; } +macro_rules! bls_handler { + ($runner_name: ident, $case_name:ident, $handler_name:expr) => { + #[derive(Derivative)] + #[derivative(Default(bound = ""))] + pub struct $runner_name; + + impl Handler for $runner_name { + type Case = cases::$case_name; + + fn runner_name() -> &'static str { + "bls" + } + + fn config_name() -> &'static str { + "bls12-381-tests" + } + + fn handler_name(&self) -> String { + $handler_name.into() + } + + fn run(&self) { + let fork_name = ForkName::Base; + let fork_name_str = fork_name.to_string(); + let handler_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("consensus-spec-tests") + .join(Self::config_name()) + .join(self.handler_name()); + + let as_file = |entry: Result| -> Option { + entry + .ok() + .filter(|e| e.file_type().map(|ty| ty.is_file()).unwrap_or(false)) + }; + let test_cases: Vec<(PathBuf, Self::Case)> = fs::read_dir(&handler_path) + .expect("handler dir exists") + .filter_map(as_file) + .map(|test_case_path| { + let path = test_case_path.path(); + let case = + Self::Case::load_from_dir(&path, fork_name).expect("test should load"); + + (path, case) + }) + .collect(); + + let results = Cases { test_cases }.test_results(fork_name, Self::use_rayon()); + + let name = format!( + "{}/{}/{}", + fork_name_str, + Self::runner_name(), + self.handler_name() + ); + crate::results::assert_tests_pass(&name, &handler_path, &results); + } + } + }; +} + bls_handler!(BlsAggregateSigsHandler, BlsAggregateSigs, "aggregate"); bls_handler!(BlsSignMsgHandler, BlsSign, "sign"); +bls_handler!(BlsBatchVerifyHandler, BlsBatchVerify, "batch_verify"); bls_handler!(BlsVerifyMsgHandler, BlsVerify, "verify"); 
bls_handler!( BlsAggregateVerifyHandler, @@ -108,12 +171,12 @@ bls_handler!( BlsFastAggregateVerify, "fast_aggregate_verify" ); -bls_handler!( +bls_eth_handler!( BlsEthAggregatePubkeysHandler, BlsEthAggregatePubkeys, "eth_aggregate_pubkeys" ); -bls_handler!( +bls_eth_handler!( BlsEthFastAggregateVerifyHandler, BlsEthFastAggregateVerify, "eth_fast_aggregate_verify" diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 31abbd1591..2c8b9d223b 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -118,6 +118,12 @@ fn bls_verify() { BlsVerifyMsgHandler::default().run(); } +#[test] +#[cfg(not(feature = "fake_crypto"))] +fn bls_batch_verify() { + BlsBatchVerifyHandler::default().run(); +} + #[test] #[cfg(not(feature = "fake_crypto"))] fn bls_aggregate_verify() { From ca9dc8e0947a0ec83f31830aaabc1ffbd3c14c9c Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sat, 15 Oct 2022 22:25:51 +0000 Subject: [PATCH 23/27] Optimise HTTP validator lookups (#3559) ## Issue Addressed While digging around in some logs I noticed that queries for validators by pubkey were taking 10ms+, which seemed too long. This was due to a loop through the entire validator registry for each lookup. ## Proposed Changes Rather than using a loop through the register, this PR utilises the pubkey cache which is usually initialised at the head*. In case the cache isn't built, we fall back to the previous loop logic. In the vast majority of cases I expect the cache will be built, as the validator client queries at the `head` where all caches should be built. ## Additional Info *I had to modify the cache build that runs after fork choice to build the pubkey cache. I think it had been optimised out, perhaps accidentally. I think it's preferable to have the exit cache and the pubkey cache built on the head state, as they are required for verifying deposits and exits respectively, and we may as well build them off the hot path of block processing. 
Previously they'd get built the first time a deposit or exit needed to be verified. I've deleted the unused `map_state` function which was obsoleted by `map_state_and_execution_optimistic`. --- .../beacon_chain/src/canonical_head.rs | 6 ++--- beacon_node/http_api/src/lib.rs | 21 ++++++++++++++-- beacon_node/http_api/src/state_id.rs | 25 ++----------------- .../per_block_processing/verify_deposit.rs | 4 +-- consensus/types/src/beacon_state.rs | 15 +++++++++++ 5 files changed, 40 insertions(+), 31 deletions(-) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index c9bd6db0e6..53e0fbaac9 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -654,11 +654,11 @@ impl BeaconChain { }) }) .and_then(|mut snapshot| { - // Regardless of where we got the state from, attempt to build the committee - // caches. + // Regardless of where we got the state from, attempt to build all the + // caches except the tree hash cache. 
snapshot .beacon_state - .build_all_committee_caches(&self.spec) + .build_all_caches(&self.spec) .map_err(Into::into) .map(|()| snapshot) })?; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5b4fa5816d..51e97c893d 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -668,9 +668,10 @@ pub fn serve( "Invalid validator ID".to_string(), )) })) + .and(log_filter.clone()) .and(warp::path::end()) .and_then( - |state_id: StateId, chain: Arc>, validator_id: ValidatorId| { + |state_id: StateId, chain: Arc>, validator_id: ValidatorId, log| { blocking_json_task(move || { let (data, execution_optimistic) = state_id .map_state_and_execution_optimistic( @@ -678,7 +679,23 @@ pub fn serve( |state, execution_optimistic| { let index_opt = match &validator_id { ValidatorId::PublicKey(pubkey) => { - state.validators().iter().position(|v| v.pubkey == *pubkey) + // Fast path: use the pubkey cache which is probably + // initialised at the head. + match state.get_validator_index_read_only(pubkey) { + Ok(result) => result, + Err(e) => { + // Slow path, fall back to iteration. + debug!( + log, + "Validator look-up cache miss"; + "reason" => ?e, + ); + state + .validators() + .iter() + .position(|v| v.pubkey == *pubkey) + } + } } ValidatorId::Index(index) => Some(*index as usize), }; diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 051789c953..44354217bc 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -155,33 +155,12 @@ impl StateId { Ok((state, execution_optimistic)) } - /* /// Map a function across the `BeaconState` identified by `self`. /// + /// The optimistic status of the requested state is also provided to the `func` closure. + /// /// This function will avoid instantiating/copying a new state when `self` points to the head /// of the chain. 
- #[allow(dead_code)] - pub fn map_state( - &self, - chain: &BeaconChain, - func: F, - ) -> Result - where - F: Fn(&BeaconState) -> Result, - { - match &self.0 { - CoreStateId::Head => chain - .with_head(|snapshot| Ok(func(&snapshot.beacon_state))) - .map_err(warp_utils::reject::beacon_chain_error)?, - _ => func(&self.state(chain)?), - } - } - */ - - /// Functions the same as `map_state` but additionally computes the value of - /// `execution_optimistic` of the state identified by `self`. - /// - /// This is to avoid re-instantiating `state` unnecessarily. pub fn map_state_and_execution_optimistic( &self, chain: &BeaconChain, diff --git a/consensus/state_processing/src/per_block_processing/verify_deposit.rs b/consensus/state_processing/src/per_block_processing/verify_deposit.rs index 3b43a8b41b..181b27ca1a 100644 --- a/consensus/state_processing/src/per_block_processing/verify_deposit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_deposit.rs @@ -29,9 +29,7 @@ pub fn verify_deposit_signature(deposit_data: &DepositData, spec: &ChainSpec) -> /// Returns a `Some(validator index)` if a pubkey already exists in the `validators`, /// otherwise returns `None`. /// -/// ## Errors -/// -/// Errors if the state's `pubkey_cache` is not current. +/// Builds the pubkey cache if it is not already built. pub fn get_existing_validator_index( state: &mut BeaconState, pub_key: &PublicKeyBytes, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index a5d00cdf2d..46a431d073 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -447,6 +447,21 @@ impl BeaconState { Ok(self.pubkey_cache().get(pubkey)) } + /// Immutable variant of `get_validator_index` which errors if the cache is not up to date. 
+ pub fn get_validator_index_read_only( + &self, + pubkey: &PublicKeyBytes, + ) -> Result, Error> { + let pubkey_cache = self.pubkey_cache(); + if pubkey_cache.len() != self.validators().len() { + return Err(Error::PubkeyCacheIncomplete { + cache_len: pubkey_cache.len(), + registry_len: self.validators().len(), + }); + } + Ok(pubkey_cache.get(pubkey)) + } + /// The epoch corresponding to `self.slot()`. pub fn current_epoch(&self) -> Epoch { self.slot().epoch(T::slots_per_epoch()) From e4cbdc1c77d94e935ab838b2c2b1d5c4d7bf4018 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sat, 15 Oct 2022 22:25:52 +0000 Subject: [PATCH 24/27] Optimistic sync spec tests (v1.2.0) (#3564) ## Issue Addressed Implements new optimistic sync test format from https://github.com/ethereum/consensus-specs/pull/2982. ## Proposed Changes - Add parsing and runner support for the new test format. - Extend the mock EL with a set of canned responses keyed by block hash. Although this doubles up on some of the existing functionality I think it's really nice to use compared to the `preloaded_responses` or static responses. I think we could write novel new opt sync tests using these primtives much more easily than the previous ones. Forks are natively supported, and different responses to `forkchoiceUpdated` and `newPayload` are also straight-forward. ## Additional Info Blocked on merge of the spec PR and release of new test vectors. 
--- Cargo.lock | 1 + .../src/engine_api/json_structures.rs | 4 +- .../src/test_utils/handle_rpc.rs | 14 ++++ .../execution_layer/src/test_utils/mod.rs | 46 +++++++++++++ testing/ef_tests/Cargo.toml | 1 + testing/ef_tests/Makefile | 2 +- .../ef_tests/src/cases/bls_aggregate_sigs.rs | 17 +++-- testing/ef_tests/src/cases/fork_choice.rs | 69 ++++++++++++++++--- testing/ef_tests/src/cases/operations.rs | 5 ++ testing/ef_tests/src/handler.rs | 31 +++++++++ testing/ef_tests/tests/tests.rs | 6 ++ 11 files changed, 177 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d0e3622e77..34c932307d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1500,6 +1500,7 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "ethereum-types 0.12.1", + "execution_layer", "fork_choice", "fs2", "hex", diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 31aa79f055..2b0c3a4c98 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,5 +1,6 @@ use super::*; use serde::{Deserialize, Serialize}; +use strum::EnumString; use types::{EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList}; #[derive(Debug, PartialEq, Serialize, Deserialize)] @@ -311,8 +312,9 @@ impl From for ForkChoiceState { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, EnumString)] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] +#[strum(serialize_all = "SCREAMING_SNAKE_CASE")] pub enum JsonPayloadStatusV1Status { Valid, Invalid, diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 975f09fa5e..ac677bf331 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -77,6 +77,11 @@ pub 
async fn handle_rpc( ENGINE_NEW_PAYLOAD_V1 => { let request: JsonExecutionPayloadV1 = get_param(params, 0)?; + // Canned responses set by block hash take priority. + if let Some(status) = ctx.get_new_payload_status(&request.block_hash) { + return Ok(serde_json::to_value(JsonPayloadStatusV1::from(status)).unwrap()); + } + let (static_response, should_import) = if let Some(mut response) = ctx.static_new_payload_response.lock().clone() { if response.status.status == PayloadStatusV1Status::Valid { @@ -120,6 +125,15 @@ pub async fn handle_rpc( let head_block_hash = forkchoice_state.head_block_hash; + // Canned responses set by block hash take priority. + if let Some(status) = ctx.get_fcu_payload_status(&head_block_hash) { + let response = JsonForkchoiceUpdatedV1Response { + payload_status: JsonPayloadStatusV1::from(status), + payload_id: None, + }; + return Ok(serde_json::to_value(response).unwrap()); + } + let mut response = ctx .execution_block_generator .write() diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index aaeea8aa5a..f5066879a7 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -12,6 +12,7 @@ use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use serde::{Deserialize, Serialize}; use serde_json::json; use slog::{info, Logger}; +use std::collections::HashMap; use std::convert::Infallible; use std::future::Future; use std::marker::PhantomData; @@ -98,6 +99,8 @@ impl MockServer { static_new_payload_response: <_>::default(), static_forkchoice_updated_response: <_>::default(), static_get_block_by_hash_response: <_>::default(), + new_payload_statuses: <_>::default(), + fcu_payload_statuses: <_>::default(), _phantom: PhantomData, }); @@ -370,6 +373,25 @@ impl MockServer { pub fn drop_all_blocks(&self) { self.ctx.execution_block_generator.write().drop_all_blocks() } + + pub fn set_payload_statuses(&self, block_hash: 
ExecutionBlockHash, status: PayloadStatusV1) { + self.set_new_payload_status(block_hash, status.clone()); + self.set_fcu_payload_status(block_hash, status); + } + + pub fn set_new_payload_status(&self, block_hash: ExecutionBlockHash, status: PayloadStatusV1) { + self.ctx + .new_payload_statuses + .lock() + .insert(block_hash, status); + } + + pub fn set_fcu_payload_status(&self, block_hash: ExecutionBlockHash, status: PayloadStatusV1) { + self.ctx + .fcu_payload_statuses + .lock() + .insert(block_hash, status); + } } #[derive(Debug)] @@ -419,9 +441,33 @@ pub struct Context { pub static_new_payload_response: Arc>>, pub static_forkchoice_updated_response: Arc>>, pub static_get_block_by_hash_response: Arc>>>, + + // Canned responses by block hash. + // + // This is a more flexible and less stateful alternative to `static_new_payload_response` + // and `preloaded_responses`. + pub new_payload_statuses: Arc>>, + pub fcu_payload_statuses: Arc>>, + pub _phantom: PhantomData, } +impl Context { + pub fn get_new_payload_status( + &self, + block_hash: &ExecutionBlockHash, + ) -> Option { + self.new_payload_statuses.lock().get(block_hash).cloned() + } + + pub fn get_fcu_payload_status( + &self, + block_hash: &ExecutionBlockHash, + ) -> Option { + self.fcu_payload_statuses.lock().get(block_hash).cloned() + } +} + /// Configuration for the HTTP server. 
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] pub struct Config { diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index e04d671396..04a222c7af 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -35,3 +35,4 @@ fs2 = "0.4.3" beacon_chain = { path = "../../beacon_node/beacon_chain" } store = { path = "../../beacon_node/store" } fork_choice = { path = "../../consensus/fork_choice" } +execution_layer = { path = "../../beacon_node/execution_layer" } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index fac1ab905a..e05ef0b06b 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.2.0-rc.3 +TESTS_TAG := v1.2.0 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/src/cases/bls_aggregate_sigs.rs b/testing/ef_tests/src/cases/bls_aggregate_sigs.rs index 81e186a66b..53387ee4d7 100644 --- a/testing/ef_tests/src/cases/bls_aggregate_sigs.rs +++ b/testing/ef_tests/src/cases/bls_aggregate_sigs.rs @@ -7,7 +7,7 @@ use serde_derive::Deserialize; #[derive(Debug, Clone, Deserialize)] pub struct BlsAggregateSigs { pub input: Vec, - pub output: String, + pub output: Option, } impl_bls_load_case!(BlsAggregateSigs); @@ -25,14 +25,13 @@ impl Case for BlsAggregateSigs { aggregate_signature.add_assign(&sig); } - // Check for YAML null value, indicating invalid input. This is a bit of a hack, - // as our mutating `aggregate_signature.add` API doesn't play nicely with aggregating 0 - // inputs. - let output_bytes = if self.output == "~" { - AggregateSignature::infinity().serialize().to_vec() - } else { - hex::decode(&self.output[2..]) - .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))? + let output_bytes = match self.output.as_deref() { + // Check for YAML null value, indicating invalid input. 
This is a bit of a hack, + // as our mutating `aggregate_signature.add` API doesn't play nicely with aggregating 0 + // inputs. + Some("~") | None => AggregateSignature::infinity().serialize().to_vec(), + Some(output) => hex::decode(&output[2..]) + .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?, }; let aggregate_signature = Ok(aggregate_signature.serialize().to_vec()); diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 0e1bb2aced..8faf4db821 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -9,7 +9,8 @@ use beacon_chain::{ test_utils::{BeaconChainHarness, EphemeralHarnessType}, BeaconChainTypes, CachedHead, CountUnrealized, }; -use serde_derive::Deserialize; +use execution_layer::{json_structures::JsonPayloadStatusV1Status, PayloadStatusV1}; +use serde::Deserialize; use ssz_derive::Decode; use state_processing::state_advance::complete_state_advance; use std::future::Future; @@ -50,16 +51,53 @@ pub struct Checks { proposer_boost_root: Option, } +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct PayloadStatus { + status: JsonPayloadStatusV1Status, + latest_valid_hash: Option, + validation_error: Option, +} + +impl From for PayloadStatusV1 { + fn from(status: PayloadStatus) -> Self { + PayloadStatusV1 { + status: status.status.into(), + latest_valid_hash: status.latest_valid_hash, + validation_error: status.validation_error, + } + } +} + #[derive(Debug, Clone, Deserialize)] #[serde(untagged, deny_unknown_fields)] pub enum Step { - Tick { tick: u64 }, - ValidBlock { block: B }, - MaybeValidBlock { block: B, valid: bool }, - Attestation { attestation: A }, - AttesterSlashing { attester_slashing: AS }, - PowBlock { pow_block: P }, - Checks { checks: Box }, + Tick { + tick: u64, + }, + ValidBlock { + block: B, + }, + MaybeValidBlock { + block: B, + valid: bool, + }, + Attestation { + attestation: A, + }, + 
AttesterSlashing { + attester_slashing: AS, + }, + PowBlock { + pow_block: P, + }, + OnPayloadInfo { + block_hash: ExecutionBlockHash, + payload_status: PayloadStatus, + }, + Checks { + checks: Box, + }, } #[derive(Debug, Clone, Deserialize)] @@ -119,6 +157,13 @@ impl LoadCase for ForkChoiceTest { ssz_decode_file(&path.join(format!("{}.ssz_snappy", pow_block))) .map(|pow_block| Step::PowBlock { pow_block }) } + Step::OnPayloadInfo { + block_hash, + payload_status, + } => Ok(Step::OnPayloadInfo { + block_hash, + payload_status, + }), Step::Checks { checks } => Ok(Step::Checks { checks }), }) .collect::>()?; @@ -168,6 +213,14 @@ impl Case for ForkChoiceTest { tester.process_attester_slashing(attester_slashing) } Step::PowBlock { pow_block } => tester.process_pow_block(pow_block), + Step::OnPayloadInfo { + block_hash, + payload_status, + } => { + let el = tester.harness.mock_execution_layer.as_ref().unwrap(); + el.server + .set_payload_statuses(*block_hash, payload_status.clone().into()); + } Step::Checks { checks } => { let Checks { head, diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 798dae083b..54195cc236 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -117,6 +117,11 @@ impl Operation for Deposit { ssz_decode_file(path) } + fn is_enabled_for_fork(_: ForkName) -> bool { + // Some deposit tests require signature verification but are not marked as such. 
+ cfg!(not(feature = "fake_crypto")) + } + fn apply_to( &self, state: &mut BeaconState, diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 92d5db7fde..dd5ed82da7 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -546,6 +546,37 @@ impl Handler for ForkChoiceHandler { } } +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct OptimisticSyncHandler(PhantomData); + +impl Handler for OptimisticSyncHandler { + type Case = cases::ForkChoiceTest; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "sync" + } + + fn handler_name(&self) -> String { + "optimistic".into() + } + + fn use_rayon() -> bool { + // The opt sync tests use `block_on` which can cause panics with rayon. + false + } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + fork_name != ForkName::Base + && fork_name != ForkName::Altair + && cfg!(not(feature = "fake_crypto")) + } +} + #[derive(Derivative)] #[derivative(Default(bound = ""))] pub struct GenesisValidityHandler(PhantomData); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 2c8b9d223b..28c57028cf 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -448,6 +448,12 @@ fn fork_choice_ex_ante() { ForkChoiceHandler::::new("ex_ante").run(); } +#[test] +fn optimistic_sync() { + OptimisticSyncHandler::::default().run(); + OptimisticSyncHandler::::default().run(); +} + #[test] fn genesis_initialization() { GenesisInitializationHandler::::default().run(); From 59ec6b71b8094f3673f9ca3471d5d93927f7097e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sat, 15 Oct 2022 22:25:54 +0000 Subject: [PATCH 25/27] Consensus context with proposer index caching (#3604) ## Issue Addressed Closes https://github.com/sigp/lighthouse/issues/2371 ## Proposed Changes Backport some changes from `tree-states` that remove duplicated calculations of the `proposer_index`. 
With this change the proposer index should be calculated only once for each block, and then plumbed through to every place it is required. ## Additional Info In future I hope to add more data to the consensus context that is cached on a per-epoch basis, like the effective balances of validators and the base rewards. There are some other changes to remove indexing in tests that were also useful for `tree-states` (the `tree-states` types don't implement `Index`). --- beacon_node/beacon_chain/src/beacon_chain.rs | 7 +- .../beacon_chain/src/block_verification.rs | 53 ++++++++--- beacon_node/beacon_chain/src/fork_revert.rs | 7 +- .../beacon_chain/tests/block_verification.rs | 8 +- beacon_node/store/src/reconstruct.rs | 9 +- .../state_processing/src/block_replayer.rs | 9 +- .../src/common/slash_validator.rs | 15 ++- .../state_processing/src/consensus_context.rs | 92 +++++++++++++++++++ consensus/state_processing/src/lib.rs | 2 + .../src/per_block_processing.rs | 38 ++++++-- .../block_signature_verifier.rs | 15 ++- .../src/per_block_processing/errors.rs | 24 ++++- .../process_operations.rs | 29 ++++-- .../per_block_processing/signature_sets.rs | 16 +++- .../src/per_block_processing/tests.rs | 61 +++++++++--- lcli/src/skip_slots.rs | 2 +- lcli/src/transition_blocks.rs | 8 +- testing/ef_tests/src/cases/operations.rs | 50 +++++++--- testing/ef_tests/src/cases/sanity_blocks.rs | 12 ++- testing/ef_tests/src/cases/transition.rs | 5 +- testing/state_transition_vectors/src/exit.rs | 26 ++++-- 21 files changed, 388 insertions(+), 100 deletions(-) create mode 100644 consensus/state_processing/src/consensus_context.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3eecc9a0dc..4429abc4c9 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -80,7 +80,7 @@ use state_processing::{ }, per_slot_processing, state_advance::{complete_state_advance, 
partial_state_advance}, - BlockSignatureStrategy, SigVerifiedOp, VerifyBlockRoot, VerifyOperation, + BlockSignatureStrategy, ConsensusContext, SigVerifiedOp, VerifyBlockRoot, VerifyOperation, }; use std::cmp::Ordering; use std::collections::HashMap; @@ -3498,7 +3498,6 @@ impl BeaconChain { } let slot = state.slot(); - let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64; let sync_aggregate = if matches!(&state, BeaconState::Base(_)) { None @@ -3645,12 +3644,14 @@ impl BeaconChain { ProduceBlockVerification::VerifyRandao => BlockSignatureStrategy::VerifyRandao, ProduceBlockVerification::NoVerification => BlockSignatureStrategy::NoVerification, }; + // Use a context without block root or proposer index so that both are checked. + let mut ctxt = ConsensusContext::new(block.slot()); per_block_processing( &mut state, &block, - None, signature_strategy, VerifyBlockRoot::True, + &mut ctxt, &self.spec, )?; drop(process_timer); diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index f83bc535d9..7f59f1cfec 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -71,7 +71,8 @@ use state_processing::{ block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError}, per_block_processing, per_slot_processing, state_advance::partial_state_advance, - BlockProcessingError, BlockSignatureStrategy, SlotProcessingError, VerifyBlockRoot, + BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, + VerifyBlockRoot, }; use std::borrow::Cow; use std::fs; @@ -549,7 +550,7 @@ pub fn signature_verify_chain_segment( let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); for (block_root, block) in &chain_segment { - signature_verifier.include_all_signatures(block, Some(*block_root))?; + signature_verifier.include_all_signatures(block, 
Some(*block_root), None)?; } if signature_verifier.verify().is_err() { @@ -560,10 +561,17 @@ pub fn signature_verify_chain_segment( let mut signature_verified_blocks = chain_segment .into_iter() - .map(|(block_root, block)| SignatureVerifiedBlock { - block, - block_root, - parent: None, + .map(|(block_root, block)| { + // Proposer index has already been verified above during signature verification. + let consensus_context = ConsensusContext::new(block.slot()) + .set_current_block_root(block_root) + .set_proposer_index(block.message().proposer_index()); + SignatureVerifiedBlock { + block, + block_root, + parent: None, + consensus_context, + } }) .collect::>(); @@ -582,6 +590,7 @@ pub struct GossipVerifiedBlock { pub block: Arc>, pub block_root: Hash256, parent: Option>, + consensus_context: ConsensusContext, } /// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit @@ -590,6 +599,7 @@ pub struct SignatureVerifiedBlock { block: Arc>, block_root: Hash256, parent: Option>, + consensus_context: ConsensusContext, } /// Used to await the result of executing payload with a remote EE. @@ -863,10 +873,16 @@ impl GossipVerifiedBlock { // Validate the block's execution_payload (if any). validate_execution_payload_for_gossip(&parent_block, block.message(), chain)?; + // Having checked the proposer index and the block root we can cache them. 
+ let consensus_context = ConsensusContext::new(block.slot()) + .set_current_block_root(block_root) + .set_proposer_index(block.message().proposer_index()); + Ok(Self { block, block_root, parent, + consensus_context, }) } @@ -926,10 +942,13 @@ impl SignatureVerifiedBlock { let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); - signature_verifier.include_all_signatures(&block, Some(block_root))?; + signature_verifier.include_all_signatures(&block, Some(block_root), None)?; if signature_verifier.verify().is_ok() { Ok(Self { + consensus_context: ConsensusContext::new(block.slot()) + .set_current_block_root(block_root) + .set_proposer_index(block.message().proposer_index()), block, block_root, parent: Some(parent), @@ -972,13 +991,18 @@ impl SignatureVerifiedBlock { let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); - signature_verifier.include_all_signatures_except_proposal(&block)?; + // Gossip verification has already checked the proposer index. Use it to check the RANDAO + // signature. + let verified_proposer_index = Some(block.message().proposer_index()); + signature_verifier + .include_all_signatures_except_proposal(&block, verified_proposer_index)?; if signature_verifier.verify().is_ok() { Ok(Self { block, block_root: from.block_root, parent: Some(parent), + consensus_context: from.consensus_context, }) } else { Err(BlockError::InvalidSignature) @@ -1015,8 +1039,14 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc .map_err(|e| BlockSlashInfo::SignatureValid(header.clone(), e))? 
}; - ExecutionPendingBlock::from_signature_verified_components(block, block_root, parent, chain) - .map_err(|e| BlockSlashInfo::SignatureValid(header, e)) + ExecutionPendingBlock::from_signature_verified_components( + block, + block_root, + parent, + self.consensus_context, + chain, + ) + .map_err(|e| BlockSlashInfo::SignatureValid(header, e)) } fn block(&self) -> &SignedBeaconBlock { @@ -1057,6 +1087,7 @@ impl ExecutionPendingBlock { block: Arc>, block_root: Hash256, parent: PreProcessingSnapshot, + mut consensus_context: ConsensusContext, chain: &Arc>, ) -> Result> { if let Some(parent) = chain @@ -1340,10 +1371,10 @@ impl ExecutionPendingBlock { if let Err(err) = per_block_processing( &mut state, &block, - Some(block_root), // Signatures were verified earlier in this function. BlockSignatureStrategy::NoVerification, VerifyBlockRoot::True, + &mut consensus_context, &chain.spec, ) { match err { diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index 654b2713b1..3d48dfd8f6 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -5,7 +5,8 @@ use proto_array::CountUnrealizedFull; use slog::{info, warn, Logger}; use state_processing::state_advance::complete_state_advance; use state_processing::{ - per_block_processing, per_block_processing::BlockSignatureStrategy, VerifyBlockRoot, + per_block_processing, per_block_processing::BlockSignatureStrategy, ConsensusContext, + VerifyBlockRoot, }; use std::sync::Arc; use std::time::Duration; @@ -172,12 +173,14 @@ pub fn reset_fork_choice_to_finalization, Cold: It complete_state_advance(&mut state, None, block.slot(), spec) .map_err(|e| format!("State advance failed: {:?}", e))?; + let mut ctxt = ConsensusContext::new(block.slot()) + .set_proposer_index(block.message().proposer_index()); per_block_processing( &mut state, &block, - None, BlockSignatureStrategy::NoVerification, VerifyBlockRoot::True, + &mut ctxt, spec, ) 
.map_err(|e| format!("Error replaying block: {:?}", e))?; diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 0ff4e57a8a..998f22f770 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -11,7 +11,7 @@ use slasher::{Config as SlasherConfig, Slasher}; use state_processing::{ common::get_indexed_attestation, per_block_processing::{per_block_processing, BlockSignatureStrategy}, - per_slot_processing, BlockProcessingError, VerifyBlockRoot, + per_slot_processing, BlockProcessingError, ConsensusContext, VerifyBlockRoot, }; use std::marker::PhantomData; use std::sync::Arc; @@ -1139,14 +1139,15 @@ async fn add_base_block_to_altair_chain() { // Ensure that it would be impossible to apply this block to `per_block_processing`. { let mut state = state; + let mut ctxt = ConsensusContext::new(base_block.slot()); per_slot_processing(&mut state, None, &harness.chain.spec).unwrap(); assert!(matches!( per_block_processing( &mut state, &base_block, - None, BlockSignatureStrategy::NoVerification, VerifyBlockRoot::True, + &mut ctxt, &harness.chain.spec, ), Err(BlockProcessingError::InconsistentBlockFork( @@ -1271,14 +1272,15 @@ async fn add_altair_block_to_base_chain() { // Ensure that it would be impossible to apply this block to `per_block_processing`. 
{ let mut state = state; + let mut ctxt = ConsensusContext::new(altair_block.slot()); per_slot_processing(&mut state, None, &harness.chain.spec).unwrap(); assert!(matches!( per_block_processing( &mut state, &altair_block, - None, BlockSignatureStrategy::NoVerification, VerifyBlockRoot::True, + &mut ctxt, &harness.chain.spec, ), Err(BlockProcessingError::InconsistentBlockFork( diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index 7db2652f2c..c939fd3f51 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -4,7 +4,8 @@ use crate::{Error, ItemStore, KeyValueStore}; use itertools::{process_results, Itertools}; use slog::info; use state_processing::{ - per_block_processing, per_slot_processing, BlockSignatureStrategy, VerifyBlockRoot, + per_block_processing, per_slot_processing, BlockSignatureStrategy, ConsensusContext, + VerifyBlockRoot, }; use std::sync::Arc; use types::{EthSpec, Hash256}; @@ -87,12 +88,16 @@ where // Apply block. 
if let Some(block) = block { + let mut ctxt = ConsensusContext::new(block.slot()) + .set_current_block_root(block_root) + .set_proposer_index(block.message().proposer_index()); + per_block_processing( &mut state, &block, - Some(block_root), BlockSignatureStrategy::NoVerification, VerifyBlockRoot::True, + &mut ctxt, &self.spec, ) .map_err(HotColdDBError::BlockReplayBlockError)?; diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index d4b4b067e3..cc7bd17c50 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -1,6 +1,7 @@ use crate::{ per_block_processing, per_epoch_processing::EpochProcessingSummary, per_slot_processing, - BlockProcessingError, BlockSignatureStrategy, SlotProcessingError, VerifyBlockRoot, + BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, + VerifyBlockRoot, }; use std::marker::PhantomData; use types::{BeaconState, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot}; @@ -254,12 +255,16 @@ where VerifyBlockRoot::False } }); + // Proposer index was already checked when this block was originally processed, we + // can omit recomputing it during replay. 
+ let mut ctxt = ConsensusContext::new(block.slot()) + .set_proposer_index(block.message().proposer_index()); per_block_processing( &mut self.state, block, - None, self.block_sig_strategy, verify_block_root, + &mut ctxt, self.spec, ) .map_err(BlockReplayError::from)?; diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index e9d94a1062..ac2dba875e 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -1,9 +1,13 @@ -use crate::common::{decrease_balance, increase_balance, initiate_validator_exit}; +use crate::{ + common::{decrease_balance, increase_balance, initiate_validator_exit}, + per_block_processing::errors::BlockProcessingError, + ConsensusContext, +}; use safe_arith::SafeArith; use std::cmp; use types::{ consts::altair::{PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, - BeaconStateError as Error, *, + *, }; /// Slash the validator with index `slashed_index`. @@ -11,8 +15,9 @@ pub fn slash_validator( state: &mut BeaconState, slashed_index: usize, opt_whistleblower_index: Option, + ctxt: &mut ConsensusContext, spec: &ChainSpec, -) -> Result<(), Error> { +) -> Result<(), BlockProcessingError> { let epoch = state.current_epoch(); initiate_validator_exit(state, slashed_index, spec)?; @@ -39,7 +44,7 @@ pub fn slash_validator( )?; // Apply proposer and whistleblower rewards - let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)?; + let proposer_index = ctxt.get_proposer_index(state, spec)? as usize; let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index); let whistleblower_reward = validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?; @@ -52,7 +57,7 @@ pub fn slash_validator( // Ensure the whistleblower index is in the validator registry. 
if state.validators().get(whistleblower_index).is_none() { - return Err(BeaconStateError::UnknownValidator(whistleblower_index)); + return Err(BeaconStateError::UnknownValidator(whistleblower_index).into()); } increase_balance(state, proposer_index, proposer_reward)?; diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs new file mode 100644 index 0000000000..fdd3f95a65 --- /dev/null +++ b/consensus/state_processing/src/consensus_context.rs @@ -0,0 +1,92 @@ +use std::marker::PhantomData; +use tree_hash::TreeHash; +use types::{ + BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, Hash256, SignedBeaconBlock, + Slot, +}; + +#[derive(Debug)] +pub struct ConsensusContext { + /// Slot to act as an identifier/safeguard + slot: Slot, + /// Proposer index of the block at `slot`. + proposer_index: Option, + /// Block root of the block at `slot`. + current_block_root: Option, + _phantom: PhantomData, +} + +#[derive(Debug, PartialEq, Clone)] +pub enum ContextError { + BeaconState(BeaconStateError), + SlotMismatch { slot: Slot, expected: Slot }, +} + +impl From for ContextError { + fn from(e: BeaconStateError) -> Self { + Self::BeaconState(e) + } +} + +impl ConsensusContext { + pub fn new(slot: Slot) -> Self { + Self { + slot, + proposer_index: None, + current_block_root: None, + _phantom: PhantomData, + } + } + + pub fn set_proposer_index(mut self, proposer_index: u64) -> Self { + self.proposer_index = Some(proposer_index); + self + } + + pub fn get_proposer_index( + &mut self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result { + self.check_slot(state.slot())?; + + if let Some(proposer_index) = self.proposer_index { + return Ok(proposer_index); + } + + let proposer_index = state.get_beacon_proposer_index(self.slot, spec)? 
as u64; + self.proposer_index = Some(proposer_index); + Ok(proposer_index) + } + + pub fn set_current_block_root(mut self, block_root: Hash256) -> Self { + self.current_block_root = Some(block_root); + self + } + + pub fn get_current_block_root>( + &mut self, + block: &SignedBeaconBlock, + ) -> Result { + self.check_slot(block.slot())?; + + if let Some(current_block_root) = self.current_block_root { + return Ok(current_block_root); + } + + let current_block_root = block.message().tree_hash_root(); + self.current_block_root = Some(current_block_root); + Ok(current_block_root) + } + + fn check_slot(&self, slot: Slot) -> Result<(), ContextError> { + if slot == self.slot { + Ok(()) + } else { + Err(ContextError::SlotMismatch { + slot, + expected: self.slot, + }) + } + } +} diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index cf541d4572..e4f36bedd8 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -18,6 +18,7 @@ mod metrics; pub mod block_replayer; pub mod common; +pub mod consensus_context; pub mod genesis; pub mod per_block_processing; pub mod per_epoch_processing; @@ -27,6 +28,7 @@ pub mod upgrade; pub mod verify_operation; pub use block_replayer::{BlockReplayError, BlockReplayer, StateRootStrategy}; +pub use consensus_context::{ConsensusContext, ContextError}; pub use genesis::{ eth2_genesis_time, initialize_beacon_state_from_eth1, is_valid_genesis_state, process_activations, diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index e409372ddd..cccc8eacd9 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -1,3 +1,4 @@ +use crate::consensus_context::ConsensusContext; use errors::{BlockOperationError, BlockProcessingError, HeaderInvalid}; use rayon::prelude::*; use safe_arith::{ArithError, SafeArith}; @@ -90,9 +91,9 @@ pub 
enum VerifyBlockRoot { pub fn per_block_processing>( state: &mut BeaconState, signed_block: &SignedBeaconBlock, - block_root: Option, block_signature_strategy: BlockSignatureStrategy, verify_block_root: VerifyBlockRoot, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { let block = signed_block.message(); @@ -110,6 +111,8 @@ pub fn per_block_processing>( let verify_signatures = match block_signature_strategy { BlockSignatureStrategy::VerifyBulk => { // Verify all signatures in the block at once. + let block_root = Some(ctxt.get_current_block_root(signed_block)?); + let proposer_index = Some(ctxt.get_proposer_index(state, spec)?); block_verify!( BlockSignatureVerifier::verify_entire_block( state, @@ -117,6 +120,7 @@ pub fn per_block_processing>( |pk_bytes| pk_bytes.decompress().ok().map(Cow::Owned), signed_block, block_root, + proposer_index, spec ) .is_ok(), @@ -133,11 +137,12 @@ pub fn per_block_processing>( state, block.temporary_block_header(), verify_block_root, + ctxt, spec, )?; if verify_signatures.is_true() { - verify_block_signature(state, signed_block, block_root, spec)?; + verify_block_signature(state, signed_block, ctxt, spec)?; } let verify_randao = if let BlockSignatureStrategy::VerifyRandao = block_signature_strategy { @@ -157,9 +162,9 @@ pub fn per_block_processing>( process_execution_payload(state, payload, spec)?; } - process_randao(state, block, verify_randao, spec)?; + process_randao(state, block, verify_randao, ctxt, spec)?; process_eth1_data(state, block.body().eth1_data())?; - process_operations(state, block.body(), proposer_index, verify_signatures, spec)?; + process_operations(state, block.body(), verify_signatures, ctxt, spec)?; if let Ok(sync_aggregate) = block.body().sync_aggregate() { process_sync_aggregate( @@ -179,6 +184,7 @@ pub fn process_block_header( state: &mut BeaconState, block_header: BeaconBlockHeader, verify_block_root: VerifyBlockRoot, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) 
-> Result> { // Verify that the slots match @@ -197,8 +203,8 @@ pub fn process_block_header( ); // Verify that proposer index is the correct index - let proposer_index = block_header.proposer_index as usize; - let state_proposer_index = state.get_beacon_proposer_index(block_header.slot, spec)?; + let proposer_index = block_header.proposer_index; + let state_proposer_index = ctxt.get_proposer_index(state, spec)?; verify!( proposer_index == state_proposer_index, HeaderInvalid::ProposerIndexMismatch { @@ -222,11 +228,11 @@ pub fn process_block_header( // Verify proposer is not slashed verify!( - !state.get_validator(proposer_index)?.slashed, + !state.get_validator(proposer_index as usize)?.slashed, HeaderInvalid::ProposerSlashed(proposer_index) ); - Ok(proposer_index as u64) + Ok(proposer_index) } /// Verifies the signature of a block. @@ -235,15 +241,18 @@ pub fn process_block_header( pub fn verify_block_signature>( state: &BeaconState, block: &SignedBeaconBlock, - block_root: Option, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockOperationError> { + let block_root = Some(ctxt.get_current_block_root(block)?); + let proposer_index = Some(ctxt.get_proposer_index(state, spec)?); verify!( block_proposal_signature_set( state, |i| get_pubkey_from_state(state, i), block, block_root, + proposer_index, spec )? .verify(), @@ -259,12 +268,21 @@ pub fn process_randao>( state: &mut BeaconState, block: BeaconBlockRef<'_, T, Payload>, verify_signatures: VerifySignatures, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { if verify_signatures.is_true() { // Verify RANDAO reveal signature. + let proposer_index = ctxt.get_proposer_index(state, spec)?; block_verify!( - randao_signature_set(state, |i| get_pubkey_from_state(state, i), block, spec)?.verify(), + randao_signature_set( + state, + |i| get_pubkey_from_state(state, i), + block, + Some(proposer_index), + spec + )? 
+ .verify(), BlockProcessingError::RandaoSignatureInvalid ); } diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 78205ca92c..7584df14ec 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -123,10 +123,11 @@ where decompressor: D, block: &'a SignedBeaconBlock, block_root: Option, + verified_proposer_index: Option, spec: &'a ChainSpec, ) -> Result<()> { let mut verifier = Self::new(state, get_pubkey, decompressor, spec); - verifier.include_all_signatures(block, block_root)?; + verifier.include_all_signatures(block, block_root, verified_proposer_index)?; verifier.verify() } @@ -135,9 +136,10 @@ where &mut self, block: &'a SignedBeaconBlock, block_root: Option, + verified_proposer_index: Option, ) -> Result<()> { - self.include_block_proposal(block, block_root)?; - self.include_all_signatures_except_proposal(block)?; + self.include_block_proposal(block, block_root, verified_proposer_index)?; + self.include_all_signatures_except_proposal(block, verified_proposer_index)?; Ok(()) } @@ -147,8 +149,9 @@ where pub fn include_all_signatures_except_proposal>( &mut self, block: &'a SignedBeaconBlock, + verified_proposer_index: Option, ) -> Result<()> { - self.include_randao_reveal(block)?; + self.include_randao_reveal(block, verified_proposer_index)?; self.include_proposer_slashings(block)?; self.include_attester_slashings(block)?; self.include_attestations(block)?; @@ -164,12 +167,14 @@ where &mut self, block: &'a SignedBeaconBlock, block_root: Option, + verified_proposer_index: Option, ) -> Result<()> { let set = block_proposal_signature_set( self.state, self.get_pubkey.clone(), block, block_root, + verified_proposer_index, self.spec, )?; self.sets.push(set); @@ -180,11 +185,13 @@ where pub fn include_randao_reveal>( 
&mut self, block: &'a SignedBeaconBlock, + verified_proposer_index: Option, ) -> Result<()> { let set = randao_signature_set( self.state, self.get_pubkey.clone(), block.message(), + verified_proposer_index, self.spec, )?; self.sets.push(set); diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index e214b6e63d..71bd55f883 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -1,4 +1,5 @@ use super::signature_sets::Error as SignatureSetError; +use crate::ContextError; use merkle_proof::MerkleTreeError; use safe_arith::ArithError; use types::*; @@ -70,6 +71,7 @@ pub enum BlockProcessingError { found: u64, }, ExecutionInvalid, + ConsensusContext(ContextError), } impl From for BlockProcessingError { @@ -102,6 +104,12 @@ impl From for BlockProcessingError { } } +impl From for BlockProcessingError { + fn from(e: ContextError) -> Self { + BlockProcessingError::ConsensusContext(e) + } +} + impl From> for BlockProcessingError { fn from(e: BlockOperationError) -> BlockProcessingError { match e { @@ -109,6 +117,7 @@ impl From> for BlockProcessingError { BlockOperationError::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e), BlockOperationError::SignatureSetError(e) => BlockProcessingError::SignatureSetError(e), BlockOperationError::SszTypesError(e) => BlockProcessingError::SszTypesError(e), + BlockOperationError::ConsensusContext(e) => BlockProcessingError::ConsensusContext(e), BlockOperationError::ArithError(e) => BlockProcessingError::ArithError(e), } } @@ -136,6 +145,7 @@ macro_rules! 
impl_into_block_processing_error_with_index { BlockOperationError::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e), BlockOperationError::SignatureSetError(e) => BlockProcessingError::SignatureSetError(e), BlockOperationError::SszTypesError(e) => BlockProcessingError::SszTypesError(e), + BlockOperationError::ConsensusContext(e) => BlockProcessingError::ConsensusContext(e), BlockOperationError::ArithError(e) => BlockProcessingError::ArithError(e), } } @@ -167,6 +177,7 @@ pub enum BlockOperationError { BeaconStateError(BeaconStateError), SignatureSetError(SignatureSetError), SszTypesError(ssz_types::Error), + ConsensusContext(ContextError), ArithError(ArithError), } @@ -199,6 +210,12 @@ impl From for BlockOperationError { } } +impl From for BlockOperationError { + fn from(e: ContextError) -> Self { + BlockOperationError::ConsensusContext(e) + } +} + #[derive(Debug, PartialEq, Clone)] pub enum HeaderInvalid { ProposalSignatureInvalid, @@ -208,14 +225,14 @@ pub enum HeaderInvalid { block_slot: Slot, }, ProposerIndexMismatch { - block_proposer_index: usize, - state_proposer_index: usize, + block_proposer_index: u64, + state_proposer_index: u64, }, ParentBlockRootMismatch { state: Hash256, block: Hash256, }, - ProposerSlashed(usize), + ProposerSlashed(u64), } #[derive(Debug, PartialEq, Clone)] @@ -310,6 +327,7 @@ impl From> BlockOperationError::BeaconStateError(e) => BlockOperationError::BeaconStateError(e), BlockOperationError::SignatureSetError(e) => BlockOperationError::SignatureSetError(e), BlockOperationError::SszTypesError(e) => BlockOperationError::SszTypesError(e), + BlockOperationError::ConsensusContext(e) => BlockOperationError::ConsensusContext(e), BlockOperationError::ArithError(e) => BlockOperationError::ArithError(e), } } diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 31a4ac1fb4..1000586e66 100644 --- 
a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -12,23 +12,25 @@ use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_ pub fn process_operations<'a, T: EthSpec, Payload: ExecPayload>( state: &mut BeaconState, block_body: BeaconBlockBodyRef<'a, T, Payload>, - proposer_index: u64, verify_signatures: VerifySignatures, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { process_proposer_slashings( state, block_body.proposer_slashings(), verify_signatures, + ctxt, spec, )?; process_attester_slashings( state, block_body.attester_slashings(), verify_signatures, + ctxt, spec, )?; - process_attestations(state, block_body, proposer_index, verify_signatures, spec)?; + process_attestations(state, block_body, verify_signatures, ctxt, spec)?; process_deposits(state, block_body.deposits(), spec)?; process_exits(state, block_body.voluntary_exits(), verify_signatures, spec)?; Ok(()) @@ -45,12 +47,13 @@ pub mod base { state: &mut BeaconState, attestations: &[Attestation], verify_signatures: VerifySignatures, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { // Ensure the previous epoch cache exists. state.build_committee_cache(RelativeEpoch::Previous, spec)?; - let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)? as u64; + let proposer_index = ctxt.get_proposer_index(state, spec)?; // Verify and apply each attestation. 
for (i, attestation) in attestations.iter().enumerate() { @@ -87,10 +90,11 @@ pub mod altair { pub fn process_attestations( state: &mut BeaconState, attestations: &[Attestation], - proposer_index: u64, verify_signatures: VerifySignatures, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { + let proposer_index = ctxt.get_proposer_index(state, spec)?; attestations .iter() .enumerate() @@ -170,6 +174,7 @@ pub fn process_proposer_slashings( state: &mut BeaconState, proposer_slashings: &[ProposerSlashing], verify_signatures: VerifySignatures, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { // Verify and apply proposer slashings in series. @@ -186,6 +191,7 @@ pub fn process_proposer_slashings( state, proposer_slashing.signed_header_1.message.proposer_index as usize, None, + ctxt, spec, )?; @@ -201,6 +207,7 @@ pub fn process_attester_slashings( state: &mut BeaconState, attester_slashings: &[AttesterSlashing], verify_signatures: VerifySignatures, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { for (i, attester_slashing) in attester_slashings.iter().enumerate() { @@ -211,7 +218,7 @@ pub fn process_attester_slashings( get_slashable_indices(state, attester_slashing).map_err(|e| e.into_with_index(i))?; for i in slashable_indices { - slash_validator(state, i as usize, None, spec)?; + slash_validator(state, i as usize, None, ctxt, spec)?; } } @@ -222,20 +229,26 @@ pub fn process_attester_slashings( pub fn process_attestations<'a, T: EthSpec, Payload: ExecPayload>( state: &mut BeaconState, block_body: BeaconBlockBodyRef<'a, T, Payload>, - proposer_index: u64, verify_signatures: VerifySignatures, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { match block_body { BeaconBlockBodyRef::Base(_) => { - base::process_attestations(state, block_body.attestations(), verify_signatures, spec)?; + base::process_attestations( + state, + 
block_body.attestations(), + verify_signatures, + ctxt, + spec, + )?; } BeaconBlockBodyRef::Altair(_) | BeaconBlockBodyRef::Merge(_) => { altair::process_attestations( state, block_body.attestations(), - proposer_index, verify_signatures, + ctxt, spec, )?; } diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 5ce1bfddd5..90bbdd56fe 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -76,6 +76,7 @@ pub fn block_proposal_signature_set<'a, T, F, Payload: ExecPayload>( get_pubkey: F, signed_block: &'a SignedBeaconBlock, block_root: Option, + verified_proposer_index: Option, spec: &'a ChainSpec, ) -> Result> where @@ -83,8 +84,12 @@ where F: Fn(usize) -> Option>, { let block = signed_block.message(); - let proposer_index = state.get_beacon_proposer_index(block.slot(), spec)? as u64; + let proposer_index = if let Some(proposer_index) = verified_proposer_index { + proposer_index + } else { + state.get_beacon_proposer_index(block.slot(), spec)? as u64 + }; if proposer_index != block.proposer_index() { return Err(Error::IncorrectBlockProposer { block: block.proposer_index(), @@ -156,13 +161,18 @@ pub fn randao_signature_set<'a, T, F, Payload: ExecPayload>( state: &'a BeaconState, get_pubkey: F, block: BeaconBlockRef<'a, T, Payload>, + verified_proposer_index: Option, spec: &'a ChainSpec, ) -> Result> where T: EthSpec, F: Fn(usize) -> Option>, { - let proposer_index = state.get_beacon_proposer_index(block.slot(), spec)?; + let proposer_index = if let Some(proposer_index) = verified_proposer_index { + proposer_index + } else { + state.get_beacon_proposer_index(block.slot(), spec)? 
as u64 + }; let domain = spec.get_domain( block.slot().epoch(T::slots_per_epoch()), @@ -178,7 +188,7 @@ where Ok(SignatureSet::single_pubkey( block.body().randao_reveal(), - get_pubkey(proposer_index).ok_or(Error::ValidatorUnknown(proposer_index as u64))?, + get_pubkey(proposer_index as usize).ok_or(Error::ValidatorUnknown(proposer_index))?, message, )) } diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index e244e02c2d..b7d28832db 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -8,7 +8,7 @@ use crate::per_block_processing::errors::{ }; use crate::{ per_block_processing::{process_operations, verify_exit::verify_exit}, - BlockSignatureStrategy, VerifyBlockRoot, VerifySignatures, + BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures, }; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use lazy_static::lazy_static; @@ -67,12 +67,13 @@ async fn valid_block_ok() { .make_block_return_pre_state(state, slot + Slot::new(1)) .await; + let mut ctxt = ConsensusContext::new(block.slot()); let result = per_block_processing( &mut state, &block, - None, BlockSignatureStrategy::VerifyIndividual, VerifyBlockRoot::True, + &mut ctxt, &spec, ); @@ -91,12 +92,13 @@ async fn invalid_block_header_state_slot() { let (mut block, signature) = signed_block.deconstruct(); *block.slot_mut() = slot + Slot::new(1); + let mut ctxt = ConsensusContext::new(block.slot()); let result = per_block_processing( &mut state, &SignedBeaconBlock::from_block(block, signature), - None, BlockSignatureStrategy::VerifyIndividual, VerifyBlockRoot::True, + &mut ctxt, &spec, ); @@ -122,12 +124,13 @@ async fn invalid_parent_block_root() { let (mut block, signature) = signed_block.deconstruct(); *block.parent_root_mut() = Hash256::from([0xAA; 32]); + let mut ctxt = 
ConsensusContext::new(block.slot()); let result = per_block_processing( &mut state, &SignedBeaconBlock::from_block(block, signature), - None, BlockSignatureStrategy::VerifyIndividual, VerifyBlockRoot::True, + &mut ctxt, &spec, ); @@ -154,12 +157,13 @@ async fn invalid_block_signature() { .await; let (block, _) = signed_block.deconstruct(); + let mut ctxt = ConsensusContext::new(block.slot()); let result = per_block_processing( &mut state, &SignedBeaconBlock::from_block(block, Signature::empty()), - None, BlockSignatureStrategy::VerifyIndividual, VerifyBlockRoot::True, + &mut ctxt, &spec, ); @@ -186,12 +190,13 @@ async fn invalid_randao_reveal_signature() { }) .await; + let mut ctxt = ConsensusContext::new(signed_block.slot()); let result = per_block_processing( &mut state, &signed_block, - None, BlockSignatureStrategy::VerifyIndividual, VerifyBlockRoot::True, + &mut ctxt, &spec, ); @@ -386,11 +391,12 @@ async fn invalid_attestation_no_committee_for_index() { head_block.to_mut().body_mut().attestations_mut()[0] .data .index += 1; + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attestations( &mut state, head_block.body(), - head_block.proposer_index(), VerifySignatures::True, + &mut ctxt, &spec, ); @@ -424,11 +430,12 @@ async fn invalid_attestation_wrong_justified_checkpoint() { .data .source = new_justified_checkpoint; + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attestations( &mut state, head_block.body(), - head_block.proposer_index(), VerifySignatures::True, + &mut ctxt, &spec, ); @@ -463,11 +470,12 @@ async fn invalid_attestation_bad_aggregation_bitfield_len() { head_block.to_mut().body_mut().attestations_mut()[0].aggregation_bits = Bitfield::with_capacity(spec.target_committee_size).unwrap(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attestations( &mut state, head_block.body(), - head_block.proposer_index(), 
VerifySignatures::True, + &mut ctxt, &spec, ); @@ -495,11 +503,12 @@ async fn invalid_attestation_bad_signature() { .0; head_block.to_mut().body_mut().attestations_mut()[0].signature = AggregateSignature::empty(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attestations( &mut state, head_block.body(), - head_block.proposer_index(), VerifySignatures::True, + &mut ctxt, &spec, ); // Expecting BadSignature because we're signing with invalid secret_keys @@ -533,11 +542,12 @@ async fn invalid_attestation_included_too_early() { .data .slot = new_attesation_slot; + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attestations( &mut state, head_block.body(), - head_block.proposer_index(), VerifySignatures::True, + &mut ctxt, &spec, ); @@ -575,11 +585,12 @@ async fn invalid_attestation_included_too_late() { .data .slot = new_attesation_slot; + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attestations( &mut state, head_block.body(), - head_block.proposer_index(), VerifySignatures::True, + &mut ctxt, &spec, ); assert_eq!( @@ -613,11 +624,12 @@ async fn invalid_attestation_target_epoch_slot_mismatch() { .target .epoch += Epoch::new(1); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attestations( &mut state, head_block.body(), - head_block.proposer_index(), VerifySignatures::True, + &mut ctxt, &spec, ); assert_eq!( @@ -640,10 +652,12 @@ async fn valid_insert_attester_slashing() { let attester_slashing = harness.make_attester_slashing(vec![1, 2]); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attester_slashings( &mut state, &[attester_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); @@ -660,10 +674,12 @@ async fn invalid_attester_slashing_not_slashable() { attester_slashing.attestation_1 = 
attester_slashing.attestation_2.clone(); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attester_slashings( &mut state, &[attester_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); @@ -686,10 +702,12 @@ async fn invalid_attester_slashing_1_invalid() { attester_slashing.attestation_1.attesting_indices = VariableList::from(vec![2, 1]); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attester_slashings( &mut state, &[attester_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); @@ -715,10 +733,12 @@ async fn invalid_attester_slashing_2_invalid() { attester_slashing.attestation_2.attesting_indices = VariableList::from(vec![2, 1]); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attester_slashings( &mut state, &[attester_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); @@ -741,10 +761,12 @@ async fn valid_insert_proposer_slashing() { let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let proposer_slashing = harness.make_proposer_slashing(1); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_proposer_slashings( &mut state, &[proposer_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); // Expecting Ok(_) because we inserted a valid proposer slashing @@ -760,10 +782,12 @@ async fn invalid_proposer_slashing_proposals_identical() { proposer_slashing.signed_header_1.message = proposer_slashing.signed_header_2.message.clone(); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_proposer_slashings( &mut state, &[proposer_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); @@ -787,10 
+811,12 @@ async fn invalid_proposer_slashing_proposer_unknown() { proposer_slashing.signed_header_2.message.proposer_index = 3_141_592; let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_proposer_slashings( &mut state, &[proposer_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); @@ -811,10 +837,12 @@ async fn invalid_proposer_slashing_duplicate_slashing() { let proposer_slashing = harness.make_proposer_slashing(1); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result_1 = process_operations::process_proposer_slashings( &mut state, &[proposer_slashing.clone()], VerifySignatures::False, + &mut ctxt, &spec, ); assert!(result_1.is_ok()); @@ -823,6 +851,7 @@ async fn invalid_proposer_slashing_duplicate_slashing() { &mut state, &[proposer_slashing], VerifySignatures::False, + &mut ctxt, &spec, ); // Expecting ProposerNotSlashable because we've already slashed the validator @@ -842,10 +871,12 @@ async fn invalid_bad_proposal_1_signature() { let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.signature = Signature::empty(); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_proposer_slashings( &mut state, &[proposer_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); @@ -866,10 +897,12 @@ async fn invalid_bad_proposal_2_signature() { let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_2.signature = Signature::empty(); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_proposer_slashings( &mut state, &[proposer_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); @@ -891,10 +924,12 @@ async fn invalid_proposer_slashing_proposal_epoch_mismatch() { 
proposer_slashing.signed_header_1.message.slot = Slot::new(0); proposer_slashing.signed_header_2.message.slot = Slot::new(128); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_proposer_slashings( &mut state, &[proposer_slashing], VerifySignatures::False, + &mut ctxt, &spec, ); diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index 28310f7683..8bd9af99ad 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -121,7 +121,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< }; for i in 0..runs { - let mut state = state.clone_with(CloneConfig::committee_caches_only()); + let mut state = state.clone_with(CloneConfig::all()); let start = Instant::now(); diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index dc825d2c02..b25cec81b5 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -74,7 +74,7 @@ use eth2::{ use ssz::Encode; use state_processing::{ block_signature_verifier::BlockSignatureVerifier, per_block_processing, per_slot_processing, - BlockSignatureStrategy, VerifyBlockRoot, + BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, }; use std::borrow::Cow; use std::fs::File; @@ -360,6 +360,7 @@ fn do_transition( decompressor, &block, Some(block_root), + Some(block.message().proposer_index()), spec, ) .map_err(|e| format!("Invalid block signature: {:?}", e))?; @@ -367,12 +368,15 @@ fn do_transition( } let t = Instant::now(); + let mut ctxt = ConsensusContext::new(pre_state.slot()) + .set_current_block_root(block_root) + .set_proposer_index(block.message().proposer_index()); per_block_processing( &mut pre_state, &block, - None, BlockSignatureStrategy::NoVerification, VerifyBlockRoot::True, + &mut ctxt, spec, ) .map_err(|e| format!("State transition failed: {:?}", e))?; diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 
54195cc236..a351a597c0 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -5,14 +5,17 @@ use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yam use crate::testing_spec; use crate::type_name::TypeName; use serde_derive::Deserialize; -use state_processing::per_block_processing::{ - errors::BlockProcessingError, - process_block_header, process_execution_payload, - process_operations::{ - altair, base, process_attester_slashings, process_deposits, process_exits, - process_proposer_slashings, +use state_processing::{ + per_block_processing::{ + errors::BlockProcessingError, + process_block_header, process_execution_payload, + process_operations::{ + altair, base, process_attester_slashings, process_deposits, process_exits, + process_proposer_slashings, + }, + process_sync_aggregate, VerifyBlockRoot, VerifySignatures, }, - process_sync_aggregate, VerifyBlockRoot, VerifySignatures, + ConsensusContext, }; use std::fmt::Debug; use std::path::Path; @@ -76,11 +79,16 @@ impl Operation for Attestation { spec: &ChainSpec, _: &Operations, ) -> Result<(), BlockProcessingError> { - let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)? 
as u64; + let mut ctxt = ConsensusContext::new(state.slot()); + let proposer_index = ctxt.get_proposer_index(state, spec)?; match state { - BeaconState::Base(_) => { - base::process_attestations(state, &[self.clone()], VerifySignatures::True, spec) - } + BeaconState::Base(_) => base::process_attestations( + state, + &[self.clone()], + VerifySignatures::True, + &mut ctxt, + spec, + ), BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_attestation( state, self, @@ -108,7 +116,14 @@ impl Operation for AttesterSlashing { spec: &ChainSpec, _: &Operations, ) -> Result<(), BlockProcessingError> { - process_attester_slashings(state, &[self.clone()], VerifySignatures::True, spec) + let mut ctxt = ConsensusContext::new(state.slot()); + process_attester_slashings( + state, + &[self.clone()], + VerifySignatures::True, + &mut ctxt, + spec, + ) } } @@ -147,7 +162,14 @@ impl Operation for ProposerSlashing { spec: &ChainSpec, _: &Operations, ) -> Result<(), BlockProcessingError> { - process_proposer_slashings(state, &[self.clone()], VerifySignatures::True, spec) + let mut ctxt = ConsensusContext::new(state.slot()); + process_proposer_slashings( + state, + &[self.clone()], + VerifySignatures::True, + &mut ctxt, + spec, + ) } } @@ -189,10 +211,12 @@ impl Operation for BeaconBlock { spec: &ChainSpec, _: &Operations, ) -> Result<(), BlockProcessingError> { + let mut ctxt = ConsensusContext::new(state.slot()); process_block_header( state, self.to_ref().temporary_block_header(), VerifyBlockRoot::True, + &mut ctxt, spec, )?; Ok(()) diff --git a/testing/ef_tests/src/cases/sanity_blocks.rs b/testing/ef_tests/src/cases/sanity_blocks.rs index 5f0db25ded..8a75789724 100644 --- a/testing/ef_tests/src/cases/sanity_blocks.rs +++ b/testing/ef_tests/src/cases/sanity_blocks.rs @@ -5,7 +5,7 @@ use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::{ per_block_processing, per_slot_processing, 
BlockProcessingError, BlockSignatureStrategy, - VerifyBlockRoot, + ConsensusContext, VerifyBlockRoot, }; use types::{BeaconState, EthSpec, ForkName, RelativeEpoch, SignedBeaconBlock}; @@ -91,26 +91,28 @@ impl Case for SanityBlocks { .build_committee_cache(RelativeEpoch::Current, spec) .unwrap(); + let mut ctxt = ConsensusContext::new(indiv_state.slot()); per_block_processing( &mut indiv_state, signed_block, - None, BlockSignatureStrategy::VerifyIndividual, VerifyBlockRoot::True, + &mut ctxt, spec, )?; + let mut ctxt = ConsensusContext::new(indiv_state.slot()); per_block_processing( &mut bulk_state, signed_block, - None, BlockSignatureStrategy::VerifyBulk, VerifyBlockRoot::True, + &mut ctxt, spec, )?; - if block.state_root() == bulk_state.canonical_root() - && block.state_root() == indiv_state.canonical_root() + if block.state_root() == bulk_state.update_tree_hash_cache().unwrap() + && block.state_root() == indiv_state.update_tree_hash_cache().unwrap() { Ok(()) } else { diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index d2b1bb2c62..2c9134aba5 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -4,7 +4,7 @@ use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::{ per_block_processing, state_advance::complete_state_advance, BlockSignatureStrategy, - VerifyBlockRoot, + ConsensusContext, VerifyBlockRoot, }; use std::str::FromStr; use types::{BeaconState, Epoch, ForkName, SignedBeaconBlock}; @@ -91,12 +91,13 @@ impl Case for TransitionTest { .map_err(|e| format!("Failed to advance: {:?}", e))?; // Apply block. 
+ let mut ctxt = ConsensusContext::new(state.slot()); per_block_processing( &mut state, block, - None, BlockSignatureStrategy::VerifyBulk, VerifyBlockRoot::True, + &mut ctxt, spec, ) .map_err(|e| format!("Block processing failed: {:?}", e))?; diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index f485e1a268..d581eba965 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -2,7 +2,7 @@ use super::*; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use state_processing::{ per_block_processing, per_block_processing::errors::ExitInvalid, BlockProcessingError, - BlockSignatureStrategy, VerifyBlockRoot, + BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, }; use types::{BeaconBlock, BeaconState, Epoch, EthSpec, SignedBeaconBlock}; @@ -64,12 +64,13 @@ impl ExitTest { block: &SignedBeaconBlock, state: &mut BeaconState, ) -> Result<(), BlockProcessingError> { + let mut ctxt = ConsensusContext::new(block.slot()); per_block_processing( state, block, - None, BlockSignatureStrategy::VerifyIndividual, VerifyBlockRoot::True, + &mut ctxt, &E::default_spec(), ) } @@ -125,7 +126,7 @@ vectors_and_tests!( ExitTest { block_modifier: Box::new(|_, block| { // Duplicate the exit - let exit = block.body().voluntary_exits()[0].clone(); + let exit = block.body().voluntary_exits().get(0).unwrap().clone(); block.body_mut().voluntary_exits_mut().push(exit).unwrap(); }), expected: Err(BlockProcessingError::ExitInvalid { @@ -144,7 +145,11 @@ vectors_and_tests!( invalid_validator_unknown, ExitTest { block_modifier: Box::new(|_, block| { - block.body_mut().voluntary_exits_mut()[0] + block + .body_mut() + .voluntary_exits_mut() + .get_mut(0) + .unwrap() .message .validator_index = VALIDATOR_COUNT as u64; }), @@ -165,7 +170,7 @@ vectors_and_tests!( invalid_exit_already_initiated, ExitTest { state_modifier: Box::new(|state| { - 
state.validators_mut()[0].exit_epoch = STATE_EPOCH + 1; + state.validators_mut().get_mut(0).unwrap().exit_epoch = STATE_EPOCH + 1; }), expected: Err(BlockProcessingError::ExitInvalid { index: 0, @@ -184,7 +189,8 @@ vectors_and_tests!( invalid_not_active_before_activation_epoch, ExitTest { state_modifier: Box::new(|state| { - state.validators_mut()[0].activation_epoch = E::default_spec().far_future_epoch; + state.validators_mut().get_mut(0).unwrap().activation_epoch = + E::default_spec().far_future_epoch; }), expected: Err(BlockProcessingError::ExitInvalid { index: 0, @@ -203,7 +209,7 @@ vectors_and_tests!( invalid_not_active_after_exit_epoch, ExitTest { state_modifier: Box::new(|state| { - state.validators_mut()[0].exit_epoch = STATE_EPOCH; + state.validators_mut().get_mut(0).unwrap().exit_epoch = STATE_EPOCH; }), expected: Err(BlockProcessingError::ExitInvalid { index: 0, @@ -303,7 +309,11 @@ vectors_and_tests!( block_modifier: Box::new(|_, block| { // Shift the validator index by 1 so that it's mismatched from the key that was // used to sign. - block.body_mut().voluntary_exits_mut()[0] + block + .body_mut() + .voluntary_exits_mut() + .get_mut(0) + .unwrap() .message .validator_index = VALIDATOR_INDEX + 1; }), From edf23bb40ee4f6e4b6024b66ca2a95caaf1811f0 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 18 Oct 2022 04:02:06 +0000 Subject: [PATCH 26/27] Fix attestation shuffling filter (#3629) ## Issue Addressed Fix a bug in block production that results in blocks with 0 attestations during the first slot of an epoch. The bug is marked by debug logs of the form: > DEBG Discarding attestation because of missing ancestor, block_root: 0x3cc00d9c9e0883b2d0db8606278f2b8423d4902f9a1ee619258b5b60590e64f8, pivot_slot: 4042591 It occurs when trying to look up the shuffling decision root for an attestation from a slot which is prior to fork choice's finalized block. 
This happens frequently when proposing in the first slot of the epoch where we have: - `current_epoch == n` - `attestation.data.target.epoch == n - 1` - attestation shuffling epoch `== n - 3` (decision block being the last block of `n - 3`) - `state.finalized_checkpoint.epoch == n - 2` (first block of `n - 2` is finalized) Hence the shuffling decision slot is out of range of the fork choice backwards iterator _by a single slot_. Unfortunately this bug was hidden when we weren't pruning fork choice, and then reintroduced in v2.5.1 when we fixed the pruning (https://github.com/sigp/lighthouse/releases/tag/v2.5.1). There's no way to turn that off or disable the filtering in our current release, so we need a new release to fix this issue. Fortunately, it also does not occur on every epoch boundary because of the gradual pruning of fork choice every 256 blocks (~8 epochs): https://github.com/sigp/lighthouse/blob/01e84b71f524968f5b940fbd2fa31d29408b6581/consensus/proto_array/src/proto_array_fork_choice.rs#L16 https://github.com/sigp/lighthouse/blob/01e84b71f524968f5b940fbd2fa31d29408b6581/consensus/proto_array/src/proto_array.rs#L713-L716 So the probability of proposing a 0-attestation block given a proposal assignment is approximately `1/32 * 1/8 = 0.39%`. ## Proposed Changes - Load the block's shuffling ID from fork choice and verify it against the expected shuffling ID of the head state. This code was initially written before we had settled on a representation of shuffling IDs, so I think it's a nice simplification to make use of them here rather than more ad-hoc logic that fundamentally does the same thing. ## Additional Info Thanks to @moshe-blox for noticing this issue and bringing it to our attention. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 115 +++++++------- beacon_node/beacon_chain/tests/store_tests.rs | 140 +++++++++--------- 2 files changed, 139 insertions(+), 116 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4429abc4c9..86b43a1a39 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1991,60 +1991,75 @@ impl BeaconChain { target_epoch: Epoch, state: &BeaconState, ) -> bool { - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - let shuffling_lookahead = 1 + self.spec.min_seed_lookahead.as_u64(); - - // Shuffling can't have changed if we're in the first few epochs - if state.current_epoch() < shuffling_lookahead { - return true; - } - - // Otherwise the shuffling is determined by the block at the end of the target epoch - // minus the shuffling lookahead (usually 2). We call this the "pivot". - let pivot_slot = - if target_epoch == state.previous_epoch() || target_epoch == state.current_epoch() { - (target_epoch - shuffling_lookahead).end_slot(slots_per_epoch) - } else { - return false; - }; - - let state_pivot_block_root = match state.get_block_root(pivot_slot) { - Ok(root) => *root, - Err(e) => { - warn!( - &self.log, - "Missing pivot block root for attestation"; - "slot" => pivot_slot, - "error" => ?e, - ); - return false; - } - }; - - // Use fork choice's view of the block DAG to quickly evaluate whether the attestation's - // pivot block is the same as the current state's pivot block. If it is, then the - // attestation's shuffling is the same as the current state's. - // To account for skipped slots, find the first block at *or before* the pivot slot. 
- let fork_choice_lock = self.canonical_head.fork_choice_read_lock(); - let pivot_block_root = fork_choice_lock - .proto_array() - .core_proto_array() - .iter_block_roots(block_root) - .find(|(_, slot)| *slot <= pivot_slot) - .map(|(block_root, _)| block_root); - drop(fork_choice_lock); - - match pivot_block_root { - Some(root) => root == state_pivot_block_root, - None => { + self.shuffling_is_compatible_result(block_root, target_epoch, state) + .unwrap_or_else(|e| { debug!( - &self.log, - "Discarding attestation because of missing ancestor"; - "pivot_slot" => pivot_slot.as_u64(), + self.log, + "Skipping attestation with incompatible shuffling"; "block_root" => ?block_root, + "target_epoch" => target_epoch, + "reason" => ?e, ); false + }) + } + + fn shuffling_is_compatible_result( + &self, + block_root: &Hash256, + target_epoch: Epoch, + state: &BeaconState, + ) -> Result { + // Compute the shuffling ID for the head state in the `target_epoch`. + let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), target_epoch) + .map_err(|e| Error::BeaconStateError(e.into()))?; + let head_shuffling_id = + AttestationShufflingId::new(self.genesis_block_root, state, relative_epoch)?; + + // Load the block's shuffling ID from fork choice. We use the variant of `get_block` that + // checks descent from the finalized block, so there's one case where we'll spuriously + // return `false`: where an attestation for the previous epoch nominates the pivot block + // which is the parent block of the finalized block. Such attestations are not useful, so + // this doesn't matter. 
+ let fork_choice_lock = self.canonical_head.fork_choice_read_lock(); + let block = fork_choice_lock + .get_block(block_root) + .ok_or(Error::AttestationHeadNotInForkChoice(*block_root))?; + drop(fork_choice_lock); + + let block_shuffling_id = if target_epoch == block.current_epoch_shuffling_id.shuffling_epoch + { + block.current_epoch_shuffling_id + } else if target_epoch == block.next_epoch_shuffling_id.shuffling_epoch { + block.next_epoch_shuffling_id + } else if target_epoch > block.next_epoch_shuffling_id.shuffling_epoch { + AttestationShufflingId { + shuffling_epoch: target_epoch, + shuffling_decision_block: *block_root, } + } else { + debug!( + self.log, + "Skipping attestation with incompatible shuffling"; + "block_root" => ?block_root, + "target_epoch" => target_epoch, + "reason" => "target epoch less than block epoch" + ); + return Ok(false); + }; + + if head_shuffling_id == block_shuffling_id { + Ok(true) + } else { + debug!( + self.log, + "Skipping attestation with incompatible shuffling"; + "block_root" => ?block_root, + "target_epoch" => target_epoch, + "head_shuffling_id" => ?head_shuffling_id, + "block_shuffling_id" => ?block_shuffling_id, + ); + Ok(false) } } @@ -4460,7 +4475,7 @@ impl BeaconChain { /// /// If the committee for `(head_block_root, shuffling_epoch)` isn't found in the /// `shuffling_cache`, we will read a state from disk and then update the `shuffling_cache`. 
- pub(crate) fn with_committee_cache( + pub fn with_committee_cache( &self, head_block_root: Hash256, shuffling_epoch: Epoch, diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 883b871b1c..b1907bc96e 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -811,7 +811,6 @@ async fn shuffling_compatible_linear_chain() { let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - // Skip the block at the end of the first epoch. let head_block_root = harness .extend_chain( 4 * E::slots_per_epoch() as usize, @@ -824,10 +823,6 @@ async fn shuffling_compatible_linear_chain() { &harness, &get_state_for_block(&harness, head_block_root), head_block_root, - true, - true, - None, - None, ); } @@ -859,10 +854,6 @@ async fn shuffling_compatible_missing_pivot_block() { &harness, &get_state_for_block(&harness, head_block_root), head_block_root, - true, - true, - Some(E::slots_per_epoch() - 2), - Some(E::slots_per_epoch() - 2), ); } @@ -880,10 +871,10 @@ async fn shuffling_compatible_simple_fork() { let head1_state = get_state_for_block(&harness, head1); let head2_state = get_state_for_block(&harness, head2); - check_shuffling_compatible(&harness, &head1_state, head1, true, true, None, None); - check_shuffling_compatible(&harness, &head1_state, head2, false, false, None, None); - check_shuffling_compatible(&harness, &head2_state, head1, false, false, None, None); - check_shuffling_compatible(&harness, &head2_state, head2, true, true, None, None); + check_shuffling_compatible(&harness, &head1_state, head1); + check_shuffling_compatible(&harness, &head1_state, head2); + check_shuffling_compatible(&harness, &head2_state, head1); + check_shuffling_compatible(&harness, &head2_state, head2); drop(db_path); } @@ -902,21 +893,10 @@ async fn shuffling_compatible_short_fork() { let head1_state = get_state_for_block(&harness, head1); let 
head2_state = get_state_for_block(&harness, head2); - check_shuffling_compatible(&harness, &head1_state, head1, true, true, None, None); - check_shuffling_compatible(&harness, &head1_state, head2, false, true, None, None); - // NOTE: don't check this case, as block 14 from the first chain appears valid on the second - // chain due to it matching the second chain's block 15. - // check_shuffling_compatible(&harness, &head2_state, head1, false, true, None, None); - check_shuffling_compatible( - &harness, - &head2_state, - head2, - true, - true, - // Required because of the skipped slot. - Some(2 * E::slots_per_epoch() - 2), - None, - ); + check_shuffling_compatible(&harness, &head1_state, head1); + check_shuffling_compatible(&harness, &head1_state, head2); + check_shuffling_compatible(&harness, &head2_state, head1); + check_shuffling_compatible(&harness, &head2_state, head2); drop(db_path); } @@ -940,54 +920,82 @@ fn check_shuffling_compatible( harness: &TestHarness, head_state: &BeaconState, head_block_root: Hash256, - current_epoch_valid: bool, - previous_epoch_valid: bool, - current_epoch_cutoff_slot: Option, - previous_epoch_cutoff_slot: Option, ) { - let shuffling_lookahead = harness.chain.spec.min_seed_lookahead.as_u64() + 1; - let current_pivot_slot = - (head_state.current_epoch() - shuffling_lookahead).end_slot(E::slots_per_epoch()); - let previous_pivot_slot = - (head_state.previous_epoch() - shuffling_lookahead).end_slot(E::slots_per_epoch()); - for maybe_tuple in harness .chain .rev_iter_block_roots_from(head_block_root) .unwrap() { let (block_root, slot) = maybe_tuple.unwrap(); - // Shuffling is compatible targeting the current epoch, - // if slot is greater than or equal to the current epoch pivot block. - assert_eq!( - harness.chain.shuffling_is_compatible( - &block_root, + + // Would an attestation to `block_root` at the current epoch be compatible with the head + // state's shuffling? 
+ let current_epoch_shuffling_is_compatible = harness.chain.shuffling_is_compatible( + &block_root, + head_state.current_epoch(), + &head_state, + ); + + // Check for consistency with the more expensive shuffling lookup. + harness + .chain + .with_committee_cache( + block_root, head_state.current_epoch(), - &head_state - ), - current_epoch_valid - && slot >= current_epoch_cutoff_slot.unwrap_or(current_pivot_slot.as_u64()) - ); + |committee_cache, _| { + let state_cache = head_state.committee_cache(RelativeEpoch::Current).unwrap(); + if current_epoch_shuffling_is_compatible { + assert_eq!(committee_cache, state_cache, "block at slot {slot}"); + } else { + assert_ne!(committee_cache, state_cache, "block at slot {slot}"); + } + Ok(()) + }, + ) + .unwrap_or_else(|e| { + // If the lookup fails then the shuffling must be invalid in some way, e.g. the + // block with `block_root` is from a later epoch than `previous_epoch`. + assert!( + !current_epoch_shuffling_is_compatible, + "block at slot {slot} has compatible shuffling at epoch {} \ + but should be incompatible due to error: {e:?}", + head_state.current_epoch() + ); + }); + // Similarly for the previous epoch - assert_eq!( - harness.chain.shuffling_is_compatible( - &block_root, + let previous_epoch_shuffling_is_compatible = harness.chain.shuffling_is_compatible( + &block_root, + head_state.previous_epoch(), + &head_state, + ); + harness + .chain + .with_committee_cache( + block_root, head_state.previous_epoch(), - &head_state - ), - previous_epoch_valid - && slot >= previous_epoch_cutoff_slot.unwrap_or(previous_pivot_slot.as_u64()) - ); - // Targeting the next epoch should always return false - assert_eq!( - harness.chain.shuffling_is_compatible( - &block_root, - head_state.current_epoch() + 1, - &head_state - ), - false - ); - // Targeting two epochs before the current epoch should also always return false + |committee_cache, _| { + let state_cache = head_state.committee_cache(RelativeEpoch::Previous).unwrap(); + if 
previous_epoch_shuffling_is_compatible { + assert_eq!(committee_cache, state_cache); + } else { + assert_ne!(committee_cache, state_cache); + } + Ok(()) + }, + ) + .unwrap_or_else(|e| { + // If the lookup fails then the shuffling must be invalid in some way, e.g. the + // block with `block_root` is from a later epoch than `previous_epoch`. + assert!( + !previous_epoch_shuffling_is_compatible, + "block at slot {slot} has compatible shuffling at epoch {} \ + but should be incompatible due to error: {e:?}", + head_state.previous_epoch() + ); + }); + + // Targeting two epochs before the current epoch should always return false if head_state.current_epoch() >= 2 { assert_eq!( harness.chain.shuffling_is_compatible( From c5cd0d9b3f49f3fef76dc3be75a2d773b75bf9b2 Mon Sep 17 00:00:00 2001 From: GeemoCandama Date: Tue, 18 Oct 2022 04:02:07 +0000 Subject: [PATCH 27/27] add execution-timeout-multiplier flag to optionally increase timeouts (#3631) ## Issue Addressed Add flag to lengthen execution layer timeouts Which issue # does this PR address? #3607 ## Proposed Changes Added execution-timeout-multiplier flag and a cli test to ensure the execution layer config has the multiplier set correctly. Please list or describe the changes introduced by this PR. Add execution_timeout_multiplier to the execution layer config as Option and pass the u32 to HttpJsonRpc. ## Additional Info Not certain that this is the best way to implement it so I'd appreciate any feedback. Please provide any additional information. For example, future considerations or information useful for reviewers. 
--- beacon_node/eth1/src/service.rs | 10 ++- beacon_node/eth1/tests/test.rs | 9 ++- .../execution_layer/src/engine_api/http.rs | 63 ++++++++++++++----- beacon_node/execution_layer/src/lib.rs | 5 +- beacon_node/src/cli.rs | 9 ++- beacon_node/src/config.rs | 3 + lighthouse/tests/beacon_node.rs | 16 +++++ 7 files changed, 91 insertions(+), 24 deletions(-) diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index fae6eef9c2..c6b87e88e3 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -290,6 +290,7 @@ pub struct Config { pub max_blocks_per_update: Option, /// If set to true, the eth1 caches are wiped clean when the eth1 service starts. pub purge_cache: bool, + pub execution_timeout_multiplier: u32, } impl Config { @@ -347,6 +348,7 @@ impl Default for Config { max_log_requests_per_update: Some(5_000), max_blocks_per_update: Some(8_192), purge_cache: false, + execution_timeout_multiplier: 1, } } } @@ -361,11 +363,13 @@ pub fn endpoint_from_config(config: &Config) -> Result { } => { let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version) .map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?; - HttpJsonRpc::new_with_auth(endpoint, auth) + HttpJsonRpc::new_with_auth(endpoint, auth, Some(config.execution_timeout_multiplier)) + .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) + } + Eth1Endpoint::NoAuth(endpoint) => { + HttpJsonRpc::new(endpoint, Some(config.execution_timeout_multiplier)) .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) } - Eth1Endpoint::NoAuth(endpoint) => HttpJsonRpc::new(endpoint) - .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)), } } diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index 9f81f91e19..7e58f07e24 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -493,7 +493,8 @@ mod deposit_tree { let mut deposit_roots = vec![]; let mut deposit_counts 
= vec![]; - let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap()).unwrap(); + let client = + HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); // Perform deposits to the smart contract, recording it's state along the way. for deposit in &deposits { @@ -597,7 +598,8 @@ mod http { .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; let web3 = eth1.web3(); - let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap()).unwrap(); + let client = + HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); let block_number = get_block_number(&web3).await; let logs = blocking_deposit_logs(&client, ð1, 0..block_number).await; @@ -711,7 +713,8 @@ mod fast { MainnetEthSpec::default_spec(), ) .unwrap(); - let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap()).unwrap(); + let client = + HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); let n = 10; let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); for deposit in &deposits { diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 0f848a7716..be68c37b06 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -518,22 +518,32 @@ pub mod deposit_methods { pub struct HttpJsonRpc { pub client: Client, pub url: SensitiveUrl, + pub execution_timeout_multiplier: u32, auth: Option, } impl HttpJsonRpc { - pub fn new(url: SensitiveUrl) -> Result { + pub fn new( + url: SensitiveUrl, + execution_timeout_multiplier: Option, + ) -> Result { Ok(Self { client: Client::builder().build()?, url, + execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), auth: None, }) } - pub fn new_with_auth(url: SensitiveUrl, auth: Auth) -> Result { + pub fn new_with_auth( + url: SensitiveUrl, + auth: Auth, + 
execution_timeout_multiplier: Option<u32>, + ) -> Result<Self, Error> { Ok(Self { client: Client::builder().build()?, url, + execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), auth: Some(auth), }) } @@ -590,7 +600,11 @@ impl std::fmt::Display for HttpJsonRpc { impl HttpJsonRpc { pub async fn upcheck(&self) -> Result<(), Error> { let result: serde_json::Value = self - .rpc_request(ETH_SYNCING, json!([]), ETH_SYNCING_TIMEOUT) + .rpc_request( + ETH_SYNCING, + json!([]), + ETH_SYNCING_TIMEOUT * self.execution_timeout_multiplier, + ) .await?; /* @@ -614,7 +628,7 @@ impl HttpJsonRpc { self.rpc_request( ETH_GET_BLOCK_BY_NUMBER, params, - ETH_GET_BLOCK_BY_NUMBER_TIMEOUT, + ETH_GET_BLOCK_BY_NUMBER_TIMEOUT * self.execution_timeout_multiplier, ) .await } @@ -625,8 +639,12 @@ ) -> Result<Option<ExecutionBlock>, Error> { let params = json!([block_hash, RETURN_FULL_TRANSACTION_OBJECTS]); - self.rpc_request(ETH_GET_BLOCK_BY_HASH, params, ETH_GET_BLOCK_BY_HASH_TIMEOUT) - .await + self.rpc_request( + ETH_GET_BLOCK_BY_HASH, + params, + ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, + ) + .await } pub async fn get_block_by_hash_with_txns<T: EthSpec>( &self, block_hash: ExecutionBlockHash, ) -> Result<Option<ExecutionBlockWithTransactions<T>>, Error> { let params = json!([block_hash, true]); - self.rpc_request(ETH_GET_BLOCK_BY_HASH, params, ETH_GET_BLOCK_BY_HASH_TIMEOUT) - .await + self.rpc_request( + ETH_GET_BLOCK_BY_HASH, + params, + ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, + ) + .await } pub async fn new_payload_v1<T: EthSpec>( &self, execution_payload: ExecutionPayload<T>, ) -> Result<PayloadStatusV1, Error> { let params = json!([JsonExecutionPayloadV1::from(execution_payload)]); let response: JsonPayloadStatusV1 = self - .rpc_request(ENGINE_NEW_PAYLOAD_V1, params, ENGINE_NEW_PAYLOAD_TIMEOUT) + .rpc_request( + ENGINE_NEW_PAYLOAD_V1, + params, + ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) .await?; Ok(response.into()) } @@ -658,7 +684,11 @@ let params = 
json!([JsonPayloadIdRequest::from(payload_id)]); let response: JsonExecutionPayloadV1<T> = self - .rpc_request(ENGINE_GET_PAYLOAD_V1, params, ENGINE_GET_PAYLOAD_TIMEOUT) + .rpc_request( + ENGINE_GET_PAYLOAD_V1, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) .await?; Ok(response.into()) } @@ -678,7 +708,7 @@ impl HttpJsonRpc { .rpc_request( ENGINE_FORKCHOICE_UPDATED_V1, params, - ENGINE_FORKCHOICE_UPDATED_TIMEOUT, + ENGINE_FORKCHOICE_UPDATED_TIMEOUT * self.execution_timeout_multiplier, ) .await?; @@ -695,7 +725,8 @@ .rpc_request( ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, params, - ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT, + ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT + * self.execution_timeout_multiplier, ) .await?; @@ -732,13 +763,13 @@ mod test { let echo_auth = Auth::new(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), None, None); ( - Arc::new(HttpJsonRpc::new_with_auth(rpc_url, rpc_auth).unwrap()), - Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth).unwrap()), + Arc::new(HttpJsonRpc::new_with_auth(rpc_url, rpc_auth, None).unwrap()), + Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth, None).unwrap()), ) } else { ( - Arc::new(HttpJsonRpc::new(rpc_url).unwrap()), - Arc::new(HttpJsonRpc::new(echo_url).unwrap()), + Arc::new(HttpJsonRpc::new(rpc_url, None).unwrap()), + Arc::new(HttpJsonRpc::new(echo_url, None).unwrap()), ) }; diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 68071ee9b1..f222f28c33 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -159,6 +159,7 @@ pub struct Config { pub default_datadir: PathBuf, /// The minimum value of an external payload for it to be considered in a proposal. 
pub builder_profit_threshold: u128, + pub execution_timeout_multiplier: Option<u32>, } /// Provides access to one execution engine and provides a neat interface for consumption by the @@ -180,6 +181,7 @@ impl ExecutionLayer { jwt_version, default_datadir, builder_profit_threshold, + execution_timeout_multiplier, } = config; if urls.len() > 1 { @@ -224,7 +226,8 @@ impl ExecutionLayer { let engine: Engine = { let auth = Auth::new(jwt_key, jwt_id, jwt_version); debug!(log, "Loaded execution endpoint"; "endpoint" => %execution_url, "jwt_path" => ?secret_file.as_path()); - let api = HttpJsonRpc::new_with_auth(execution_url, auth).map_err(Error::ApiError)?; + let api = HttpJsonRpc::new_with_auth(execution_url, auth, execution_timeout_multiplier) + .map_err(Error::ApiError)?; Engine::new(api, executor.clone(), &log) }; diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 1e51849876..0b7518b957 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -503,7 +503,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .requires("execution-endpoint") .takes_value(true) ) - + .arg( + Arg::with_name("execution-timeout-multiplier") + .long("execution-timeout-multiplier") + .value_name("NUM") + .help("Unsigned integer to multiply the default execution timeouts by.") + .default_value("1") + .takes_value(true) + ) /* * Database purging and compaction. 
*/ diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index ecd4d736a6..7666134b41 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -335,6 +335,9 @@ pub fn get_config( el_config.default_datadir = client_config.data_dir.clone(); el_config.builder_profit_threshold = clap_utils::parse_required(cli_args, "builder-profit-threshold")?; + let execution_timeout_multiplier = + clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?; + el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier); // If `--execution-endpoint` is provided, we should ignore any `--eth1-endpoints` values and // use `--execution-endpoint` instead. Also, log a deprecation warning. diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index a00fd7a822..34041a82c8 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -407,6 +407,22 @@ fn run_execution_jwt_secret_key_is_persisted() { }); } #[test] +fn execution_timeout_multiplier_flag() { + let dir = TempDir::new().expect("Unable to create temporary directory"); + CommandLineTest::new() + .flag("execution-endpoint", Some("http://meow.cats")) + .flag( + "execution-jwt", + dir.path().join("jwt-file").as_os_str().to_str(), + ) + .flag("execution-timeout-multiplier", Some("3")) + .run_with_zero_port() + .with_config(|config| { + let config = config.execution_layer.as_ref().unwrap(); + assert_eq!(config.execution_timeout_multiplier, Some(3)); + }); +} +#[test] fn merge_execution_endpoints_flag() { run_merge_execution_endpoints_flag_test("execution-endpoints") }