From a6d5c7d7e0b5fc371445211f3ed3d911268a867a Mon Sep 17 00:00:00 2001 From: Jack McPherson Date: Thu, 6 Jul 2023 07:35:31 +0000 Subject: [PATCH 01/75] Correct checks for backfill completeness (#4465) ## Issue Addressed #4331 ## Proposed Changes - Use comparison rather than strict equality between the earliest epoch we know about and the backfill target (which will be the most recent WSP by default or genesis) - Add helper function `BackFillSync::would_complete` to achieve this in one location ## Additional Info - There's an ad hoc test for this in #4461 Co-authored-by: Age Manning --- .../network/src/sync/backfill_sync/mod.rs | 31 +++++++------------ 1 file changed, 12 insertions(+), 19 deletions(-) diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 460c8b1ee9..9f676ba017 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -1098,12 +1098,7 @@ impl BackFillSync { match self.batches.entry(batch_id) { Entry::Occupied(_) => { // this batch doesn't need downloading, let this same function decide the next batch - if batch_id - == self - .beacon_chain - .genesis_backfill_slot - .epoch(T::EthSpec::slots_per_epoch()) - { + if self.would_complete(batch_id) { self.last_batch_downloaded = true; } @@ -1114,12 +1109,7 @@ impl BackFillSync { } Entry::Vacant(entry) => { entry.insert(BatchInfo::new(&batch_id, BACKFILL_EPOCHS_PER_BATCH)); - if batch_id - == self - .beacon_chain - .genesis_backfill_slot - .epoch(T::EthSpec::slots_per_epoch()) - { + if self.would_complete(batch_id) { self.last_batch_downloaded = true; } self.to_be_downloaded = self @@ -1151,14 +1141,8 @@ impl BackFillSync { /// Checks with the beacon chain if backfill sync has completed. 
fn check_completed(&mut self) -> bool { - if self.current_start - == self - .beacon_chain - .genesis_backfill_slot - .epoch(T::EthSpec::slots_per_epoch()) - { + if self.would_complete(self.current_start) { // Check that the beacon chain agrees - if let Some(anchor_info) = self.beacon_chain.store.get_anchor_info() { // Conditions that we have completed a backfill sync if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) { @@ -1171,6 +1155,15 @@ impl BackFillSync { false } + /// Checks if backfill would complete by syncing to `start_epoch`. + fn would_complete(&self, start_epoch: Epoch) -> bool { + start_epoch + <= self + .beacon_chain + .genesis_backfill_slot + .epoch(T::EthSpec::slots_per_epoch()) + } + /// Updates the global network state indicating the current state of a backfill sync. fn set_state(&self, state: BackFillState) { *self.network_globals.backfill_state.write() = state; From ea2420d193ee758ce372a430b857e4b2724cd7e4 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sat, 8 Jul 2023 13:16:06 +0000 Subject: [PATCH 02/75] Bump default checkpoint sync timeout to 3 minutes (#4466) ## Issue Addressed [Users on Twitter](https://twitter.com/ashekhirin/status/1676334843192397824) are getting checkpoint sync URL timeouts with the default of 60s, so this PR increases the default timeout to 3 minutes. I've also added a short section to the book about adjusting the timeout with `--checkpoint-sync-url-timeout`. 
--- beacon_node/src/cli.rs | 2 +- book/src/checkpoint-sync.md | 11 +++++++++++ lighthouse/tests/beacon_node.rs | 2 +- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 646356b6cb..fc53f57888 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -848,7 +848,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Set the timeout for checkpoint sync calls to remote beacon node HTTP endpoint.") .value_name("SECONDS") .takes_value(true) - .default_value("60") + .default_value("180") ) .arg( Arg::with_name("reconstruct-historic-states") diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md index d5c8b18e57..5788382894 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/checkpoint-sync.md @@ -48,6 +48,17 @@ The Ethereum community provides various [public endpoints](https://eth-clients.g lighthouse bn --checkpoint-sync-url https://example.com/ ... ``` +### Adjusting the timeout + +If the beacon node fails to start due to a timeout from the checkpoint sync server, you can try +running it again with a longer timeout by adding the flag `--checkpoint-sync-url-timeout`. + +``` +lighthouse bn --checkpoint-sync-url-timeout 300 --checkpoint-sync-url https://example.com/ ... +``` + +The flag takes a value in seconds. For more information see `lighthouse bn --help`. 
+ ## Backfilling Blocks Once forwards sync completes, Lighthouse will commence a "backfill sync" to download the blocks diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 9b6d23ddcf..4badcef3cf 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -177,7 +177,7 @@ fn checkpoint_sync_url_timeout_default() { CommandLineTest::new() .run_with_zero_port() .with_config(|config| { - assert_eq!(config.chain.checkpoint_sync_url_timeout, 60); + assert_eq!(config.chain.checkpoint_sync_url_timeout, 180); }); } From c25825a5393aca2cde6e33dd673f8c074c2b543b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 10 Jul 2023 07:45:54 +0000 Subject: [PATCH 03/75] Move the `BeaconProcessor` into a new crate (#4435) *Replaces #4434. It is identical, but this PR has a smaller diff due to a curated commit history.* ## Issue Addressed NA ## Proposed Changes This PR moves the scheduling logic for the `BeaconProcessor` into a new crate in `beacon_node/beacon_processor`. Previously it existed in the `beacon_node/network` crate. This addresses a circular-dependency problem where it's not possible to use the `BeaconProcessor` from the `beacon_chain` crate. The `network` crate depends on the `beacon_chain` crate (`network -> beacon_chain`), but importing the `BeaconProcessor` into the `beacon_chain` crate would create a circular dependency of `beacon_chain -> network`. The `BeaconProcessor` was designed to provide queuing and prioritized scheduling for messages from the network. It has proven to be quite valuable and I believe we'd make Lighthouse more stable and effective by using it elsewhere. In particular, I think we should use the `BeaconProcessor` for: 1. HTTP API requests. 1. Scheduled tasks in the `BeaconChain` (e.g., state advance). 
Using the `BeaconProcessor` for these tasks would help prevent the BN from becoming overwhelmed and would also help it to prioritize operations (e.g., choosing to process blocks from gossip before responding to low-priority HTTP API requests). ## Additional Info This PR is intended to have zero impact on runtime behaviour. It aims to simply separate the *scheduling* code (i.e., the `BeaconProcessor`) from the *business logic* in the `network` crate (i.e., the `Worker` impls). Future PRs (see #4462) can build upon these works to actually use the `BeaconProcessor` for more operations. I've gone to some effort to use `git mv` to make the diff look more like "file was moved and modified" rather than "file was deleted and a new one added". This should reduce review burden and help maintain commit attribution. --- Cargo.lock | 28 + Cargo.toml | 1 + beacon_node/Cargo.toml | 2 +- beacon_node/beacon_processor/Cargo.toml | 24 + .../mod.rs => beacon_processor/src/lib.rs} | 1266 ++++------------- beacon_node/beacon_processor/src/metrics.rs | 141 ++ .../src}/work_reprocessing_queue.rs | 243 ++-- beacon_node/client/Cargo.toml | 2 + beacon_node/client/src/builder.rs | 42 +- beacon_node/network/Cargo.toml | 6 +- .../src/beacon_processor/worker/mod.rs | 51 - beacon_node/network/src/lib.rs | 2 +- beacon_node/network/src/metrics.rs | 117 +- .../gossip_methods.rs | 202 ++- .../src/network_beacon_processor/mod.rs | 590 ++++++++ .../rpc_methods.rs | 14 +- .../sync_methods.rs | 109 +- .../tests.rs | 139 +- beacon_node/network/src/router.rs | 260 ++-- beacon_node/network/src/service.rs | 7 +- beacon_node/network/src/service/tests.rs | 23 +- .../network/src/sync/backfill_sync/mod.rs | 6 +- .../network/src/sync/block_lookups/mod.rs | 20 +- .../network/src/sync/block_lookups/tests.rs | 21 +- beacon_node/network/src/sync/manager.rs | 45 +- .../network/src/sync/network_context.rs | 29 +- .../network/src/sync/range_sync/chain.rs | 10 +- .../network/src/sync/range_sync/range.rs | 17 +- 28 files 
changed, 1743 insertions(+), 1674 deletions(-) create mode 100644 beacon_node/beacon_processor/Cargo.toml rename beacon_node/{network/src/beacon_processor/mod.rs => beacon_processor/src/lib.rs} (58%) create mode 100644 beacon_node/beacon_processor/src/metrics.rs rename beacon_node/{network/src/beacon_processor => beacon_processor/src}/work_reprocessing_queue.rs (85%) delete mode 100644 beacon_node/network/src/beacon_processor/worker/mod.rs rename beacon_node/network/src/{beacon_processor/worker => network_beacon_processor}/gossip_methods.rs (95%) create mode 100644 beacon_node/network/src/network_beacon_processor/mod.rs rename beacon_node/network/src/{beacon_processor/worker => network_beacon_processor}/rpc_methods.rs (98%) rename beacon_node/network/src/{beacon_processor/worker => network_beacon_processor}/sync_methods.rs (90%) rename beacon_node/network/src/{beacon_processor => network_beacon_processor}/tests.rs (89%) diff --git a/Cargo.lock b/Cargo.lock index e360bdd62e..532f7ff204 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -700,6 +700,30 @@ dependencies = [ "unused_port", ] +[[package]] +name = "beacon_processor" +version = "0.1.0" +dependencies = [ + "derivative", + "ethereum_ssz", + "fnv", + "futures", + "hex", + "itertools", + "lazy_static", + "lighthouse_metrics", + "lighthouse_network", + "logging", + "parking_lot 0.12.1", + "slog", + "slot_clock", + "strum", + "task_executor", + "tokio", + "tokio-util 0.6.10", + "types", +] + [[package]] name = "bincode" version = "1.3.3" @@ -1148,6 +1172,7 @@ name = "client" version = "0.2.0" dependencies = [ "beacon_chain", + "beacon_processor", "directory", "dirs", "environment", @@ -1165,6 +1190,7 @@ dependencies = [ "logging", "monitoring_api", "network", + "num_cpus", "operation_pool", "parking_lot 0.12.1", "sensitive_url", @@ -5405,6 +5431,7 @@ name = "network" version = "0.2.0" dependencies = [ "beacon_chain", + "beacon_processor", "delay_map", "derivative", "environment", @@ -5428,6 +5455,7 @@ dependencies = [ 
"matches", "num_cpus", "operation_pool", + "parking_lot 0.12.1", "rand 0.8.5", "rlp", "slog", diff --git a/Cargo.toml b/Cargo.toml index 5c39e01ed1..775842a8c7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,7 @@ members = [ "beacon_node", "beacon_node/beacon_chain", + "beacon_node/beacon_processor", "beacon_node/builder_client", "beacon_node/client", "beacon_node/eth1", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 7c74365418..f55c724dc3 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -41,4 +41,4 @@ monitoring_api = { path = "../common/monitoring_api" } sensitive_url = { path = "../common/sensitive_url" } http_api = { path = "http_api" } unused_port = { path = "../common/unused_port" } -strum = "0.24.1" +strum = "0.24.1" \ No newline at end of file diff --git a/beacon_node/beacon_processor/Cargo.toml b/beacon_node/beacon_processor/Cargo.toml new file mode 100644 index 0000000000..5c5200e101 --- /dev/null +++ b/beacon_node/beacon_processor/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "beacon_processor" +version = "0.1.0" +edition = "2021" + +[dependencies] +slog = { version = "2.5.2", features = ["max_level_trace"] } +itertools = "0.10.0" +logging = { path = "../../common/logging" } +tokio = { version = "1.14.0", features = ["full"] } +tokio-util = { version = "0.6.3", features = ["time"] } +futures = "0.3.7" +fnv = "1.0.7" +strum = "0.24.0" +task_executor = { path = "../../common/task_executor" } +slot_clock = { path = "../../common/slot_clock" } +lighthouse_network = { path = "../lighthouse_network" } +hex = "0.4.2" +derivative = "2.2.0" +types = { path = "../../consensus/types" } +ethereum_ssz = "0.5.0" +lazy_static = "1.4.0" +lighthouse_metrics = { path = "../../common/lighthouse_metrics" } +parking_lot = "0.12.0" \ No newline at end of file diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/beacon_processor/src/lib.rs similarity index 58% rename from 
beacon_node/network/src/beacon_processor/mod.rs rename to beacon_node/beacon_processor/src/lib.rs index 84d8e1b07a..88066f2a30 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -38,51 +38,34 @@ //! checks the queues to see if there are more parcels of work that can be spawned in a new worker //! task. -use crate::sync::manager::BlockProcessType; -use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; -use beacon_chain::parking_lot::Mutex; -use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock, NotifyExecutionLayer}; -use derivative::Derivative; +use crate::work_reprocessing_queue::{ + spawn_reprocess_scheduler, QueuedAggregate, QueuedBackfillBatch, QueuedGossipBlock, + QueuedLightClientUpdate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, ReprocessQueueMessage, +}; use futures::stream::{Stream, StreamExt}; use futures::task::Poll; -use lighthouse_network::rpc::LightClientBootstrapRequest; -use lighthouse_network::{ - rpc::{BlocksByRangeRequest, BlocksByRootRequest, StatusMessage}, - Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, -}; +use lighthouse_network::NetworkGlobals; +use lighthouse_network::{MessageId, PeerId}; use logging::TimeLatch; +use parking_lot::Mutex; use slog::{crit, debug, error, trace, warn, Logger}; -use std::collections::VecDeque; +use slot_clock::SlotClock; +use std::cmp; +use std::collections::{HashSet, VecDeque}; +use std::fmt; use std::future::Future; -use std::path::PathBuf; use std::pin::Pin; -use std::sync::{Arc, Weak}; +use std::sync::Arc; use std::task::Context; use std::time::Duration; -use std::{cmp, collections::HashSet}; use task_executor::TaskExecutor; use tokio::sync::mpsc; use tokio::sync::mpsc::error::TrySendError; -use types::{ - Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate, - ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, - 
SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, -}; -use work_reprocessing_queue::{ - spawn_reprocess_scheduler, QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock, - QueuedUnaggregate, ReadyWork, -}; +use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, Slot, SubnetId}; +use work_reprocessing_queue::IgnoredRpcBlock; -use worker::{Toolbox, Worker}; - -mod tests; -mod work_reprocessing_queue; -mod worker; - -use crate::beacon_processor::work_reprocessing_queue::{ - QueuedBackfillBatch, QueuedGossipBlock, ReprocessQueueMessage, -}; -pub use worker::{ChainSegmentProcessId, GossipAggregatePackage, GossipAttestationPackage}; +mod metrics; +pub mod work_reprocessing_queue; /// The maximum size of the channel for work events to the `BeaconProcessor`. /// @@ -96,7 +79,7 @@ pub const MAX_WORK_EVENT_QUEUE_LEN: usize = 16_384; const MAX_IDLE_QUEUE_LEN: usize = 16_384; /// The maximum size of the channel for re-processing work events. -const MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 3 * MAX_WORK_EVENT_QUEUE_LEN / 4; +pub const MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 3 * MAX_WORK_EVENT_QUEUE_LEN / 4; /// The maximum number of queued `Attestation` objects that will be stored before we start dropping /// them. @@ -221,6 +204,7 @@ pub const GOSSIP_SYNC_CONTRIBUTION: &str = "gossip_sync_contribution"; pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update"; pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; pub const RPC_BLOCK: &str = "rpc_block"; +pub const IGNORED_RPC_BLOCK: &str = "ignored_rpc_block"; pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill"; pub const STATUS_PROCESSING: &str = "status_processing"; @@ -366,394 +350,104 @@ impl DuplicateCache { } /// An event to be processed by the manager task. 
-#[derive(Derivative)] -#[derivative(Debug(bound = "T: BeaconChainTypes"))] -pub struct WorkEvent { - drop_during_sync: bool, - work: Work, +#[derive(Debug)] +pub struct WorkEvent { + pub drop_during_sync: bool, + pub work: Work, } -impl WorkEvent { - /// Create a new `Work` event for some unaggregated attestation. - pub fn unaggregated_attestation( - message_id: MessageId, - peer_id: PeerId, - attestation: Attestation, - subnet_id: SubnetId, - should_import: bool, - seen_timestamp: Duration, - ) -> Self { - Self { - drop_during_sync: true, - work: Work::GossipAttestation { - message_id, - peer_id, - attestation: Box::new(attestation), - subnet_id, - should_import, - seen_timestamp, - }, - } - } - - /// Create a new `Work` event for some aggregated attestation. - pub fn aggregated_attestation( - message_id: MessageId, - peer_id: PeerId, - aggregate: SignedAggregateAndProof, - seen_timestamp: Duration, - ) -> Self { - Self { - drop_during_sync: true, - work: Work::GossipAggregate { - message_id, - peer_id, - aggregate: Box::new(aggregate), - seen_timestamp, - }, - } - } - - /// Create a new `Work` event for some block. - pub fn gossip_beacon_block( - message_id: MessageId, - peer_id: PeerId, - peer_client: Client, - block: Arc>, - seen_timestamp: Duration, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::GossipBlock { - message_id, - peer_id, - peer_client, - block, - seen_timestamp, - }, - } - } - - /// Create a new `Work` event for some sync committee signature. - pub fn gossip_sync_signature( - message_id: MessageId, - peer_id: PeerId, - sync_signature: SyncCommitteeMessage, - subnet_id: SyncSubnetId, - seen_timestamp: Duration, - ) -> Self { - Self { - drop_during_sync: true, - work: Work::GossipSyncSignature { - message_id, - peer_id, - sync_signature: Box::new(sync_signature), - subnet_id, - seen_timestamp, - }, - } - } - - /// Create a new `Work` event for some sync committee contribution. 
- pub fn gossip_sync_contribution( - message_id: MessageId, - peer_id: PeerId, - sync_contribution: SignedContributionAndProof, - seen_timestamp: Duration, - ) -> Self { - Self { - drop_during_sync: true, - work: Work::GossipSyncContribution { - message_id, - peer_id, - sync_contribution: Box::new(sync_contribution), - seen_timestamp, - }, - } - } - - /// Create a new `Work` event for some exit. - pub fn gossip_voluntary_exit( - message_id: MessageId, - peer_id: PeerId, - voluntary_exit: Box, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::GossipVoluntaryExit { - message_id, - peer_id, - voluntary_exit, - }, - } - } - - /// Create a new `Work` event for some proposer slashing. - pub fn gossip_proposer_slashing( - message_id: MessageId, - peer_id: PeerId, - proposer_slashing: Box, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::GossipProposerSlashing { - message_id, - peer_id, - proposer_slashing, - }, - } - } - - /// Create a new `Work` event for some light client finality update. - pub fn gossip_light_client_finality_update( - message_id: MessageId, - peer_id: PeerId, - light_client_finality_update: Box>, - seen_timestamp: Duration, - ) -> Self { - Self { - drop_during_sync: true, - work: Work::GossipLightClientFinalityUpdate { - message_id, - peer_id, - light_client_finality_update, - seen_timestamp, - }, - } - } - - /// Create a new `Work` event for some light client optimistic update. - pub fn gossip_light_client_optimistic_update( - message_id: MessageId, - peer_id: PeerId, - light_client_optimistic_update: Box>, - seen_timestamp: Duration, - ) -> Self { - Self { - drop_during_sync: true, - work: Work::GossipLightClientOptimisticUpdate { - message_id, - peer_id, - light_client_optimistic_update, - seen_timestamp, - }, - } - } - - /// Create a new `Work` event for some attester slashing. 
- pub fn gossip_attester_slashing( - message_id: MessageId, - peer_id: PeerId, - attester_slashing: Box>, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::GossipAttesterSlashing { - message_id, - peer_id, - attester_slashing, - }, - } - } - - /// Create a new `Work` event for some BLS to execution change. - pub fn gossip_bls_to_execution_change( - message_id: MessageId, - peer_id: PeerId, - bls_to_execution_change: Box, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::GossipBlsToExecutionChange { - message_id, - peer_id, - bls_to_execution_change, - }, - } - } - - /// Create a new `Work` event for some block, where the result from computation (if any) is - /// sent to the other side of `result_tx`. - pub fn rpc_beacon_block( - block_root: Hash256, - block: Arc>, - seen_timestamp: Duration, - process_type: BlockProcessType, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::RpcBlock { - block_root, - block, - seen_timestamp, - process_type, - should_process: true, - }, - } - } - - /// Create a new work event to import `blocks` as a beacon chain segment. - pub fn chain_segment( - process_id: ChainSegmentProcessId, - blocks: Vec>>, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::ChainSegment { process_id, blocks }, - } - } - - /// Create a new work event to process `StatusMessage`s from the RPC network. - pub fn status_message(peer_id: PeerId, message: StatusMessage) -> Self { - Self { - drop_during_sync: false, - work: Work::Status { peer_id, message }, - } - } - - /// Create a new work event to process `BlocksByRangeRequest`s from the RPC network. - pub fn blocks_by_range_request( - peer_id: PeerId, - request_id: PeerRequestId, - request: BlocksByRangeRequest, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::BlocksByRangeRequest { - peer_id, - request_id, - request, - }, - } - } - - /// Create a new work event to process `BlocksByRootRequest`s from the RPC network. 
- pub fn blocks_by_roots_request( - peer_id: PeerId, - request_id: PeerRequestId, - request: BlocksByRootRequest, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::BlocksByRootsRequest { - peer_id, - request_id, - request, - }, - } - } - - /// Create a new work event to process `LightClientBootstrap`s from the RPC network. - pub fn lightclient_bootstrap_request( - peer_id: PeerId, - request_id: PeerRequestId, - request: LightClientBootstrapRequest, - ) -> Self { - Self { - drop_during_sync: true, - work: Work::LightClientBootstrapRequest { - peer_id, - request_id, - request, - }, - } - } - +impl WorkEvent { /// Get a `str` representation of the type of work this `WorkEvent` contains. pub fn work_type(&self) -> &'static str { self.work.str_id() } } -impl std::convert::From> for WorkEvent { - fn from(ready_work: ReadyWork) -> Self { +impl std::convert::From for WorkEvent { + fn from(ready_work: ReadyWork) -> Self { match ready_work { ReadyWork::Block(QueuedGossipBlock { - peer_id, - block, - seen_timestamp, + beacon_block_slot, + beacon_block_root, + process_fn, }) => Self { drop_during_sync: false, work: Work::DelayedImportBlock { - peer_id, - block, - seen_timestamp, + beacon_block_slot, + beacon_block_root, + process_fn, }, }, ReadyWork::RpcBlock(QueuedRpcBlock { - block_root, - block, - seen_timestamp, - process_type, - should_process, + beacon_block_root: _, + process_fn, + ignore_fn: _, }) => Self { drop_during_sync: false, - work: Work::RpcBlock { - block_root, - block, - seen_timestamp, - process_type, - should_process, - }, + work: Work::RpcBlock { process_fn }, + }, + ReadyWork::IgnoredRpcBlock(IgnoredRpcBlock { process_fn }) => Self { + drop_during_sync: false, + work: Work::IgnoredRpcBlock { process_fn }, }, ReadyWork::Unaggregate(QueuedUnaggregate { - peer_id, - message_id, - attestation, - subnet_id, - should_import, - seen_timestamp, + beacon_block_root: _, + process_fn, }) => Self { drop_during_sync: true, - work: 
Work::UnknownBlockAttestation { - message_id, - peer_id, - attestation, - subnet_id, - should_import, - seen_timestamp, - }, + work: Work::UnknownBlockAttestation { process_fn }, }, ReadyWork::Aggregate(QueuedAggregate { - peer_id, - message_id, - attestation, - seen_timestamp, + process_fn, + beacon_block_root: _, }) => Self { drop_during_sync: true, - work: Work::UnknownBlockAggregate { - message_id, - peer_id, - aggregate: attestation, - seen_timestamp, - }, + work: Work::UnknownBlockAggregate { process_fn }, }, ReadyWork::LightClientUpdate(QueuedLightClientUpdate { - peer_id, - message_id, - light_client_optimistic_update, - seen_timestamp, - .. + parent_root, + process_fn, }) => Self { drop_during_sync: true, work: Work::UnknownLightClientOptimisticUpdate { - message_id, - peer_id, - light_client_optimistic_update, - seen_timestamp, + parent_root, + process_fn, }, }, - ReadyWork::BackfillSync(QueuedBackfillBatch { process_id, blocks }) => { - WorkEvent::chain_segment(process_id, blocks) - } + ReadyWork::BackfillSync(QueuedBackfillBatch(process_fn)) => Self { + drop_during_sync: false, + work: Work::ChainSegmentBackfill(process_fn), + }, } } } -pub struct BeaconProcessorSend(pub mpsc::Sender>); +/// Items required to verify a batch of unaggregated gossip attestations. +#[derive(Debug)] +pub struct GossipAttestationPackage { + pub message_id: MessageId, + pub peer_id: PeerId, + pub attestation: Box>, + pub subnet_id: SubnetId, + pub should_import: bool, + pub seen_timestamp: Duration, +} -impl BeaconProcessorSend { - pub fn try_send(&self, message: WorkEvent) -> Result<(), Box>>> { +/// Items required to verify a batch of aggregated gossip attestations. 
+#[derive(Debug)] +pub struct GossipAggregatePackage { + pub message_id: MessageId, + pub peer_id: PeerId, + pub aggregate: Box>, + pub beacon_block_root: Hash256, + pub seen_timestamp: Duration, +} + +#[derive(Clone)] +pub struct BeaconProcessorSend(pub mpsc::Sender>); + +impl BeaconProcessorSend { + pub fn try_send(&self, message: WorkEvent) -> Result<(), TrySendError>> { let work_type = message.work_type(); match self.0.try_send(message) { Ok(res) => Ok(res), @@ -762,146 +456,82 @@ impl BeaconProcessorSend { &metrics::BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE, &[work_type], ); - Err(Box::new(e)) + Err(e) } } } } -/// A consensus message (or multiple) from the network that requires processing. -#[derive(Derivative)] -#[derivative(Debug(bound = "T: BeaconChainTypes"))] -pub enum Work { +pub type AsyncFn = Pin + Send + Sync>>; +pub type BlockingFn = Box; +pub type BlockingFnWithManualSendOnIdle = Box; + +/// Indicates the type of work to be performed and therefore its priority and +/// queuing specifics. 
+pub enum Work { GossipAttestation { - message_id: MessageId, - peer_id: PeerId, - attestation: Box>, - subnet_id: SubnetId, - should_import: bool, - seen_timestamp: Duration, + attestation: GossipAttestationPackage, + process_individual: Box) + Send + Sync>, + process_batch: Box>) + Send + Sync>, }, UnknownBlockAttestation { - message_id: MessageId, - peer_id: PeerId, - attestation: Box>, - subnet_id: SubnetId, - should_import: bool, - seen_timestamp: Duration, + process_fn: BlockingFn, }, GossipAttestationBatch { - packages: Vec>, + attestations: Vec>, + process_batch: Box>) + Send + Sync>, }, GossipAggregate { - message_id: MessageId, - peer_id: PeerId, - aggregate: Box>, - seen_timestamp: Duration, + aggregate: GossipAggregatePackage, + process_individual: Box) + Send + Sync>, + process_batch: Box>) + Send + Sync>, }, UnknownBlockAggregate { - message_id: MessageId, - peer_id: PeerId, - aggregate: Box>, - seen_timestamp: Duration, + process_fn: BlockingFn, }, UnknownLightClientOptimisticUpdate { - message_id: MessageId, - peer_id: PeerId, - light_client_optimistic_update: Box>, - seen_timestamp: Duration, + parent_root: Hash256, + process_fn: BlockingFn, }, GossipAggregateBatch { - packages: Vec>, - }, - GossipBlock { - message_id: MessageId, - peer_id: PeerId, - peer_client: Client, - block: Arc>, - seen_timestamp: Duration, + aggregates: Vec>, + process_batch: Box>) + Send + Sync>, }, + GossipBlock(AsyncFn), DelayedImportBlock { - peer_id: PeerId, - block: Box>, - seen_timestamp: Duration, - }, - GossipVoluntaryExit { - message_id: MessageId, - peer_id: PeerId, - voluntary_exit: Box, - }, - GossipProposerSlashing { - message_id: MessageId, - peer_id: PeerId, - proposer_slashing: Box, - }, - GossipAttesterSlashing { - message_id: MessageId, - peer_id: PeerId, - attester_slashing: Box>, - }, - GossipSyncSignature { - message_id: MessageId, - peer_id: PeerId, - sync_signature: Box, - subnet_id: SyncSubnetId, - seen_timestamp: Duration, - }, - 
GossipSyncContribution { - message_id: MessageId, - peer_id: PeerId, - sync_contribution: Box>, - seen_timestamp: Duration, - }, - GossipLightClientFinalityUpdate { - message_id: MessageId, - peer_id: PeerId, - light_client_finality_update: Box>, - seen_timestamp: Duration, - }, - GossipLightClientOptimisticUpdate { - message_id: MessageId, - peer_id: PeerId, - light_client_optimistic_update: Box>, - seen_timestamp: Duration, + beacon_block_slot: Slot, + beacon_block_root: Hash256, + process_fn: AsyncFn, }, + GossipVoluntaryExit(BlockingFn), + GossipProposerSlashing(BlockingFn), + GossipAttesterSlashing(BlockingFn), + GossipSyncSignature(BlockingFn), + GossipSyncContribution(BlockingFn), + GossipLightClientFinalityUpdate(BlockingFn), + GossipLightClientOptimisticUpdate(BlockingFn), RpcBlock { - block_root: Hash256, - block: Arc>, - seen_timestamp: Duration, - process_type: BlockProcessType, - should_process: bool, + process_fn: AsyncFn, }, - ChainSegment { - process_id: ChainSegmentProcessId, - blocks: Vec>>, - }, - Status { - peer_id: PeerId, - message: StatusMessage, - }, - BlocksByRangeRequest { - peer_id: PeerId, - request_id: PeerRequestId, - request: BlocksByRangeRequest, - }, - BlocksByRootsRequest { - peer_id: PeerId, - request_id: PeerRequestId, - request: BlocksByRootRequest, - }, - GossipBlsToExecutionChange { - message_id: MessageId, - peer_id: PeerId, - bls_to_execution_change: Box, - }, - LightClientBootstrapRequest { - peer_id: PeerId, - request_id: PeerRequestId, - request: LightClientBootstrapRequest, + IgnoredRpcBlock { + process_fn: BlockingFn, }, + ChainSegment(AsyncFn), + ChainSegmentBackfill(AsyncFn), + Status(BlockingFn), + BlocksByRangeRequest(BlockingFnWithManualSendOnIdle), + BlocksByRootsRequest(BlockingFnWithManualSendOnIdle), + GossipBlsToExecutionChange(BlockingFn), + LightClientBootstrapRequest(BlockingFn), } -impl Work { +impl fmt::Debug for Work { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", 
self.str_id()) + } +} + +impl Work { /// Provides a `&str` that uniquely identifies each enum variant. fn str_id(&self) -> &'static str { match self { @@ -909,58 +539,56 @@ impl Work { Work::GossipAttestationBatch { .. } => GOSSIP_ATTESTATION_BATCH, Work::GossipAggregate { .. } => GOSSIP_AGGREGATE, Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH, - Work::GossipBlock { .. } => GOSSIP_BLOCK, + Work::GossipBlock(_) => GOSSIP_BLOCK, Work::DelayedImportBlock { .. } => DELAYED_IMPORT_BLOCK, - Work::GossipVoluntaryExit { .. } => GOSSIP_VOLUNTARY_EXIT, - Work::GossipProposerSlashing { .. } => GOSSIP_PROPOSER_SLASHING, - Work::GossipAttesterSlashing { .. } => GOSSIP_ATTESTER_SLASHING, - Work::GossipSyncSignature { .. } => GOSSIP_SYNC_SIGNATURE, - Work::GossipSyncContribution { .. } => GOSSIP_SYNC_CONTRIBUTION, - Work::GossipLightClientFinalityUpdate { .. } => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE, - Work::GossipLightClientOptimisticUpdate { .. } => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, + Work::GossipVoluntaryExit(_) => GOSSIP_VOLUNTARY_EXIT, + Work::GossipProposerSlashing(_) => GOSSIP_PROPOSER_SLASHING, + Work::GossipAttesterSlashing(_) => GOSSIP_ATTESTER_SLASHING, + Work::GossipSyncSignature(_) => GOSSIP_SYNC_SIGNATURE, + Work::GossipSyncContribution(_) => GOSSIP_SYNC_CONTRIBUTION, + Work::GossipLightClientFinalityUpdate(_) => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE, + Work::GossipLightClientOptimisticUpdate(_) => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, Work::RpcBlock { .. } => RPC_BLOCK, - Work::ChainSegment { - process_id: ChainSegmentProcessId::BackSyncBatchId { .. }, - .. - } => CHAIN_SEGMENT_BACKFILL, + Work::IgnoredRpcBlock { .. } => IGNORED_RPC_BLOCK, Work::ChainSegment { .. } => CHAIN_SEGMENT, - Work::Status { .. } => STATUS_PROCESSING, - Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST, - Work::BlocksByRootsRequest { .. } => BLOCKS_BY_ROOTS_REQUEST, - Work::LightClientBootstrapRequest { .. 
} => LIGHT_CLIENT_BOOTSTRAP_REQUEST, + Work::ChainSegmentBackfill(_) => CHAIN_SEGMENT_BACKFILL, + Work::Status(_) => STATUS_PROCESSING, + Work::BlocksByRangeRequest(_) => BLOCKS_BY_RANGE_REQUEST, + Work::BlocksByRootsRequest(_) => BLOCKS_BY_ROOTS_REQUEST, + Work::LightClientBootstrapRequest(_) => LIGHT_CLIENT_BOOTSTRAP_REQUEST, Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, - Work::GossipBlsToExecutionChange { .. } => GOSSIP_BLS_TO_EXECUTION_CHANGE, + Work::GossipBlsToExecutionChange(_) => GOSSIP_BLS_TO_EXECUTION_CHANGE, Work::UnknownLightClientOptimisticUpdate { .. } => UNKNOWN_LIGHT_CLIENT_UPDATE, } } } /// Unifies all the messages processed by the `BeaconProcessor`. -enum InboundEvent { +enum InboundEvent { /// A worker has completed a task and is free. WorkerIdle, /// There is new work to be done. - WorkEvent(WorkEvent), + WorkEvent(WorkEvent), /// A work event that was queued for re-processing has become ready. - ReprocessingWork(WorkEvent), + ReprocessingWork(WorkEvent), } /// Combines the various incoming event streams for the `BeaconProcessor` into a single stream. /// /// This struct has a similar purpose to `tokio::select!`, however it allows for more fine-grained /// control (specifically in the ordering of event processing). -struct InboundEvents { +struct InboundEvents { /// Used by workers when they finish a task. idle_rx: mpsc::Receiver<()>, /// Used by upstream processes to send new work to the `BeaconProcessor`. - event_rx: mpsc::Receiver>, + event_rx: mpsc::Receiver>, /// Used internally for queuing work ready to be re-processed. - reprocess_work_rx: mpsc::Receiver>, + reprocess_work_rx: mpsc::Receiver, } -impl Stream for InboundEvents { - type Item = InboundEvent; +impl Stream for InboundEvents { + type Item = InboundEvent; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { // Always check for idle workers before anything else. 
This allows us to ensure that a big @@ -1001,31 +629,20 @@ impl Stream for InboundEvents { } } -/// Defines if and where we will store the SSZ files of invalid blocks. -#[derive(Clone)] -pub enum InvalidBlockStorage { - Enabled(PathBuf), - Disabled, -} - /// A mutli-threaded processor for messages received on the network /// that need to be processed by the `BeaconChain` /// /// See module level documentation for more information. -pub struct BeaconProcessor { - pub beacon_chain: Weak>, - pub network_tx: mpsc::UnboundedSender>, - pub sync_tx: mpsc::UnboundedSender>, - pub network_globals: Arc>, +pub struct BeaconProcessor { + pub network_globals: Arc>, pub executor: TaskExecutor, pub max_workers: usize, pub current_workers: usize, - pub importing_blocks: DuplicateCache, - pub invalid_block_storage: InvalidBlockStorage, + pub enable_backfill_rate_limiting: bool, pub log: Logger, } -impl BeaconProcessor { +impl BeaconProcessor { /// Spawns the "manager" task which checks the receiver end of the returned `Sender` for /// messages which contain some new work which will be: /// @@ -1037,10 +654,13 @@ impl BeaconProcessor { /// /// The optional `work_journal_tx` allows for an outside process to receive a log of all work /// events processed by `self`. This should only be used during testing. - pub fn spawn_manager( + pub fn spawn_manager( mut self, - event_rx: mpsc::Receiver>, + event_rx: mpsc::Receiver>, + work_reprocessing_tx: mpsc::Sender, + work_reprocessing_rx: mpsc::Receiver, work_journal_tx: Option>, + slot_clock: S, ) { // Used by workers to communicate that they are finished a task. let (idle_tx, idle_rx) = mpsc::channel::<()>(MAX_IDLE_QUEUE_LEN); @@ -1093,20 +713,15 @@ impl BeaconProcessor { let mut lcbootstrap_queue = FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN); - let chain = match self.beacon_chain.upgrade() { - Some(chain) => chain, - // No need to proceed any further if the beacon chain has been dropped, the client - // is shutting down. 
- None => return, - }; - // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // receive them back once they are ready (`ready_work_rx`). - let (ready_work_tx, ready_work_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); - let work_reprocessing_tx = spawn_reprocess_scheduler( + let (ready_work_tx, ready_work_rx) = + mpsc::channel::(MAX_SCHEDULED_WORK_QUEUE_LEN); + spawn_reprocess_scheduler( ready_work_tx, + work_reprocessing_rx, &self.executor, - chain.slot_clock.clone(), + slot_clock, self.log.clone(), ); @@ -1121,7 +736,7 @@ impl BeaconProcessor { reprocess_work_rx: ready_work_rx, }; - let enable_backfill_rate_limiting = chain.config.enable_backfill_rate_limiting; + let enable_backfill_rate_limiting = self.enable_backfill_rate_limiting; loop { let work_event = match inbound_events.next().await { @@ -1209,33 +824,29 @@ impl BeaconProcessor { .as_ref() .map_or(false, |event| event.drop_during_sync); + let idle_tx = idle_tx.clone(); match work_event { // There is no new work event, but we are able to spawn a new worker. // // We don't check the `work.drop_during_sync` here. We assume that if it made // it into the queue at any point then we should process it. None if can_spawn => { - let toolbox = Toolbox { - idle_tx: idle_tx.clone(), - work_reprocessing_tx: work_reprocessing_tx.clone(), - }; - // Check for chain segments first, they're the most efficient way to get // blocks into the system. if let Some(item) = chain_segment_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Check sync blocks before gossip blocks, since we've already explicitly // requested these blocks. } else if let Some(item) = rpc_block_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Check delayed blocks before gossip blocks, the gossip blocks might rely // on the delayed ones. 
} else if let Some(item) = delayed_block_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Check gossip blocks before gossip attestations, since a block might be // required to verify some attestations. } else if let Some(item) = gossip_block_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Check the aggregates, *then* the unaggregates since we assume that // aggregates are more valuable to local validators and effectively give us // more information with less signature verification time. @@ -1246,7 +857,7 @@ impl BeaconProcessor { if batch_size < 2 { // One single aggregate is in the queue, process it individually. if let Some(item) = aggregate_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } } else { // Collect two or more aggregates into a batch, so they can take @@ -1254,32 +865,45 @@ impl BeaconProcessor { // // Note: this will convert the `Work::GossipAggregate` item into a // `Work::GossipAggregateBatch` item. - let mut packages = Vec::with_capacity(batch_size); + let mut aggregates = Vec::with_capacity(batch_size); + let mut process_batch_opt = None; for _ in 0..batch_size { if let Some(item) = aggregate_queue.pop() { match item { Work::GossipAggregate { - message_id, - peer_id, aggregate, - seen_timestamp, + process_individual: _, + process_batch, } => { - packages.push(GossipAggregatePackage::new( - message_id, - peer_id, - aggregate, - seen_timestamp, - )); + aggregates.push(aggregate); + if process_batch_opt.is_none() { + process_batch_opt = Some(process_batch); + } } _ => { - error!(self.log, "Invalid item in aggregate queue") + error!(self.log, "Invalid item in aggregate queue"); } } } } - // Process all aggregates with a single worker. - self.spawn_worker(Work::GossipAggregateBatch { packages }, toolbox) + if let Some(process_batch) = process_batch_opt { + // Process all aggregates with a single worker. 
+ self.spawn_worker( + Work::GossipAggregateBatch { + aggregates, + process_batch, + }, + idle_tx, + ) + } else { + // There is no good reason for this to + // happen, it is a serious logic error. + // Since we only form batches when multiple + // work items exist, we should always have a + // work closure at this point. + crit!(self.log, "Missing aggregate work"); + } } // Check the unaggregated attestation queue. // @@ -1293,7 +917,7 @@ impl BeaconProcessor { if batch_size < 2 { // One single attestation is in the queue, process it individually. if let Some(item) = attestation_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } } else { // Collect two or more attestations into a batch, so they can take @@ -1301,26 +925,20 @@ impl BeaconProcessor { // // Note: this will convert the `Work::GossipAttestation` item into a // `Work::GossipAttestationBatch` item. - let mut packages = Vec::with_capacity(batch_size); + let mut attestations = Vec::with_capacity(batch_size); + let mut process_batch_opt = None; for _ in 0..batch_size { if let Some(item) = attestation_queue.pop() { match item { Work::GossipAttestation { - message_id, - peer_id, attestation, - subnet_id, - should_import, - seen_timestamp, + process_individual: _, + process_batch, } => { - packages.push(GossipAttestationPackage::new( - message_id, - peer_id, - attestation, - subnet_id, - should_import, - seen_timestamp, - )); + attestations.push(attestation); + if process_batch_opt.is_none() { + process_batch_opt = Some(process_batch); + } } _ => error!( self.log, @@ -1330,54 +948,66 @@ impl BeaconProcessor { } } - // Process all attestations with a single worker. - self.spawn_worker( - Work::GossipAttestationBatch { packages }, - toolbox, - ) + if let Some(process_batch) = process_batch_opt { + // Process all attestations with a single worker. 
+ self.spawn_worker( + Work::GossipAttestationBatch { + attestations, + process_batch, + }, + idle_tx, + ) + } else { + // There is no good reason for this to + // happen, it is a serious logic error. + // Since we only form batches when multiple + // work items exist, we should always have a + // work closure at this point. + crit!(self.log, "Missing attestations work"); + } } // Check sync committee messages after attestations as their rewards are lesser // and they don't influence fork choice. } else if let Some(item) = sync_contribution_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } else if let Some(item) = sync_message_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Aggregates and unaggregates queued for re-processing are older and we // care about fresher ones, so check those first. } else if let Some(item) = unknown_block_aggregate_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } else if let Some(item) = unknown_block_attestation_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Check RPC methods next. Status messages are needed for sync so // prioritize them over syncing requests from other peers (BlocksByRange // and BlocksByRoot) } else if let Some(item) = status_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } else if let Some(item) = bbrange_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } else if let Some(item) = bbroots_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Check slashings after all other consensus messages so we prioritize // following head. // // Check attester slashings before proposer slashings since they have the // potential to slash multiple validators at once. 
} else if let Some(item) = gossip_attester_slashing_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } else if let Some(item) = gossip_proposer_slashing_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Check exits and address changes late since our validators don't get // rewards from them. } else if let Some(item) = gossip_voluntary_exit_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } else if let Some(item) = gossip_bls_to_execution_change_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Handle backfill sync chain segments. } else if let Some(item) = backfill_chain_segment.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // This statement should always be the final else statement. } else if let Some(item) = lcbootstrap_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } else { // Let the journal know that a worker is freed and there's nothing else // for it to do. @@ -1419,13 +1049,9 @@ impl BeaconProcessor { // it. Some(WorkEvent { work, .. }) => { let work_id = work.str_id(); - let toolbox = Toolbox { - idle_tx: idle_tx.clone(), - work_reprocessing_tx: work_reprocessing_tx.clone(), - }; match work { - _ if can_spawn => self.spawn_worker(work, toolbox), + _ if can_spawn => self.spawn_worker(work, idle_tx), Work::GossipAttestation { .. } => attestation_queue.push(work), // Attestation batches are formed internally within the // `BeaconProcessor`, they are not sent from external services. @@ -1467,16 +1093,15 @@ impl BeaconProcessor { Work::GossipLightClientOptimisticUpdate { .. } => { optimistic_update_queue.push(work, work_id, &self.log) } - Work::RpcBlock { .. } => rpc_block_queue.push(work, work_id, &self.log), - Work::ChainSegment { ref process_id, .. } => match process_id { - ChainSegmentProcessId::RangeBatchId { .. 
} - | ChainSegmentProcessId::ParentLookup { .. } => { - chain_segment_queue.push(work, work_id, &self.log) - } - ChainSegmentProcessId::BackSyncBatchId { .. } => { - backfill_chain_segment.push(work, work_id, &self.log) - } - }, + Work::RpcBlock { .. } | Work::IgnoredRpcBlock { .. } => { + rpc_block_queue.push(work, work_id, &self.log) + } + Work::ChainSegment { .. } => { + chain_segment_queue.push(work, work_id, &self.log) + } + Work::ChainSegmentBackfill { .. } => { + backfill_chain_segment.push(work, work_id, &self.log) + } Work::Status { .. } => status_queue.push(work, work_id, &self.log), Work::BlocksByRangeRequest { .. } => { bbrange_queue.push(work, work_id, &self.log) @@ -1583,10 +1208,7 @@ impl BeaconProcessor { /// Spawns a blocking worker thread to process some `Work`. /// /// Sends an message on `idle_tx` when the work is complete and the task is stopping. - fn spawn_worker(&mut self, work: Work, toolbox: Toolbox) { - let idle_tx = toolbox.idle_tx; - let work_reprocessing_tx = toolbox.work_reprocessing_tx; - + fn spawn_worker(&mut self, work: Work, idle_tx: mpsc::Sender<()>) { let work_id = work.str_id(); let worker_timer = metrics::start_timer_vec(&metrics::BEACON_PROCESSOR_WORKER_TIME, &[work_id]); @@ -1609,27 +1231,8 @@ impl BeaconProcessor { let worker_id = self.current_workers; self.current_workers = self.current_workers.saturating_add(1); - let chain = if let Some(chain) = self.beacon_chain.upgrade() { - chain - } else { - debug!( - self.log, - "Beacon chain dropped, shutting down"; - ); - return; - }; - let executor = self.executor.clone(); - let worker = Worker { - chain, - network_tx: self.network_tx.clone(), - sync_tx: self.sync_tx.clone(), - log: self.log.clone(), - }; - - let duplicate_cache = self.importing_blocks.clone(); - trace!( self.log, "Spawning beacon processor worker"; @@ -1638,349 +1241,72 @@ impl BeaconProcessor { ); let task_spawner = TaskSpawner { - executor: executor.clone(), + executor, send_idle_on_drop, }; - let 
sub_executor = executor; match work { - /* - * Individual unaggregated attestation verification. - */ Work::GossipAttestation { - message_id, - peer_id, attestation, - subnet_id, - should_import, - seen_timestamp, + process_individual, + process_batch: _, } => task_spawner.spawn_blocking(move || { - worker.process_gossip_attestation( - message_id, - peer_id, - attestation, - subnet_id, - should_import, - Some(work_reprocessing_tx), - seen_timestamp, - ) + process_individual(attestation); }), - /* - * Batched unaggregated attestation verification. - */ - Work::GossipAttestationBatch { packages } => task_spawner.spawn_blocking(|| { - worker.process_gossip_attestation_batch(packages, Some(work_reprocessing_tx)) + Work::GossipAttestationBatch { + attestations, + process_batch, + } => task_spawner.spawn_blocking(move || { + process_batch(attestations); }), - /* - * Individual aggregated attestation verification. - */ Work::GossipAggregate { - message_id, - peer_id, aggregate, - seen_timestamp, + process_individual, + process_batch: _, } => task_spawner.spawn_blocking(move || { - worker.process_gossip_aggregate( - message_id, - peer_id, - aggregate, - Some(work_reprocessing_tx), - seen_timestamp, - ) + process_individual(aggregate); }), - /* - * Batched aggregated attestation verification. - */ - Work::GossipAggregateBatch { packages } => task_spawner.spawn_blocking(|| { - worker.process_gossip_aggregate_batch(packages, Some(work_reprocessing_tx)) - }), - /* - * Verification for beacon blocks received on gossip. 
- */ - Work::GossipBlock { - message_id, - peer_id, - peer_client, - block, - seen_timestamp, - } => { - let invalid_block_storage = self.invalid_block_storage.clone(); - task_spawner.spawn_async(async move { - worker - .process_gossip_block( - message_id, - peer_id, - peer_client, - block, - work_reprocessing_tx, - duplicate_cache, - invalid_block_storage, - seen_timestamp, - ) - .await - }) - } - /* - * Import for blocks that we received earlier than their intended slot. - */ - Work::DelayedImportBlock { - peer_id, - block, - seen_timestamp, - } => { - let invalid_block_storage = self.invalid_block_storage.clone(); - task_spawner.spawn_async(worker.process_gossip_verified_block( - peer_id, - *block, - work_reprocessing_tx, - invalid_block_storage, - seen_timestamp, - )) - } - /* - * Voluntary exits received on gossip. - */ - Work::GossipVoluntaryExit { - message_id, - peer_id, - voluntary_exit, + Work::GossipAggregateBatch { + aggregates, + process_batch, } => task_spawner.spawn_blocking(move || { - worker.process_gossip_voluntary_exit(message_id, peer_id, *voluntary_exit) + process_batch(aggregates); }), - /* - * Proposer slashings received on gossip. - */ - Work::GossipProposerSlashing { - message_id, - peer_id, - proposer_slashing, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_proposer_slashing(message_id, peer_id, *proposer_slashing) - }), - /* - * Attester slashings received on gossip. - */ - Work::GossipAttesterSlashing { - message_id, - peer_id, - attester_slashing, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_attester_slashing(message_id, peer_id, *attester_slashing) - }), - /* - * Sync committee message verification. 
- */ - Work::GossipSyncSignature { - message_id, - peer_id, - sync_signature, - subnet_id, - seen_timestamp, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_sync_committee_signature( - message_id, - peer_id, - *sync_signature, - subnet_id, - seen_timestamp, - ) - }), - /* - * Sync contribution verification. - */ - Work::GossipSyncContribution { - message_id, - peer_id, - sync_contribution, - seen_timestamp, - } => task_spawner.spawn_blocking(move || { - worker.process_sync_committee_contribution( - message_id, - peer_id, - *sync_contribution, - seen_timestamp, - ) - }), - /* - * BLS to execution change verification. - */ - Work::GossipBlsToExecutionChange { - message_id, - peer_id, - bls_to_execution_change, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_bls_to_execution_change( - message_id, - peer_id, - *bls_to_execution_change, - ) - }), - /* - * Light client finality update verification. - */ - Work::GossipLightClientFinalityUpdate { - message_id, - peer_id, - light_client_finality_update, - seen_timestamp, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_finality_update( - message_id, - peer_id, - *light_client_finality_update, - seen_timestamp, - ) - }), - /* - * Light client optimistic update verification. - */ - Work::GossipLightClientOptimisticUpdate { - message_id, - peer_id, - light_client_optimistic_update, - seen_timestamp, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_optimistic_update( - message_id, - peer_id, - *light_client_optimistic_update, - Some(work_reprocessing_tx), - seen_timestamp, - ) - }), - /* - * Verification for beacon blocks received during syncing via RPC. 
- */ - Work::RpcBlock { - block_root, - block, - seen_timestamp, - process_type, - should_process, - } => task_spawner.spawn_async(worker.process_rpc_block( - block_root, - block, - seen_timestamp, - process_type, - work_reprocessing_tx, - duplicate_cache, - should_process, - )), - /* - * Verification for a chain segment (multiple blocks). - */ - Work::ChainSegment { process_id, blocks } => { - let notify_execution_layer = if self - .network_globals - .sync_state - .read() - .is_syncing_finalized() - { - NotifyExecutionLayer::No - } else { - NotifyExecutionLayer::Yes - }; - - task_spawner.spawn_async(async move { - worker - .process_chain_segment(process_id, blocks, notify_execution_layer) - .await - }) - } - /* - * Processing of Status Messages. - */ - Work::Status { peer_id, message } => { - task_spawner.spawn_blocking(move || worker.process_status(peer_id, message)) - } - /* - * Processing of range syncing requests from other peers. - */ - Work::BlocksByRangeRequest { - peer_id, - request_id, - request, - } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| { - worker.handle_blocks_by_range_request( - sub_executor, - send_idle_on_drop, - peer_id, - request_id, - request, - ) - }), - /* - * Processing of blocks by roots requests from other peers. - */ - Work::BlocksByRootsRequest { - peer_id, - request_id, - request, - } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| { - worker.handle_blocks_by_root_request( - sub_executor, - send_idle_on_drop, - peer_id, - request_id, - request, - ) - }), - /* - * Processing of lightclient bootstrap requests from other peers. 
- */ - Work::LightClientBootstrapRequest { - peer_id, - request_id, - request, - } => task_spawner.spawn_blocking(move || { - worker.handle_light_client_bootstrap(peer_id, request_id, request) - }), - Work::UnknownBlockAttestation { - message_id, - peer_id, - attestation, - subnet_id, - should_import, - seen_timestamp, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_attestation( - message_id, - peer_id, - attestation, - subnet_id, - should_import, - None, // Do not allow this attestation to be re-processed beyond this point. - seen_timestamp, - ) - }), - Work::UnknownBlockAggregate { - message_id, - peer_id, - aggregate, - seen_timestamp, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_aggregate( - message_id, - peer_id, - aggregate, - None, - seen_timestamp, - ) + Work::ChainSegment(process_fn) => task_spawner.spawn_async(async move { + process_fn.await; }), + Work::UnknownBlockAttestation { process_fn } => task_spawner.spawn_blocking(process_fn), + Work::UnknownBlockAggregate { process_fn } => task_spawner.spawn_blocking(process_fn), Work::UnknownLightClientOptimisticUpdate { - message_id, - peer_id, - light_client_optimistic_update, - seen_timestamp, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_optimistic_update( - message_id, - peer_id, - *light_client_optimistic_update, - None, - seen_timestamp, - ) + parent_root: _, + process_fn, + } => task_spawner.spawn_blocking(process_fn), + Work::DelayedImportBlock { + beacon_block_slot: _, + beacon_block_root: _, + process_fn, + } => task_spawner.spawn_async(process_fn), + Work::RpcBlock { process_fn } => task_spawner.spawn_async(process_fn), + Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), + Work::GossipBlock(work) => task_spawner.spawn_async(async move { + work.await; }), + Work::BlocksByRangeRequest(work) | Work::BlocksByRootsRequest(work) => { + task_spawner.spawn_blocking_with_manual_send_idle(work) + } + 
Work::ChainSegmentBackfill(process_fn) => task_spawner.spawn_async(process_fn), + Work::GossipVoluntaryExit(process_fn) + | Work::GossipProposerSlashing(process_fn) + | Work::GossipAttesterSlashing(process_fn) + | Work::GossipSyncSignature(process_fn) + | Work::GossipSyncContribution(process_fn) + | Work::GossipLightClientFinalityUpdate(process_fn) + | Work::GossipLightClientOptimisticUpdate(process_fn) + | Work::Status(process_fn) + | Work::GossipBlsToExecutionChange(process_fn) + | Work::LightClientBootstrapRequest(process_fn) => { + task_spawner.spawn_blocking(process_fn) + } }; } } diff --git a/beacon_node/beacon_processor/src/metrics.rs b/beacon_node/beacon_processor/src/metrics.rs new file mode 100644 index 0000000000..65ab0bd8fc --- /dev/null +++ b/beacon_node/beacon_processor/src/metrics.rs @@ -0,0 +1,141 @@ +pub use lighthouse_metrics::*; + +lazy_static::lazy_static! { + + /* + * Gossip processor + */ + pub static ref BEACON_PROCESSOR_WORK_EVENTS_RX_COUNT: Result = try_create_int_counter_vec( + "beacon_processor_work_events_rx_count", + "Count of work events received (but not necessarily processed)", + &["type"] + ); + pub static ref BEACON_PROCESSOR_WORK_EVENTS_IGNORED_COUNT: Result = try_create_int_counter_vec( + "beacon_processor_work_events_ignored_count", + "Count of work events purposefully ignored", + &["type"] + ); + pub static ref BEACON_PROCESSOR_WORK_EVENTS_STARTED_COUNT: Result = try_create_int_counter_vec( + "beacon_processor_work_events_started_count", + "Count of work events which have been started by a worker", + &["type"] + ); + pub static ref BEACON_PROCESSOR_WORKER_TIME: Result = try_create_histogram_vec( + "beacon_processor_worker_time", + "Time taken for a worker to fully process some parcel of work.", + &["type"] + ); + pub static ref BEACON_PROCESSOR_WORKERS_SPAWNED_TOTAL: Result = try_create_int_counter( + "beacon_processor_workers_spawned_total", + "The number of workers ever spawned by the gossip processing pool." 
+ ); + pub static ref BEACON_PROCESSOR_WORKERS_ACTIVE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_workers_active_total", + "Count of active workers in the gossip processing pool." + ); + pub static ref BEACON_PROCESSOR_IDLE_EVENTS_TOTAL: Result = try_create_int_counter( + "beacon_processor_idle_events_total", + "Count of idle events processed by the gossip processor manager." + ); + pub static ref BEACON_PROCESSOR_EVENT_HANDLING_SECONDS: Result = try_create_histogram( + "beacon_processor_event_handling_seconds", + "Time spent handling a new message and allocating it to a queue or worker." + ); + // Gossip blocks. + pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_gossip_block_queue_total", + "Count of blocks from gossip waiting to be verified." + ); + // Gossip Exits. + pub static ref BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_exit_queue_total", + "Count of exits from gossip waiting to be verified." + ); + // Gossip proposer slashings. + pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_proposer_slashing_queue_total", + "Count of proposer slashings from gossip waiting to be verified." + ); + // Gossip attester slashings. + pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_attester_slashing_queue_total", + "Count of attester slashings from gossip waiting to be verified." + ); + // Gossip BLS to execution changes. + pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_bls_to_execution_change_queue_total", + "Count of address changes from gossip waiting to be verified." + ); + // Rpc blocks. 
+ pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_rpc_block_queue_total", + "Count of blocks from the rpc waiting to be verified." + ); + // Chain segments. + pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_chain_segment_queue_total", + "Count of chain segments from the rpc waiting to be verified." + ); + pub static ref BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_backfill_chain_segment_queue_total", + "Count of backfill chain segments from the rpc waiting to be verified." + ); + // Unaggregated attestations. + pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_unaggregated_attestation_queue_total", + "Count of unagg. attestations waiting to be processed." + ); + // Aggregated attestations. + pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_aggregated_attestation_queue_total", + "Count of agg. attestations waiting to be processed." + ); + // Sync committee messages. + pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_sync_message_queue_total", + "Count of sync committee messages waiting to be processed." + ); + // Sync contribution. + pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_sync_contribution_queue_total", + "Count of sync committee contributions waiting to be processed." + ); + + /* + * Attestation reprocessing queue metrics. 
+ */ + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL: Result = + try_create_int_gauge_vec( + "beacon_processor_reprocessing_queue_total", + "Count of items in a reprocessing queue.", + &["type"] + ); + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_ATTESTATIONS: Result = try_create_int_counter( + "beacon_processor_reprocessing_queue_expired_attestations", + "Number of queued attestations which have expired before a matching block has been found." + ); + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS: Result = try_create_int_counter( + "beacon_processor_reprocessing_queue_matched_attestations", + "Number of queued attestations where as matching block has been imported." + ); + + /* + * Light client update reprocessing queue metrics. + */ + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_OPTIMISTIC_UPDATES: Result = try_create_int_counter( + "beacon_processor_reprocessing_queue_expired_optimistic_updates", + "Number of queued light client optimistic updates which have expired before a matching block has been found." + ); + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES: Result = try_create_int_counter( + "beacon_processor_reprocessing_queue_matched_optimistic_updates", + "Number of queued light client optimistic updates where as matching block has been imported." 
+ ); + + /// Errors and Debugging Stats + pub static ref BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE: Result = + try_create_int_counter_vec( + "beacon_processor_send_error_per_work_type", + "Total number of beacon processor send error per work type", + &["type"] + ); +} diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs similarity index 85% rename from beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs rename to beacon_node/beacon_processor/src/work_reprocessing_queue.rs index 427be6d513..608f634d53 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs @@ -10,23 +10,18 @@ //! //! Aggregated and unaggregated attestations that failed verification due to referencing an unknown //! block will be re-queued until their block is imported, or until they expire. -use super::MAX_SCHEDULED_WORK_QUEUE_LEN; -use crate::beacon_processor::{ChainSegmentProcessId, Work, WorkEvent}; use crate::metrics; -use crate::sync::manager::BlockProcessType; -use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; +use crate::{AsyncFn, BlockingFn, Work, WorkEvent}; use fnv::FnvHashMap; use futures::task::Poll; use futures::{Stream, StreamExt}; use itertools::Itertools; -use lighthouse_network::{MessageId, PeerId}; use logging::TimeLatch; use slog::{crit, debug, error, trace, warn, Logger}; use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; use std::future::Future; use std::pin::Pin; -use std::sync::Arc; use std::task::Context; use std::time::Duration; use strum::AsRefStr; @@ -34,10 +29,7 @@ use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::time::error::Error as TimeError; use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; -use types::{ - Attestation, EthSpec, Hash256, 
LightClientOptimisticUpdate, SignedAggregateAndProof, - SignedBeaconBlock, SubnetId, -}; +use types::{EthSpec, Hash256, Slot}; const TASK_NAME: &str = "beacon_processor_reprocess_queue"; const GOSSIP_BLOCKS: &str = "gossip_blocks"; @@ -47,7 +39,7 @@ const LIGHT_CLIENT_UPDATES: &str = "lc_updates"; /// Queue blocks for re-processing with an `ADDITIONAL_QUEUED_BLOCK_DELAY` after the slot starts. /// This is to account for any slight drift in the system clock. -const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5); +pub const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5); /// For how long to queue aggregated and unaggregated attestations for re-processing. pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); @@ -84,12 +76,12 @@ pub const BACKFILL_SCHEDULE_IN_SLOT: [(u32, u32); 3] = [ /// Messages that the scheduler can receive. #[derive(AsRefStr)] -pub enum ReprocessQueueMessage { +pub enum ReprocessQueueMessage { /// A block that has been received early and we should queue for later processing. - EarlyBlock(QueuedGossipBlock), + EarlyBlock(QueuedGossipBlock), /// A gossip block for hash `X` is being imported, we should queue the rpc block for the same /// hash until the gossip block is imported. - RpcBlock(QueuedRpcBlock), + RpcBlock(QueuedRpcBlock), /// A block that was successfully processed. We use this to handle attestations and light client updates /// for unknown blocks. BlockImported { @@ -97,139 +89,127 @@ pub enum ReprocessQueueMessage { parent_root: Hash256, }, /// An unaggregated attestation that references an unknown block. - UnknownBlockUnaggregate(QueuedUnaggregate), + UnknownBlockUnaggregate(QueuedUnaggregate), /// An aggregated attestation that references an unknown block. - UnknownBlockAggregate(QueuedAggregate), + UnknownBlockAggregate(QueuedAggregate), /// A light client optimistic update that references a parent root that has not been seen as a parent. 
- UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate), + UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate), /// A new backfill batch that needs to be scheduled for processing. - BackfillSync(QueuedBackfillBatch), + BackfillSync(QueuedBackfillBatch), } /// Events sent by the scheduler once they are ready for re-processing. -pub enum ReadyWork { - Block(QueuedGossipBlock), - RpcBlock(QueuedRpcBlock), - Unaggregate(QueuedUnaggregate), - Aggregate(QueuedAggregate), - LightClientUpdate(QueuedLightClientUpdate), - BackfillSync(QueuedBackfillBatch), +pub enum ReadyWork { + Block(QueuedGossipBlock), + RpcBlock(QueuedRpcBlock), + IgnoredRpcBlock(IgnoredRpcBlock), + Unaggregate(QueuedUnaggregate), + Aggregate(QueuedAggregate), + LightClientUpdate(QueuedLightClientUpdate), + BackfillSync(QueuedBackfillBatch), } /// An Attestation for which the corresponding block was not seen while processing, queued for /// later. -pub struct QueuedUnaggregate { - pub peer_id: PeerId, - pub message_id: MessageId, - pub attestation: Box>, - pub subnet_id: SubnetId, - pub should_import: bool, - pub seen_timestamp: Duration, +pub struct QueuedUnaggregate { + pub beacon_block_root: Hash256, + pub process_fn: BlockingFn, } /// An aggregated attestation for which the corresponding block was not seen while processing, queued for /// later. -pub struct QueuedAggregate { - pub peer_id: PeerId, - pub message_id: MessageId, - pub attestation: Box>, - pub seen_timestamp: Duration, +pub struct QueuedAggregate { + pub beacon_block_root: Hash256, + pub process_fn: BlockingFn, } /// A light client update for which the corresponding parent block was not seen while processing, /// queued for later. 
-pub struct QueuedLightClientUpdate { - pub peer_id: PeerId, - pub message_id: MessageId, - pub light_client_optimistic_update: Box>, +pub struct QueuedLightClientUpdate { pub parent_root: Hash256, - pub seen_timestamp: Duration, + pub process_fn: BlockingFn, } /// A block that arrived early and has been queued for later import. -pub struct QueuedGossipBlock { - pub peer_id: PeerId, - pub block: Box>, - pub seen_timestamp: Duration, +pub struct QueuedGossipBlock { + pub beacon_block_slot: Slot, + pub beacon_block_root: Hash256, + pub process_fn: AsyncFn, } /// A block that arrived for processing when the same block was being imported over gossip. /// It is queued for later import. -pub struct QueuedRpcBlock { - pub block_root: Hash256, - pub block: Arc>, - pub process_type: BlockProcessType, - pub seen_timestamp: Duration, - /// Indicates if the beacon chain should process this block or not. - /// We use this to ignore block processing when rpc block queues are full. - pub should_process: bool, +pub struct QueuedRpcBlock { + pub beacon_block_root: Hash256, + /// Processes/imports the block. + pub process_fn: AsyncFn, + /// Ignores the block. + pub ignore_fn: BlockingFn, +} + +/// A block that arrived for processing when the same block was being imported over gossip. +/// It is queued for later import. +pub struct IgnoredRpcBlock { + pub process_fn: BlockingFn, } /// A backfill batch work that has been queued for processing later. -#[derive(Clone)] -pub struct QueuedBackfillBatch { - pub process_id: ChainSegmentProcessId, - pub blocks: Vec>>, -} +pub struct QueuedBackfillBatch(pub AsyncFn); -impl TryFrom> for QueuedBackfillBatch { +impl TryFrom> for QueuedBackfillBatch { type Error = WorkEvent; fn try_from(event: WorkEvent) -> Result> { match event { WorkEvent { - work: - Work::ChainSegment { - process_id: process_id @ ChainSegmentProcessId::BackSyncBatchId(_), - blocks, - }, + work: Work::ChainSegmentBackfill(process_fn), .. 
- } => Ok(QueuedBackfillBatch { process_id, blocks }), + } => Ok(QueuedBackfillBatch(process_fn)), _ => Err(event), } } } -impl From> for WorkEvent { - fn from(queued_backfill_batch: QueuedBackfillBatch) -> WorkEvent { - WorkEvent::chain_segment( - queued_backfill_batch.process_id, - queued_backfill_batch.blocks, - ) +impl From for WorkEvent { + fn from(queued_backfill_batch: QueuedBackfillBatch) -> WorkEvent { + WorkEvent { + drop_during_sync: false, + work: Work::ChainSegmentBackfill(queued_backfill_batch.0), + } } } /// Unifies the different messages processed by the block delay queue. -enum InboundEvent { +enum InboundEvent { /// A gossip block that was queued for later processing and is ready for import. - ReadyGossipBlock(QueuedGossipBlock), + ReadyGossipBlock(QueuedGossipBlock), /// A rpc block that was queued because the same gossip block was being imported /// will now be retried for import. - ReadyRpcBlock(QueuedRpcBlock), + ReadyRpcBlock(QueuedRpcBlock), /// An aggregated or unaggregated attestation is ready for re-processing. ReadyAttestation(QueuedAttestationId), /// A light client update that is ready for re-processing. ReadyLightClientUpdate(QueuedLightClientUpdateId), /// A backfill batch that was queued is ready for processing. - ReadyBackfillSync(QueuedBackfillBatch), + ReadyBackfillSync(QueuedBackfillBatch), /// A `DelayQueue` returned an error. DelayQueueError(TimeError, &'static str), /// A message sent to the `ReprocessQueue` - Msg(ReprocessQueueMessage), + Msg(ReprocessQueueMessage), } /// Manages scheduling works that need to be later re-processed. -struct ReprocessQueue { +struct ReprocessQueue { /// Receiver of messages relevant to schedule works for reprocessing. - work_reprocessing_rx: Receiver>, + work_reprocessing_rx: Receiver, /// Sender of works once they become ready - ready_work_tx: Sender>, + ready_work_tx: Sender, /* Queues */ /// Queue to manage scheduled early blocks. 
- gossip_block_delay_queue: DelayQueue>, + gossip_block_delay_queue: DelayQueue, /// Queue to manage scheduled early blocks. - rpc_block_delay_queue: DelayQueue>, + rpc_block_delay_queue: DelayQueue, /// Queue to manage scheduled attestations. attestations_delay_queue: DelayQueue, /// Queue to manage scheduled light client updates. @@ -239,17 +219,17 @@ struct ReprocessQueue { /// Queued blocks. queued_gossip_block_roots: HashSet, /// Queued aggregated attestations. - queued_aggregates: FnvHashMap, DelayKey)>, + queued_aggregates: FnvHashMap, /// Queued attestations. - queued_unaggregates: FnvHashMap, DelayKey)>, + queued_unaggregates: FnvHashMap, /// Attestations (aggregated and unaggregated) per root. awaiting_attestations_per_root: HashMap>, /// Queued Light Client Updates. - queued_lc_updates: FnvHashMap, DelayKey)>, + queued_lc_updates: FnvHashMap, /// Light Client Updates per parent_root. awaiting_lc_updates_per_parent_root: HashMap>, /// Queued backfill batches - queued_backfill_batches: Vec>, + queued_backfill_batches: Vec, /* Aux */ /// Next attestation id, used for both aggregated and unaggregated attestations @@ -260,7 +240,7 @@ struct ReprocessQueue { attestation_delay_debounce: TimeLatch, lc_update_delay_debounce: TimeLatch, next_backfill_batch_event: Option>>, - slot_clock: Pin>, + slot_clock: Pin>, } pub type QueuedLightClientUpdateId = usize; @@ -271,20 +251,20 @@ enum QueuedAttestationId { Unaggregate(usize), } -impl QueuedAggregate { +impl QueuedAggregate { pub fn beacon_block_root(&self) -> &Hash256 { - &self.attestation.message.aggregate.data.beacon_block_root + &self.beacon_block_root } } -impl QueuedUnaggregate { +impl QueuedUnaggregate { pub fn beacon_block_root(&self) -> &Hash256 { - &self.attestation.data.beacon_block_root + &self.beacon_block_root } } -impl Stream for ReprocessQueue { - type Item = InboundEvent; +impl Stream for ReprocessQueue { + type Item = InboundEvent; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> 
Poll> { // NOTE: implementing `Stream` is not necessary but allows to maintain the future selection @@ -375,16 +355,13 @@ impl Stream for ReprocessQueue { /// Starts the job that manages scheduling works that need re-processing. The returned `Sender` /// gives the communicating channel to receive those works. Once a work is ready, it is sent back /// via `ready_work_tx`. -pub fn spawn_reprocess_scheduler( - ready_work_tx: Sender>, +pub fn spawn_reprocess_scheduler( + ready_work_tx: Sender, + work_reprocessing_rx: Receiver, executor: &TaskExecutor, - slot_clock: T::SlotClock, + slot_clock: S, log: Logger, -) -> Sender> { - let (work_reprocessing_tx, work_reprocessing_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); - // Basic sanity check. - assert!(ADDITIONAL_QUEUED_BLOCK_DELAY < MAXIMUM_GOSSIP_CLOCK_DISPARITY); - +) { let mut queue = ReprocessQueue { work_reprocessing_rx, ready_work_tx, @@ -423,19 +400,17 @@ pub fn spawn_reprocess_scheduler( }, TASK_NAME, ); - - work_reprocessing_tx } -impl ReprocessQueue { - fn handle_message(&mut self, msg: InboundEvent, slot_clock: &T::SlotClock, log: &Logger) { +impl ReprocessQueue { + fn handle_message(&mut self, msg: InboundEvent, slot_clock: &S, log: &Logger) { use ReprocessQueueMessage::*; match msg { // Some block has been indicated as "early" and should be processed when the // appropriate slot arrives. InboundEvent::Msg(EarlyBlock(early_block)) => { - let block_slot = early_block.block.block.slot(); - let block_root = early_block.block.block_root; + let block_slot = early_block.beacon_block_slot; + let block_root = early_block.beacon_block_root; // Don't add the same block to the queue twice. This prevents DoS attacks. if self.queued_gossip_block_roots.contains(&block_root) { @@ -494,7 +469,7 @@ impl ReprocessQueue { // for the same block hash is being imported. We wait for `QUEUED_RPC_BLOCK_DELAY` // and then send the rpc block back for processing assuming the gossip import // has completed by then. 
- InboundEvent::Msg(RpcBlock(mut rpc_block)) => { + InboundEvent::Msg(RpcBlock(rpc_block)) => { // Check to ensure this won't over-fill the queue. if self.rpc_block_delay_queue.len() >= MAXIMUM_QUEUED_BLOCKS { if self.rpc_block_debounce.elapsed() { @@ -507,10 +482,11 @@ impl ReprocessQueue { } // Return the block to the beacon processor signalling to // ignore processing for this block - rpc_block.should_process = false; if self .ready_work_tx - .try_send(ReadyWork::RpcBlock(rpc_block)) + .try_send(ReadyWork::IgnoredRpcBlock(IgnoredRpcBlock { + process_fn: rpc_block.ignore_fn, + })) .is_err() { error!( @@ -529,7 +505,7 @@ impl ReprocessQueue { debug!( log, "Sending rpc block for reprocessing"; - "block_root" => %queued_rpc_block.block.canonical_root() + "block_root" => %queued_rpc_block.beacon_block_root ); if self .ready_work_tx @@ -767,7 +743,7 @@ impl ReprocessQueue { } // A block that was queued for later processing is now ready to be processed. InboundEvent::ReadyGossipBlock(ready_block) => { - let block_root = ready_block.block.block_root; + let block_root = ready_block.beacon_block_root; if !self.queued_gossip_block_roots.remove(&block_root) { // Log an error to alert that we've made a bad assumption about how this @@ -885,18 +861,28 @@ impl ReprocessQueue { "millis_from_slot_start" => millis_from_slot_start ); - if self + match self .ready_work_tx - .try_send(ReadyWork::BackfillSync(queued_backfill_batch.clone())) - .is_err() + .try_send(ReadyWork::BackfillSync(queued_backfill_batch)) { - error!( + // The message was sent successfully. + Ok(()) => (), + // The message was not sent, recover it from the returned `Err`. 
+ Err(mpsc::error::TrySendError::Full(ReadyWork::BackfillSync(batch))) + | Err(mpsc::error::TrySendError::Closed(ReadyWork::BackfillSync(batch))) => { + error!( + log, + "Failed to send scheduled backfill work"; + "info" => "sending work back to queue" + ); + self.queued_backfill_batches.insert(0, batch) + } + // The message was not sent and we didn't get the correct + // return result. This is a logic error. + _ => crit!( log, - "Failed to send scheduled backfill work"; - "info" => "sending work back to queue" - ); - self.queued_backfill_batches - .insert(0, queued_backfill_batch); + "Unexpected return from try_send error"; + ), } } } @@ -927,7 +913,7 @@ impl ReprocessQueue { // only recompute the `next_backfill_batch_event` if there are backfill batches in the queue if !self.queued_backfill_batches.is_empty() { self.next_backfill_batch_event = Some(Box::pin(tokio::time::sleep( - ReprocessQueue::::duration_until_next_backfill_batch_event(&self.slot_clock), + ReprocessQueue::::duration_until_next_backfill_batch_event(&self.slot_clock), ))); } else { self.next_backfill_batch_event = None @@ -936,7 +922,7 @@ impl ReprocessQueue { /// Returns duration until the next scheduled processing time. 
The schedule ensure that backfill /// processing is done in windows of time that aren't critical - fn duration_until_next_backfill_batch_event(slot_clock: &T::SlotClock) -> Duration { + fn duration_until_next_backfill_batch_event(slot_clock: &S) -> Duration { let slot_duration = slot_clock.slot_duration(); slot_clock .millis_from_current_slot_start() @@ -966,16 +952,9 @@ impl ReprocessQueue { #[cfg(test)] mod tests { use super::*; - use beacon_chain::builder::Witness; - use beacon_chain::eth1_chain::CachingEth1Backend; use slot_clock::TestingSlotClock; - use store::MemoryStore; - use types::MainnetEthSpec as E; use types::Slot; - type TestBeaconChainType = - Witness, E, MemoryStore, MemoryStore>; - #[test] fn backfill_processing_schedule_calculation() { let slot_duration = Duration::from_secs(12); @@ -988,7 +967,7 @@ mod tests { for &event_duration_from_slot_start in event_times.iter() { let duration_to_next_event = - ReprocessQueue::::duration_until_next_backfill_batch_event( + ReprocessQueue::::duration_until_next_backfill_batch_event( &slot_clock, ); @@ -1005,7 +984,7 @@ mod tests { // check for next event beyond the current slot let duration_to_next_slot = slot_clock.duration_to_next_slot().unwrap(); let duration_to_next_event = - ReprocessQueue::::duration_until_next_backfill_batch_event( + ReprocessQueue::::duration_until_next_backfill_batch_event( &slot_clock, ); assert_eq!( diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 64c79ea668..72b6a6c7d4 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -43,3 +43,5 @@ slasher = { path = "../../slasher" } slasher_service = { path = "../../slasher/service" } monitoring_api = {path = "../../common/monitoring_api"} execution_layer = { path = "../execution_layer" } +beacon_processor = { path = "../beacon_processor" } +num_cpus = "1.13.0" diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index e05b92a277..f3ed7f65a9 100644 --- 
a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -13,6 +13,10 @@ use beacon_chain::{ store::{HotColdDB, ItemStore, LevelDB, StoreConfig}, BeaconChain, BeaconChainTypes, Eth1ChainBackend, ServerSentEventHandler, }; +use beacon_processor::{ + work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessor, BeaconProcessorSend, + WorkEvent, MAX_SCHEDULED_WORK_QUEUE_LEN, MAX_WORK_EVENT_QUEUE_LEN, +}; use environment::RuntimeContext; use eth1::{Config as Eth1Config, Service as Eth1Service}; use eth2::{ @@ -27,12 +31,13 @@ use network::{NetworkConfig, NetworkSenders, NetworkService}; use slasher::Slasher; use slasher_service::SlasherService; use slog::{debug, info, warn, Logger}; +use std::cmp; use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use timer::spawn_timer; -use tokio::sync::oneshot; +use tokio::sync::{mpsc, oneshot}; use types::{ test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, SignedBeaconBlock, @@ -72,6 +77,10 @@ pub struct ClientBuilder { http_metrics_config: http_metrics::Config, slasher: Option>>, eth_spec_instance: T::EthSpec, + beacon_processor_send: BeaconProcessorSend, + beacon_processor_receive: mpsc::Receiver>, + work_reprocessing_tx: mpsc::Sender, + work_reprocessing_rx: mpsc::Receiver, } impl @@ -87,6 +96,10 @@ where /// /// The `eth_spec_instance` parameter is used to concretize `TEthSpec`. 
pub fn new(eth_spec_instance: TEthSpec) -> Self { + let (beacon_processor_send, beacon_processor_receive) = + mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); + let (work_reprocessing_tx, work_reprocessing_rx) = + mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); Self { slot_clock: None, store: None, @@ -104,6 +117,10 @@ where http_metrics_config: <_>::default(), slasher: None, eth_spec_instance, + beacon_processor_send: BeaconProcessorSend(beacon_processor_send), + beacon_processor_receive, + work_reprocessing_tx, + work_reprocessing_rx, } } @@ -568,6 +585,8 @@ where gossipsub_registry .as_mut() .map(|registry| registry.sub_registry_with_prefix("gossipsub")), + self.beacon_processor_send.clone(), + self.work_reprocessing_tx.clone(), ) .await .map_err(|e| format!("Failed to start network: {:?}", e))?; @@ -755,6 +774,27 @@ where } if let Some(beacon_chain) = self.beacon_chain.as_ref() { + if let Some(network_globals) = &self.network_globals { + let beacon_processor_context = runtime_context.service_context("bproc".into()); + BeaconProcessor { + network_globals: network_globals.clone(), + executor: beacon_processor_context.executor.clone(), + max_workers: cmp::max(1, num_cpus::get()), + current_workers: 0, + enable_backfill_rate_limiting: beacon_chain + .config + .enable_backfill_rate_limiting, + log: beacon_processor_context.log().clone(), + } + .spawn_manager( + self.beacon_processor_receive, + self.work_reprocessing_tx, + self.work_reprocessing_rx, + None, + beacon_chain.slot_clock.clone(), + ); + } + let state_advance_context = runtime_context.service_context("state_advance".into()); let state_advance_log = state_advance_context.log().clone(); spawn_state_advance_timer( diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index aa1827787c..a5cc12bbc5 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -11,7 +11,6 @@ matches = "0.1.8" exit-future = "0.2.0" slog-term = "2.6.0" slog-async = "2.5.0" -environment = { path = 
"../../lighthouse/environment" } [dependencies] beacon_chain = { path = "../beacon_chain" } @@ -46,4 +45,7 @@ derivative = "2.2.0" delay_map = "0.3.0" ethereum-types = { version = "0.14.1", optional = true } operation_pool = { path = "../operation_pool" } -execution_layer = { path = "../execution_layer" } \ No newline at end of file +execution_layer = { path = "../execution_layer" } +beacon_processor = { path = "../beacon_processor" } +parking_lot = "0.12.0" +environment = { path = "../../lighthouse/environment" } \ No newline at end of file diff --git a/beacon_node/network/src/beacon_processor/worker/mod.rs b/beacon_node/network/src/beacon_processor/worker/mod.rs deleted file mode 100644 index 1cbc64b632..0000000000 --- a/beacon_node/network/src/beacon_processor/worker/mod.rs +++ /dev/null @@ -1,51 +0,0 @@ -use super::work_reprocessing_queue::ReprocessQueueMessage; -use crate::{service::NetworkMessage, sync::SyncMessage}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use slog::{debug, Logger}; -use std::sync::Arc; -use tokio::sync::mpsc; - -mod gossip_methods; -mod rpc_methods; -mod sync_methods; - -pub use gossip_methods::{GossipAggregatePackage, GossipAttestationPackage}; -pub use sync_methods::ChainSegmentProcessId; - -pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; - -/// Contains the context necessary to import blocks, attestations, etc to the beacon chain. -pub struct Worker { - pub chain: Arc>, - pub network_tx: mpsc::UnboundedSender>, - pub sync_tx: mpsc::UnboundedSender>, - pub log: Logger, -} - -impl Worker { - /// Send a message to `sync_tx`. - /// - /// Creates a log if there is an internal error. - fn send_sync_message(&self, message: SyncMessage) { - self.sync_tx.send(message).unwrap_or_else(|e| { - debug!(self.log, "Could not send message to the sync service"; - "error" => %e) - }); - } - - /// Send a message to `network_tx`. - /// - /// Creates a log if there is an internal error. 
- fn send_network_message(&self, message: NetworkMessage) { - self.network_tx.send(message).unwrap_or_else(|e| { - debug!(self.log, "Could not send message to the network service. Likely shutdown"; - "error" => %e) - }); - } -} - -/// Contains the necessary items for a worker to do their job. -pub struct Toolbox { - pub idle_tx: mpsc::Sender<()>, - pub work_reprocessing_tx: mpsc::Sender>, -} diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 648c636acc..da64368b16 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -6,10 +6,10 @@ pub mod error; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod service; -mod beacon_processor; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy mod metrics; mod nat; +mod network_beacon_processor; mod persisted_dht; mod router; mod status; diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 27d7dc9625..0144824861 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -49,47 +49,8 @@ lazy_static! 
{ /* * Gossip processor */ - pub static ref BEACON_PROCESSOR_WORK_EVENTS_RX_COUNT: Result = try_create_int_counter_vec( - "beacon_processor_work_events_rx_count", - "Count of work events received (but not necessarily processed)", - &["type"] - ); - pub static ref BEACON_PROCESSOR_WORK_EVENTS_IGNORED_COUNT: Result = try_create_int_counter_vec( - "beacon_processor_work_events_ignored_count", - "Count of work events purposefully ignored", - &["type"] - ); - pub static ref BEACON_PROCESSOR_WORK_EVENTS_STARTED_COUNT: Result = try_create_int_counter_vec( - "beacon_processor_work_events_started_count", - "Count of work events which have been started by a worker", - &["type"] - ); - pub static ref BEACON_PROCESSOR_WORKER_TIME: Result = try_create_histogram_vec( - "beacon_processor_worker_time", - "Time taken for a worker to fully process some parcel of work.", - &["type"] - ); - pub static ref BEACON_PROCESSOR_WORKERS_SPAWNED_TOTAL: Result = try_create_int_counter( - "beacon_processor_workers_spawned_total", - "The number of workers ever spawned by the gossip processing pool." - ); - pub static ref BEACON_PROCESSOR_WORKERS_ACTIVE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_workers_active_total", - "Count of active workers in the gossip processing pool." - ); - pub static ref BEACON_PROCESSOR_IDLE_EVENTS_TOTAL: Result = try_create_int_counter( - "beacon_processor_idle_events_total", - "Count of idle events processed by the gossip processor manager." - ); - pub static ref BEACON_PROCESSOR_EVENT_HANDLING_SECONDS: Result = try_create_histogram( - "beacon_processor_event_handling_seconds", - "Time spent handling a new message and allocating it to a queue or worker." - ); + // Gossip blocks. - pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_gossip_block_queue_total", - "Count of blocks from gossip waiting to be verified." 
- ); pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_gossip_block_verified_total", "Total number of gossip blocks verified for propagation." @@ -107,10 +68,6 @@ lazy_static! { "Whenever a gossip block is received early this metrics is set to how early that block was." ); // Gossip Exits. - pub static ref BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_exit_queue_total", - "Count of exits from gossip waiting to be verified." - ); pub static ref BEACON_PROCESSOR_EXIT_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_exit_verified_total", "Total number of voluntary exits verified for propagation." @@ -120,10 +77,6 @@ lazy_static! { "Total number of voluntary exits imported to the op pool." ); // Gossip proposer slashings. - pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_proposer_slashing_queue_total", - "Count of proposer slashings from gossip waiting to be verified." - ); pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_proposer_slashing_verified_total", "Total number of proposer slashings verified for propagation." @@ -133,10 +86,6 @@ lazy_static! { "Total number of proposer slashings imported to the op pool." ); // Gossip attester slashings. - pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_attester_slashing_queue_total", - "Count of attester slashings from gossip waiting to be verified." - ); pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_attester_slashing_verified_total", "Total number of attester slashings verified for propagation." @@ -146,10 +95,6 @@ lazy_static! { "Total number of attester slashings imported to the op pool." ); // Gossip BLS to execution changes. 
- pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_bls_to_execution_change_queue_total", - "Count of address changes from gossip waiting to be verified." - ); pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_bls_to_execution_change_verified_total", "Total number of address changes verified for propagation." @@ -159,23 +104,11 @@ lazy_static! { "Total number of address changes imported to the op pool." ); // Rpc blocks. - pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_rpc_block_queue_total", - "Count of blocks from the rpc waiting to be verified." - ); pub static ref BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL: Result = try_create_int_counter( "beacon_processor_rpc_block_imported_total", "Total number of gossip blocks imported to fork choice, etc." ); // Chain segments. - pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_chain_segment_queue_total", - "Count of chain segments from the rpc waiting to be verified." - ); - pub static ref BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_backfill_chain_segment_queue_total", - "Count of backfill chain segments from the rpc waiting to be verified." - ); pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL: Result = try_create_int_counter( "beacon_processor_chain_segment_success_total", "Total number of chain segments successfully processed." @@ -193,10 +126,6 @@ lazy_static! { "Total number of backfill chain segments that failed processing." ); // Unaggregated attestations. - pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_unaggregated_attestation_queue_total", - "Count of unagg. attestations waiting to be processed." 
- ); pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_unaggregated_attestation_verified_total", "Total number of unaggregated attestations verified for gossip." @@ -210,10 +139,6 @@ lazy_static! { "Total number of unaggregated attestations that referenced an unknown block and were re-queued." ); // Aggregated attestations. - pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_aggregated_attestation_queue_total", - "Count of agg. attestations waiting to be processed." - ); pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_aggregated_attestation_verified_total", "Total number of aggregated attestations verified for gossip." @@ -227,10 +152,6 @@ lazy_static! { "Total number of aggregated attestations that referenced an unknown block and were re-queued." ); // Sync committee messages. - pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_sync_message_queue_total", - "Count of sync committee messages waiting to be processed." - ); pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_sync_message_verified_total", "Total number of sync committee messages verified for gossip." @@ -240,10 +161,6 @@ lazy_static! { "Total number of sync committee messages imported to fork choice, etc." ); // Sync contribution. - pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_sync_contribution_queue_total", - "Count of sync committee contributions waiting to be processed." - ); pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_sync_contribution_verified_total", "Total number of sync committee contributions verified for gossip." 
@@ -279,12 +196,6 @@ lazy_static! { "Gossipsub light_client_optimistic_update errors per error type", &["type"] ); - pub static ref BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE: Result = - try_create_int_counter_vec( - "beacon_processor_send_error_per_work_type", - "Total number of beacon processor send error per work type", - &["type"] - ); /* @@ -371,35 +282,9 @@ lazy_static! { "Count of times when a gossip block arrived from the network later than the attestation deadline.", ); - /* - * Attestation reprocessing queue metrics. - */ - pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL: Result = - try_create_int_gauge_vec( - "beacon_processor_reprocessing_queue_total", - "Count of items in a reprocessing queue.", - &["type"] - ); - pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_ATTESTATIONS: Result = try_create_int_counter( - "beacon_processor_reprocessing_queue_expired_attestations", - "Number of queued attestations which have expired before a matching block has been found." - ); - pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS: Result = try_create_int_counter( - "beacon_processor_reprocessing_queue_matched_attestations", - "Number of queued attestations where as matching block has been imported." - ); - /* * Light client update reprocessing queue metrics. */ - pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_OPTIMISTIC_UPDATES: Result = try_create_int_counter( - "beacon_processor_reprocessing_queue_expired_optimistic_updates", - "Number of queued light client optimistic updates which have expired before a matching block has been found." - ); - pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES: Result = try_create_int_counter( - "beacon_processor_reprocessing_queue_matched_optimistic_updates", - "Number of queued light client optimistic updates where as matching block has been imported." 
- ); pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES: Result = try_create_int_counter( "beacon_processor_reprocessing_queue_sent_optimistic_updates", "Number of queued light client optimistic updates where as matching block has been imported." diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs similarity index 95% rename from beacon_node/network/src/beacon_processor/worker/gossip_methods.rs rename to beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 91ec81b18d..cde4da9ffc 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1,4 +1,9 @@ -use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; +use crate::{ + metrics, + network_beacon_processor::{InvalidBlockStorage, NetworkBeaconProcessor}, + service::NetworkMessage, + sync::SyncMessage, +}; use beacon_chain::store::Error; use beacon_chain::{ @@ -30,14 +35,13 @@ use types::{ SyncCommitteeMessage, SyncSubnetId, }; -use super::{ - super::work_reprocessing_queue::{ +use beacon_processor::{ + work_reprocessing_queue::{ QueuedAggregate, QueuedGossipBlock, QueuedLightClientUpdate, QueuedUnaggregate, ReprocessQueueMessage, }, - Worker, + DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, }; -use crate::beacon_processor::{DuplicateCache, InvalidBlockStorage}; /// Set to `true` to introduce stricter penalties for peers who send some types of late consensus /// messages. @@ -144,65 +148,7 @@ impl FailedAtt { } } -/// Items required to verify a batch of unaggregated gossip attestations. 
-#[derive(Debug)] -pub struct GossipAttestationPackage { - message_id: MessageId, - peer_id: PeerId, - attestation: Box>, - subnet_id: SubnetId, - should_import: bool, - seen_timestamp: Duration, -} - -impl GossipAttestationPackage { - pub fn new( - message_id: MessageId, - peer_id: PeerId, - attestation: Box>, - subnet_id: SubnetId, - should_import: bool, - seen_timestamp: Duration, - ) -> Self { - Self { - message_id, - peer_id, - attestation, - subnet_id, - should_import, - seen_timestamp, - } - } -} - -/// Items required to verify a batch of aggregated gossip attestations. -#[derive(Debug)] -pub struct GossipAggregatePackage { - message_id: MessageId, - peer_id: PeerId, - aggregate: Box>, - beacon_block_root: Hash256, - seen_timestamp: Duration, -} - -impl GossipAggregatePackage { - pub fn new( - message_id: MessageId, - peer_id: PeerId, - aggregate: Box>, - seen_timestamp: Duration, - ) -> Self { - Self { - message_id, - peer_id, - beacon_block_root: aggregate.message.aggregate.data.beacon_block_root, - aggregate, - seen_timestamp, - } - } -} - -impl Worker { +impl NetworkBeaconProcessor { /* Auxiliary functions */ /// Penalizes a peer for misbehaviour. @@ -245,13 +191,13 @@ impl Worker { /// Raises a log if there are errors. #[allow(clippy::too_many_arguments)] pub fn process_gossip_attestation( - self, + self: Arc, message_id: MessageId, peer_id: PeerId, attestation: Box>, subnet_id: SubnetId, should_import: bool, - reprocess_tx: Option>>, + reprocess_tx: Option>, seen_timestamp: Duration, ) { let result = match self @@ -277,9 +223,9 @@ impl Worker { } pub fn process_gossip_attestation_batch( - self, + self: Arc, packages: Vec>, - reprocess_tx: Option>>, + reprocess_tx: Option>, ) { let attestations_and_subnets = packages .iter() @@ -348,12 +294,12 @@ impl Worker { // cant' be mixed-up) and creating a struct would result in more complexity. 
#[allow(clippy::too_many_arguments)] fn process_gossip_attestation_result( - &self, + self: &Arc, result: Result, RejectedUnaggregate>, message_id: MessageId, peer_id: PeerId, subnet_id: SubnetId, - reprocess_tx: Option>>, + reprocess_tx: Option>, should_import: bool, seen_timestamp: Duration, ) { @@ -456,11 +402,11 @@ impl Worker { /// /// Raises a log if there are errors. pub fn process_gossip_aggregate( - self, + self: Arc, message_id: MessageId, peer_id: PeerId, aggregate: Box>, - reprocess_tx: Option>>, + reprocess_tx: Option>, seen_timestamp: Duration, ) { let beacon_block_root = aggregate.message.aggregate.data.beacon_block_root; @@ -490,9 +436,9 @@ impl Worker { } pub fn process_gossip_aggregate_batch( - self, + self: Arc, packages: Vec>, - reprocess_tx: Option>>, + reprocess_tx: Option>, ) { let aggregates = packages.iter().map(|package| package.aggregate.as_ref()); @@ -555,12 +501,12 @@ impl Worker { } fn process_gossip_aggregate_result( - &self, + self: &Arc, result: Result, RejectedAggregate>, beacon_block_root: Hash256, message_id: MessageId, peer_id: PeerId, - reprocess_tx: Option>>, + reprocess_tx: Option>, seen_timestamp: Duration, ) { match result { @@ -659,12 +605,12 @@ impl Worker { /// Raises a log if there are errors. #[allow(clippy::too_many_arguments)] pub async fn process_gossip_block( - self, + self: Arc, message_id: MessageId, peer_id: PeerId, peer_client: Client, block: Arc>, - reprocess_tx: mpsc::Sender>, + reprocess_tx: mpsc::Sender, duplicate_cache: DuplicateCache, invalid_block_storage: InvalidBlockStorage, seen_duration: Duration, @@ -708,12 +654,12 @@ impl Worker { /// /// Returns the `GossipVerifiedBlock` if verification passes and raises a log if there are errors. 
pub async fn process_gossip_unverified_block( - &self, + self: &Arc, message_id: MessageId, peer_id: PeerId, peer_client: Client, block: Arc>, - reprocess_tx: mpsc::Sender>, + reprocess_tx: mpsc::Sender, seen_duration: Duration, ) -> Option> { let block_delay = @@ -911,11 +857,25 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_REQUEUED_TOTAL); + let inner_self = self.clone(); + let process_fn = Box::pin(async move { + let reprocess_tx = inner_self.reprocess_tx.clone(); + let invalid_block_storage = inner_self.invalid_block_storage.clone(); + inner_self + .process_gossip_verified_block( + peer_id, + verified_block, + reprocess_tx, + invalid_block_storage, + seen_duration, + ) + .await; + }); if reprocess_tx .try_send(ReprocessQueueMessage::EarlyBlock(QueuedGossipBlock { - peer_id, - block: Box::new(verified_block), - seen_timestamp: seen_duration, + beacon_block_slot: block_slot, + beacon_block_root: block_root, + process_fn, })) .is_err() { @@ -948,10 +908,10 @@ impl Worker { /// /// Raises a log if there are errors. pub async fn process_gossip_verified_block( - self, + self: Arc, peer_id: PeerId, verified_block: GossipVerifiedBlock, - reprocess_tx: mpsc::Sender>, + reprocess_tx: mpsc::Sender, invalid_block_storage: InvalidBlockStorage, // This value is not used presently, but it might come in handy for debugging. 
_seen_duration: Duration, @@ -1051,7 +1011,7 @@ impl Worker { } pub fn process_gossip_voluntary_exit( - self, + self: &Arc, message_id: MessageId, peer_id: PeerId, voluntary_exit: SignedVoluntaryExit, @@ -1109,7 +1069,7 @@ impl Worker { } pub fn process_gossip_proposer_slashing( - self, + self: &Arc, message_id: MessageId, peer_id: PeerId, proposer_slashing: ProposerSlashing, @@ -1171,7 +1131,7 @@ impl Worker { } pub fn process_gossip_attester_slashing( - self, + self: &Arc, message_id: MessageId, peer_id: PeerId, attester_slashing: AttesterSlashing, @@ -1225,7 +1185,7 @@ impl Worker { } pub fn process_gossip_bls_to_execution_change( - self, + self: &Arc, message_id: MessageId, peer_id: PeerId, bls_to_execution_change: SignedBlsToExecutionChange, @@ -1308,7 +1268,7 @@ impl Worker { /// /// Raises a log if there are errors. pub fn process_gossip_sync_committee_signature( - self, + self: &Arc, message_id: MessageId, peer_id: PeerId, sync_signature: SyncCommitteeMessage, @@ -1371,7 +1331,7 @@ impl Worker { /// /// Raises a log if there are errors. 
pub fn process_sync_committee_contribution( - self, + self: &Arc, message_id: MessageId, peer_id: PeerId, sync_contribution: SignedContributionAndProof, @@ -1426,7 +1386,7 @@ impl Worker { } pub fn process_gossip_finality_update( - self, + self: &Arc, message_id: MessageId, peer_id: PeerId, light_client_finality_update: LightClientFinalityUpdate, @@ -1492,11 +1452,11 @@ impl Worker { } pub fn process_gossip_optimistic_update( - self, + self: &Arc, message_id: MessageId, peer_id: PeerId, light_client_optimistic_update: LightClientOptimisticUpdate, - reprocess_tx: Option>>, + reprocess_tx: Option>, seen_timestamp: Duration, ) { match self.chain.verify_optimistic_update_for_gossip( @@ -1527,15 +1487,19 @@ impl Worker { ); if let Some(sender) = reprocess_tx { + let processor = self.clone(); let msg = ReprocessQueueMessage::UnknownLightClientOptimisticUpdate( QueuedLightClientUpdate { - peer_id, - message_id, - light_client_optimistic_update: Box::new( - light_client_optimistic_update, - ), parent_root, - seen_timestamp, + process_fn: Box::new(move || { + processor.process_gossip_optimistic_update( + message_id, + peer_id, + light_client_optimistic_update, + None, // Do not reprocess this message again. + seen_timestamp, + ) + }), }, ); @@ -1624,11 +1588,11 @@ impl Worker { /// Handle an error whilst verifying an `Attestation` or `SignedAggregateAndProof` from the /// network. 
fn handle_attestation_verification_failure( - &self, + self: &Arc, peer_id: PeerId, message_id: MessageId, failed_att: FailedAtt, - reprocess_tx: Option>>, + reprocess_tx: Option>, error: AttnError, seen_timestamp: Duration, ) { @@ -1860,11 +1824,18 @@ impl Worker { metrics::inc_counter( &metrics::BEACON_PROCESSOR_AGGREGATED_ATTESTATION_REQUEUED_TOTAL, ); + let processor = self.clone(); ReprocessQueueMessage::UnknownBlockAggregate(QueuedAggregate { - peer_id, - message_id, - attestation, - seen_timestamp, + beacon_block_root: *beacon_block_root, + process_fn: Box::new(move || { + processor.process_gossip_aggregate( + message_id, + peer_id, + attestation, + None, // Do not allow this attestation to be re-processed beyond this point. + seen_timestamp, + ) + }), }) } FailedAtt::Unaggregate { @@ -1876,13 +1847,20 @@ impl Worker { metrics::inc_counter( &metrics::BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_REQUEUED_TOTAL, ); + let processor = self.clone(); ReprocessQueueMessage::UnknownBlockUnaggregate(QueuedUnaggregate { - peer_id, - message_id, - attestation, - subnet_id, - should_import, - seen_timestamp, + beacon_block_root: *beacon_block_root, + process_fn: Box::new(move || { + processor.process_gossip_attestation( + message_id, + peer_id, + attestation, + subnet_id, + should_import, + None, // Do not allow this attestation to be re-processed beyond this point. 
+ seen_timestamp, + ) + }), }) } }; diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs new file mode 100644 index 0000000000..7f0ef1fb81 --- /dev/null +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -0,0 +1,590 @@ +use crate::{ + service::NetworkMessage, + sync::{manager::BlockProcessType, SyncMessage}, +}; +use beacon_chain::{ + builder::Witness, eth1_chain::CachingEth1Backend, test_utils::BeaconChainHarness, BeaconChain, +}; +use beacon_chain::{BeaconChainTypes, NotifyExecutionLayer}; +use beacon_processor::{ + work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend, DuplicateCache, + GossipAggregatePackage, GossipAttestationPackage, Work, WorkEvent as BeaconWorkEvent, + MAX_SCHEDULED_WORK_QUEUE_LEN, MAX_WORK_EVENT_QUEUE_LEN, +}; +use environment::null_logger; +use lighthouse_network::{ + rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, + Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, +}; +use slog::{debug, Logger}; +use slot_clock::ManualSlotClock; +use std::path::PathBuf; +use std::sync::Arc; +use std::time::Duration; +use store::MemoryStore; +use task_executor::test_utils::TestRuntime; +use task_executor::TaskExecutor; +use tokio::sync::mpsc::{self, error::TrySendError}; +use types::*; + +pub use sync_methods::ChainSegmentProcessId; + +pub type Error = TrySendError>; + +mod gossip_methods; +mod rpc_methods; +mod sync_methods; +mod tests; + +pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; + +/// Defines if and where we will store the SSZ files of invalid blocks. +#[derive(Clone)] +pub enum InvalidBlockStorage { + Enabled(PathBuf), + Disabled, +} + +/// Provides an interface to a `BeaconProcessor` running in some other thread. +/// The wider `networking` crate should use this struct to interface with the +/// beacon processor. 
+pub struct NetworkBeaconProcessor { + pub beacon_processor_send: BeaconProcessorSend, + pub duplicate_cache: DuplicateCache, + pub chain: Arc>, + pub network_tx: mpsc::UnboundedSender>, + pub sync_tx: mpsc::UnboundedSender>, + pub reprocess_tx: mpsc::Sender, + pub network_globals: Arc>, + pub invalid_block_storage: InvalidBlockStorage, + pub executor: TaskExecutor, + pub log: Logger, +} + +impl NetworkBeaconProcessor { + fn try_send(&self, event: BeaconWorkEvent) -> Result<(), Error> { + self.beacon_processor_send + .try_send(event) + .map_err(Into::into) + } + + /// Create a new `Work` event for some unaggregated attestation. + pub fn send_unaggregated_attestation( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + attestation: Attestation, + subnet_id: SubnetId, + should_import: bool, + seen_timestamp: Duration, + ) -> Result<(), Error> { + // Define a closure for processing individual attestations. + let processor = self.clone(); + let process_individual = move |package: GossipAttestationPackage| { + let reprocess_tx = processor.reprocess_tx.clone(); + processor.process_gossip_attestation( + package.message_id, + package.peer_id, + package.attestation, + package.subnet_id, + package.should_import, + Some(reprocess_tx), + package.seen_timestamp, + ) + }; + + // Define a closure for processing batches of attestations. + let processor = self.clone(); + let process_batch = move |attestations| { + let reprocess_tx = processor.reprocess_tx.clone(); + processor.process_gossip_attestation_batch(attestations, Some(reprocess_tx)) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::GossipAttestation { + attestation: GossipAttestationPackage { + message_id, + peer_id, + attestation: Box::new(attestation), + subnet_id, + should_import, + seen_timestamp, + }, + process_individual: Box::new(process_individual), + process_batch: Box::new(process_batch), + }, + }) + } + + /// Create a new `Work` event for some aggregated attestation. 
+ pub fn send_aggregated_attestation( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + aggregate: SignedAggregateAndProof, + seen_timestamp: Duration, + ) -> Result<(), Error> { + // Define a closure for processing individual attestations. + let processor = self.clone(); + let process_individual = move |package: GossipAggregatePackage| { + let reprocess_tx = processor.reprocess_tx.clone(); + processor.process_gossip_aggregate( + package.message_id, + package.peer_id, + package.aggregate, + Some(reprocess_tx), + package.seen_timestamp, + ) + }; + + // Define a closure for processing batches of attestations. + let processor = self.clone(); + let process_batch = move |aggregates| { + let reprocess_tx = processor.reprocess_tx.clone(); + processor.process_gossip_aggregate_batch(aggregates, Some(reprocess_tx)) + }; + + let beacon_block_root = aggregate.message.aggregate.data.beacon_block_root; + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::GossipAggregate { + aggregate: GossipAggregatePackage { + message_id, + peer_id, + aggregate: Box::new(aggregate), + beacon_block_root, + seen_timestamp, + }, + process_individual: Box::new(process_individual), + process_batch: Box::new(process_batch), + }, + }) + } + + /// Create a new `Work` event for some block. 
+ pub fn send_gossip_beacon_block( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + peer_client: Client, + block: Arc>, + seen_timestamp: Duration, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = async move { + let reprocess_tx = processor.reprocess_tx.clone(); + let invalid_block_storage = processor.invalid_block_storage.clone(); + let duplicate_cache = processor.duplicate_cache.clone(); + processor + .process_gossip_block( + message_id, + peer_id, + peer_client, + block, + reprocess_tx, + duplicate_cache, + invalid_block_storage, + seen_timestamp, + ) + .await + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipBlock(Box::pin(process_fn)), + }) + } + + /// Create a new `Work` event for some sync committee signature. + pub fn send_gossip_sync_signature( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + sync_signature: SyncCommitteeMessage, + subnet_id: SyncSubnetId, + seen_timestamp: Duration, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.process_gossip_sync_committee_signature( + message_id, + peer_id, + sync_signature, + subnet_id, + seen_timestamp, + ) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::GossipSyncSignature(Box::new(process_fn)), + }) + } + + /// Create a new `Work` event for some sync committee contribution. + pub fn send_gossip_sync_contribution( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + sync_contribution: SignedContributionAndProof, + seen_timestamp: Duration, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.process_sync_committee_contribution( + message_id, + peer_id, + sync_contribution, + seen_timestamp, + ) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::GossipSyncContribution(Box::new(process_fn)), + }) + } + + /// Create a new `Work` event for some exit. 
+ pub fn send_gossip_voluntary_exit( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + voluntary_exit: Box, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = + move || processor.process_gossip_voluntary_exit(message_id, peer_id, *voluntary_exit); + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipVoluntaryExit(Box::new(process_fn)), + }) + } + + /// Create a new `Work` event for some proposer slashing. + pub fn send_gossip_proposer_slashing( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + proposer_slashing: Box, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.process_gossip_proposer_slashing(message_id, peer_id, *proposer_slashing) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipProposerSlashing(Box::new(process_fn)), + }) + } + + /// Create a new `Work` event for some light client finality update. + pub fn send_gossip_light_client_finality_update( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + light_client_finality_update: LightClientFinalityUpdate, + seen_timestamp: Duration, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.process_gossip_finality_update( + message_id, + peer_id, + light_client_finality_update, + seen_timestamp, + ) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::GossipLightClientFinalityUpdate(Box::new(process_fn)), + }) + } + + /// Create a new `Work` event for some light client optimistic update. 
+ pub fn send_gossip_light_client_optimistic_update( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + light_client_optimistic_update: LightClientOptimisticUpdate, + seen_timestamp: Duration, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + let reprocess_tx = processor.reprocess_tx.clone(); + processor.process_gossip_optimistic_update( + message_id, + peer_id, + light_client_optimistic_update, + Some(reprocess_tx), + seen_timestamp, + ) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::GossipLightClientOptimisticUpdate(Box::new(process_fn)), + }) + } + + /// Create a new `Work` event for some attester slashing. + pub fn send_gossip_attester_slashing( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + attester_slashing: Box>, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.process_gossip_attester_slashing(message_id, peer_id, *attester_slashing) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipAttesterSlashing(Box::new(process_fn)), + }) + } + + /// Create a new `Work` event for some BLS to execution change. + pub fn send_gossip_bls_to_execution_change( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + bls_to_execution_change: Box, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.process_gossip_bls_to_execution_change( + message_id, + peer_id, + *bls_to_execution_change, + ) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipBlsToExecutionChange(Box::new(process_fn)), + }) + } + + /// Create a new `Work` event for some block, where the result from computation (if any) is + /// sent to the other side of `result_tx`. 
+ pub fn send_rpc_beacon_block( + self: &Arc, + block_root: Hash256, + block: Arc>, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> Result<(), Error> { + let process_fn = self.clone().generate_rpc_beacon_block_process_fn( + block_root, + block, + seen_timestamp, + process_type, + ); + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::RpcBlock { process_fn }, + }) + } + + /// Create a new work event to import `blocks` as a beacon chain segment. + pub fn send_chain_segment( + self: &Arc, + process_id: ChainSegmentProcessId, + blocks: Vec>>, + ) -> Result<(), Error> { + let is_backfill = matches!(&process_id, ChainSegmentProcessId::BackSyncBatchId { .. }); + let processor = self.clone(); + let process_fn = async move { + let notify_execution_layer = if processor + .network_globals + .sync_state + .read() + .is_syncing_finalized() + { + NotifyExecutionLayer::No + } else { + NotifyExecutionLayer::Yes + }; + processor + .process_chain_segment(process_id, blocks, notify_execution_layer) + .await; + }; + let process_fn = Box::pin(process_fn); + + // Back-sync batches are dispatched with a different `Work` variant so + // they can be rate-limited. + let work = if is_backfill { + Work::ChainSegmentBackfill(process_fn) + } else { + Work::ChainSegment(process_fn) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work, + }) + } + + /// Create a new work event to process `StatusMessage`s from the RPC network. + pub fn send_status_message( + self: &Arc, + peer_id: PeerId, + message: StatusMessage, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || processor.process_status(peer_id, message); + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::Status(Box::new(process_fn)), + }) + } + + /// Create a new work event to process `BlocksByRangeRequest`s from the RPC network. 
+ pub fn send_blocks_by_range_request( + self: &Arc, + peer_id: PeerId, + request_id: PeerRequestId, + request: BlocksByRangeRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move |send_idle_on_drop| { + let executor = processor.executor.clone(); + processor.handle_blocks_by_range_request( + executor, + send_idle_on_drop, + peer_id, + request_id, + request, + ) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::BlocksByRangeRequest(Box::new(process_fn)), + }) + } + + /// Create a new work event to process `BlocksByRootRequest`s from the RPC network. + pub fn send_blocks_by_roots_request( + self: &Arc, + peer_id: PeerId, + request_id: PeerRequestId, + request: BlocksByRootRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move |send_idle_on_drop| { + let executor = processor.executor.clone(); + processor.handle_blocks_by_root_request( + executor, + send_idle_on_drop, + peer_id, + request_id, + request, + ) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::BlocksByRootsRequest(Box::new(process_fn)), + }) + } + + /// Create a new work event to process `LightClientBootstrap`s from the RPC network. + pub fn send_lightclient_bootstrap_request( + self: &Arc, + peer_id: PeerId, + request_id: PeerRequestId, + request: LightClientBootstrapRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = + move || processor.handle_light_client_bootstrap(peer_id, request_id, request); + + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::LightClientBootstrapRequest(Box::new(process_fn)), + }) + } + + /// Send a message to `sync_tx`. + /// + /// Creates a log if there is an internal error. 
+ fn send_sync_message(&self, message: SyncMessage) { + self.sync_tx.send(message).unwrap_or_else(|e| { + debug!(self.log, "Could not send message to the sync service"; + "error" => %e) + }); + } + + /// Send a message to `network_tx`. + /// + /// Creates a log if there is an internal error. + fn send_network_message(&self, message: NetworkMessage) { + self.network_tx.send(message).unwrap_or_else(|e| { + debug!(self.log, "Could not send message to the network service. Likely shutdown"; + "error" => %e) + }); + } +} + +type TestBeaconChainType = + Witness, E, MemoryStore, MemoryStore>; + +impl NetworkBeaconProcessor> { + // Instantiates a mostly non-functional version of `Self` and returns the + // event receiver that would normally go to the beacon processor. This is + // useful for testing that messages are actually being sent to the beacon + // processor (but not much else). + pub fn null_for_testing( + network_globals: Arc>, + ) -> (Self, mpsc::Receiver>) { + let (beacon_processor_send, beacon_processor_receive) = + mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); + let (network_tx, _network_rx) = mpsc::unbounded_channel(); + let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); + let (reprocess_tx, _reprocess_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); + let log = null_logger().unwrap(); + let harness: BeaconChainHarness> = + BeaconChainHarness::builder(E::default()) + .spec(E::default_spec()) + .deterministic_keypairs(8) + .logger(log.clone()) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + let runtime = TestRuntime::default(); + + let network_beacon_processor = Self { + beacon_processor_send: BeaconProcessorSend(beacon_processor_send), + duplicate_cache: DuplicateCache::default(), + chain: harness.chain, + network_tx, + sync_tx, + reprocess_tx, + network_globals, + invalid_block_storage: InvalidBlockStorage::Disabled, + executor: runtime.task_executor.clone(), + log, + }; + + (network_beacon_processor, beacon_processor_receive) + } +} + 
+#[cfg(test)] +mod test { + #[test] + fn queued_block_delay_is_sane() { + assert!( + beacon_processor::work_reprocessing_queue::ADDITIONAL_QUEUED_BLOCK_DELAY + < beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY + ); + } +} diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs similarity index 98% rename from beacon_node/network/src/beacon_processor/worker/rpc_methods.rs rename to beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 83baa0417b..19b0a60a43 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -1,21 +1,21 @@ -use crate::beacon_processor::{worker::FUTURE_SLOT_TOLERANCE, SendOnDrop}; +use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERANCE}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped}; +use beacon_processor::SendOnDrop; use itertools::process_results; use lighthouse_network::rpc::StatusMessage; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error, warn}; use slot_clock::SlotClock; +use std::sync::Arc; use task_executor::TaskExecutor; use tokio_stream::StreamExt; use types::{light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, Slot}; -use super::Worker; - -impl Worker { +impl NetworkBeaconProcessor { /* Auxiliary functions */ /// Disconnects and ban's a peer, sending a Goodbye request with the associated reason. @@ -124,7 +124,7 @@ impl Worker { /// Handle a `BlocksByRoot` request from the peer. 
pub fn handle_blocks_by_root_request( - self, + self: Arc, executor: TaskExecutor, send_on_drop: SendOnDrop, peer_id: PeerId, @@ -210,7 +210,7 @@ impl Worker { /// Handle a `BlocksByRoot` request from the peer. pub fn handle_light_client_bootstrap( - self, + self: &Arc, peer_id: PeerId, request_id: PeerRequestId, request: LightClientBootstrapRequest, @@ -283,7 +283,7 @@ impl Worker { /// Handle a `BlocksByRange` request from the peer. pub fn handle_blocks_by_range_request( - self, + self: Arc, executor: TaskExecutor, send_on_drop: SendOnDrop, peer_id: PeerId, diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs similarity index 90% rename from beacon_node/network/src/beacon_processor/worker/sync_methods.rs rename to beacon_node/network/src/network_beacon_processor/sync_methods.rs index ac59b1daa9..c33e2acf54 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -1,17 +1,21 @@ use std::time::Duration; -use super::{super::work_reprocessing_queue::ReprocessQueueMessage, Worker}; -use crate::beacon_processor::work_reprocessing_queue::QueuedRpcBlock; -use crate::beacon_processor::worker::FUTURE_SLOT_TOLERANCE; -use crate::beacon_processor::DuplicateCache; use crate::metrics; -use crate::sync::manager::{BlockProcessType, SyncMessage}; -use crate::sync::{BatchProcessResult, ChainId}; +use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERANCE}; +use crate::sync::BatchProcessResult; +use crate::sync::{ + manager::{BlockProcessType, SyncMessage}, + ChainId, +}; use beacon_chain::{ observed_block_producers::Error as ObserveError, validator_monitor::get_block_delay_ms, BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, }; +use beacon_processor::{ + work_reprocessing_queue::{QueuedRpcBlock, 
ReprocessQueueMessage}, + AsyncFn, BlockingFn, DuplicateCache, +}; use lighthouse_network::PeerAction; use slog::{debug, error, info, warn}; use slot_clock::SlotClock; @@ -39,27 +43,71 @@ struct ChainSegmentFailed { peer_action: Option, } -impl Worker { - /// Attempt to process a block received from a direct RPC request. - #[allow(clippy::too_many_arguments)] - pub async fn process_rpc_block( - self, +impl NetworkBeaconProcessor { + /// Returns an async closure which processes a beacon block received via RPC. + /// + /// This separate function was required to prevent a cycle during compiler + /// type checking. + pub fn generate_rpc_beacon_block_process_fn( + self: Arc, block_root: Hash256, block: Arc>, seen_timestamp: Duration, process_type: BlockProcessType, - reprocess_tx: mpsc::Sender>, - duplicate_cache: DuplicateCache, - should_process: bool, - ) { - if !should_process { + ) -> AsyncFn { + let process_fn = async move { + let reprocess_tx = self.reprocess_tx.clone(); + let duplicate_cache = self.duplicate_cache.clone(); + self.process_rpc_block( + block_root, + block, + seen_timestamp, + process_type, + reprocess_tx, + duplicate_cache, + ) + .await; + }; + Box::pin(process_fn) + } + + /// Returns the `process_fn` and `ignore_fn` required when requeuing an RPC block. + pub fn generate_rpc_beacon_block_fns( + self: Arc, + block_root: Hash256, + block: Arc>, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> (AsyncFn, BlockingFn) { + // An async closure which will import the block. + let process_fn = self.clone().generate_rpc_beacon_block_process_fn( + block_root, + block, + seen_timestamp, + process_type.clone(), + ); + // A closure which will ignore the block. 
+ let ignore_fn = move || { // Sync handles these results self.send_sync_message(SyncMessage::BlockProcessed { process_type, result: crate::sync::manager::BlockProcessResult::Ignored, }); - return; - } + }; + (process_fn, Box::new(ignore_fn)) + } + + /// Attempt to process a block received from a direct RPC request. + #[allow(clippy::too_many_arguments)] + pub async fn process_rpc_block( + self: Arc>, + block_root: Hash256, + block: Arc>, + seen_timestamp: Duration, + process_type: BlockProcessType, + reprocess_tx: mpsc::Sender, + duplicate_cache: DuplicateCache, + ) { // Check if the block is already being imported through another source let handle = match duplicate_cache.check_and_insert(block_root) { Some(handle) => handle, @@ -70,13 +118,18 @@ impl Worker { "action" => "sending rpc block to reprocessing queue", "block_root" => %block_root, ); + // Send message to work reprocess queue to retry the block - let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { + let (process_fn, ignore_fn) = self.clone().generate_rpc_beacon_block_fns( block_root, - block: block.clone(), - process_type, + block, seen_timestamp, - should_process: true, + process_type, + ); + let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { + beacon_block_root: block_root, + process_fn, + ignore_fn, }); if reprocess_tx.try_send(reprocess_msg).is_err() { @@ -130,12 +183,16 @@ impl Worker { ); // Send message to work reprocess queue to retry the block - let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { + let (process_fn, ignore_fn) = self.clone().generate_rpc_beacon_block_fns( block_root, - block: block.clone(), - process_type, + block, seen_timestamp, - should_process: true, + process_type, + ); + let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { + beacon_block_root: block_root, + process_fn, + ignore_fn, }); if reprocess_tx.try_send(reprocess_msg).is_err() { diff --git a/beacon_node/network/src/beacon_processor/tests.rs 
b/beacon_node/network/src/network_beacon_processor/tests.rs similarity index 89% rename from beacon_node/network/src/beacon_processor/tests.rs rename to beacon_node/network/src/network_beacon_processor/tests.rs index b93e83ad78..b8d5db568e 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -1,20 +1,23 @@ #![cfg(not(debug_assertions))] // Tests are too slow in debug. #![cfg(test)] -use crate::beacon_processor::work_reprocessing_queue::{ - QUEUED_ATTESTATION_DELAY, QUEUED_RPC_BLOCK_DELAY, +use crate::{ + network_beacon_processor::{ + ChainSegmentProcessId, DuplicateCache, InvalidBlockStorage, NetworkBeaconProcessor, + }, + service::NetworkMessage, + sync::{manager::BlockProcessType, SyncMessage}, }; -use crate::beacon_processor::*; -use crate::{service::NetworkMessage, sync::SyncMessage}; use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::{BeaconChain, ChainConfig, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; +use beacon_processor::{work_reprocessing_queue::*, *}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, rpc::methods::{MetaData, MetaDataV2}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}, - MessageId, NetworkGlobals, PeerId, + Client, MessageId, NetworkGlobals, PeerId, }; use slot_clock::SlotClock; use std::cmp; @@ -23,8 +26,8 @@ use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, Epoch, EthSpec, MainnetEthSpec, ProposerSlashing, - SignedBeaconBlock, SignedVoluntaryExit, SubnetId, + Attestation, AttesterSlashing, Epoch, EthSpec, Hash256, MainnetEthSpec, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, SubnetId, }; type E = MainnetEthSpec; @@ -51,11 +54,12 @@ struct TestRig { attester_slashing: AttesterSlashing, proposer_slashing: ProposerSlashing, voluntary_exit: SignedVoluntaryExit, - 
beacon_processor_tx: mpsc::Sender>, + beacon_processor_tx: BeaconProcessorSend, work_journal_rx: mpsc::Receiver<&'static str>, _network_rx: mpsc::UnboundedReceiver>, _sync_rx: mpsc::UnboundedReceiver>, duplicate_cache: DuplicateCache, + network_beacon_processor: Arc>, _harness: BeaconChainHarness, } @@ -64,7 +68,7 @@ struct TestRig { impl Drop for TestRig { fn drop(&mut self) { // Causes the beacon processor to shutdown. - self.beacon_processor_tx = mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN).0; + self.beacon_processor_tx = BeaconProcessorSend(mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN).0); } } @@ -169,6 +173,7 @@ impl TestRig { let log = harness.logger().clone(); let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); + let beacon_processor_tx = BeaconProcessorSend(beacon_processor_tx); let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); // Default metadata @@ -191,22 +196,40 @@ impl TestRig { let executor = harness.runtime.task_executor.clone(); + let (work_reprocessing_tx, work_reprocessing_rx) = + mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); let (work_journal_tx, work_journal_rx) = mpsc::channel(16_364); let duplicate_cache = DuplicateCache::default(); - BeaconProcessor { - beacon_chain: Arc::downgrade(&chain), + let network_beacon_processor = NetworkBeaconProcessor { + beacon_processor_send: beacon_processor_tx.clone(), + duplicate_cache: duplicate_cache.clone(), + chain: harness.chain.clone(), network_tx, sync_tx, + reprocess_tx: work_reprocessing_tx.clone(), + network_globals: network_globals.clone(), + invalid_block_storage: InvalidBlockStorage::Disabled, + executor: executor.clone(), + log: log.clone(), + }; + let network_beacon_processor = Arc::new(network_beacon_processor); + + BeaconProcessor { network_globals, executor, max_workers: cmp::max(1, num_cpus::get()), current_workers: 0, - importing_blocks: duplicate_cache.clone(), - invalid_block_storage: InvalidBlockStorage::Disabled, + enable_backfill_rate_limiting: 
harness.chain.config.enable_backfill_rate_limiting, log: log.clone(), } - .spawn_manager(beacon_processor_rx, Some(work_journal_tx)); + .spawn_manager( + beacon_processor_rx, + work_reprocessing_tx, + work_reprocessing_rx, + Some(work_journal_tx), + harness.chain.slot_clock.clone(), + ); Self { chain, @@ -222,6 +245,7 @@ impl TestRig { _network_rx, _sync_rx, duplicate_cache, + network_beacon_processor, _harness: harness, } } @@ -235,102 +259,105 @@ impl TestRig { } pub fn enqueue_gossip_block(&self) { - self.beacon_processor_tx - .try_send(WorkEvent::gossip_beacon_block( + self.network_beacon_processor + .send_gossip_beacon_block( junk_message_id(), junk_peer_id(), Client::default(), self.next_block.clone(), Duration::from_secs(0), - )) + ) .unwrap(); } pub fn enqueue_rpc_block(&self) { - let event = WorkEvent::rpc_beacon_block( - self.next_block.canonical_root(), - self.next_block.clone(), - std::time::Duration::default(), - BlockProcessType::ParentLookup { - chain_hash: Hash256::random(), - }, - ); - self.beacon_processor_tx.try_send(event).unwrap(); + self.network_beacon_processor + .send_rpc_beacon_block( + self.next_block.canonical_root(), + self.next_block.clone(), + std::time::Duration::default(), + BlockProcessType::ParentLookup { + chain_hash: Hash256::random(), + }, + ) + .unwrap(); } pub fn enqueue_single_lookup_rpc_block(&self) { - let event = WorkEvent::rpc_beacon_block( - self.next_block.canonical_root(), - self.next_block.clone(), - std::time::Duration::default(), - BlockProcessType::SingleBlock { id: 1 }, - ); - self.beacon_processor_tx.try_send(event).unwrap(); + self.network_beacon_processor + .send_rpc_beacon_block( + self.next_block.canonical_root(), + self.next_block.clone(), + std::time::Duration::default(), + BlockProcessType::SingleBlock { id: 1 }, + ) + .unwrap(); } pub fn enqueue_backfill_batch(&self) { - let event = WorkEvent::chain_segment( - ChainSegmentProcessId::BackSyncBatchId(Epoch::default()), - Vec::default(), - ); - 
self.beacon_processor_tx.try_send(event).unwrap(); + self.network_beacon_processor + .send_chain_segment( + ChainSegmentProcessId::BackSyncBatchId(Epoch::default()), + Vec::default(), + ) + .unwrap(); } pub fn enqueue_unaggregated_attestation(&self) { let (attestation, subnet_id) = self.attestations.first().unwrap().clone(); - self.beacon_processor_tx - .try_send(WorkEvent::unaggregated_attestation( + self.network_beacon_processor + .send_unaggregated_attestation( junk_message_id(), junk_peer_id(), attestation, subnet_id, true, Duration::from_secs(0), - )) + ) .unwrap(); } pub fn enqueue_gossip_attester_slashing(&self) { - self.beacon_processor_tx - .try_send(WorkEvent::gossip_attester_slashing( + self.network_beacon_processor + .send_gossip_attester_slashing( junk_message_id(), junk_peer_id(), Box::new(self.attester_slashing.clone()), - )) + ) .unwrap(); } pub fn enqueue_gossip_proposer_slashing(&self) { - self.beacon_processor_tx - .try_send(WorkEvent::gossip_proposer_slashing( + self.network_beacon_processor + .send_gossip_proposer_slashing( junk_message_id(), junk_peer_id(), Box::new(self.proposer_slashing.clone()), - )) + ) .unwrap(); } pub fn enqueue_gossip_voluntary_exit(&self) { - self.beacon_processor_tx - .try_send(WorkEvent::gossip_voluntary_exit( + self.network_beacon_processor + .send_gossip_voluntary_exit( junk_message_id(), junk_peer_id(), Box::new(self.voluntary_exit.clone()), - )) + ) .unwrap(); } pub fn enqueue_next_block_unaggregated_attestation(&self) { let (attestation, subnet_id) = self.next_block_attestations.first().unwrap().clone(); - self.beacon_processor_tx - .try_send(WorkEvent::unaggregated_attestation( + self.network_beacon_processor + .send_unaggregated_attestation( junk_message_id(), junk_peer_id(), attestation, subnet_id, true, Duration::from_secs(0), - )) + ) .unwrap(); } @@ -340,13 +367,13 @@ impl TestRig { .first() .unwrap() .clone(); - self.beacon_processor_tx - .try_send(WorkEvent::aggregated_attestation( + 
self.network_beacon_processor + .send_aggregated_attestation( junk_message_id(), junk_peer_id(), aggregate, Duration::from_secs(0), - )) + ) .unwrap(); } diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 7a91f2d0b1..c8332705cf 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -5,16 +5,16 @@ //! syncing-related responses to the Sync manager. #![allow(clippy::unit_arg)] -use crate::beacon_processor::{ - BeaconProcessor, BeaconProcessorSend, InvalidBlockStorage, WorkEvent as BeaconWorkEvent, - MAX_WORK_EVENT_QUEUE_LEN, -}; use crate::error; +use crate::network_beacon_processor::{InvalidBlockStorage, NetworkBeaconProcessor}; use crate::service::{NetworkMessage, RequestId}; use crate::status::status_message; use crate::sync::manager::RequestId as SyncId; use crate::sync::SyncMessage; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use beacon_processor::{ + work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend, DuplicateCache, +}; use futures::prelude::*; use lighthouse_network::rpc::*; use lighthouse_network::{ @@ -23,7 +23,6 @@ use lighthouse_network::{ use logging::TimeLatch; use slog::{debug, o, trace}; use slog::{error, warn}; -use std::cmp; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; @@ -41,7 +40,7 @@ pub struct Router { /// A network context to return and handle RPC requests. network: HandlerNetworkContext, /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. - beacon_processor_send: BeaconProcessorSend, + network_beacon_processor: Arc>, /// The `Router` logger. log: slog::Logger, /// Provides de-bounce functionality for logging. @@ -80,12 +79,15 @@ pub enum RouterMessage { impl Router { /// Initializes and runs the Router. 
+ #[allow(clippy::too_many_arguments)] pub fn spawn( beacon_chain: Arc>, network_globals: Arc>, network_send: mpsc::UnboundedSender>, executor: task_executor::TaskExecutor, invalid_block_storage: InvalidBlockStorage, + beacon_processor_send: BeaconProcessorSend, + beacon_processor_reprocess_tx: mpsc::Sender, log: slog::Logger, ) -> error::Result>> { let message_handler_log = log.new(o!("service"=> "router")); @@ -93,34 +95,33 @@ impl Router { let (handler_send, handler_recv) = mpsc::unbounded_channel(); - let (beacon_processor_send, beacon_processor_receive) = - mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); - let sync_logger = log.new(o!("service"=> "sync")); + // generate the message channel + let (sync_send, sync_recv) = mpsc::unbounded_channel::>(); - // spawn the sync thread - let sync_send = crate::sync::manager::spawn( - executor.clone(), - beacon_chain.clone(), - network_globals.clone(), - network_send.clone(), - BeaconProcessorSend(beacon_processor_send.clone()), - sync_logger, - ); - - BeaconProcessor { - beacon_chain: Arc::downgrade(&beacon_chain), + let network_beacon_processor = NetworkBeaconProcessor { + beacon_processor_send, + duplicate_cache: DuplicateCache::default(), + chain: beacon_chain.clone(), network_tx: network_send.clone(), sync_tx: sync_send.clone(), + reprocess_tx: beacon_processor_reprocess_tx, network_globals: network_globals.clone(), - executor: executor.clone(), - max_workers: cmp::max(1, num_cpus::get()), - current_workers: 0, - importing_blocks: Default::default(), invalid_block_storage, + executor: executor.clone(), log: log.clone(), - } - .spawn_manager(beacon_processor_receive, None); + }; + let network_beacon_processor = Arc::new(network_beacon_processor); + + // spawn the sync thread + crate::sync::manager::spawn( + executor.clone(), + beacon_chain.clone(), + network_send.clone(), + network_beacon_processor.clone(), + sync_recv, + sync_logger, + ); // generate the Message handler let mut handler = Router { @@ -128,7 +129,7 @@ impl 
Router { chain: beacon_chain, sync_send, network: HandlerNetworkContext::new(network_send, log.clone()), - beacon_processor_send: BeaconProcessorSend(beacon_processor_send), + network_beacon_processor, log: message_handler_log, logger_debounce: TimeLatch::default(), }; @@ -197,14 +198,17 @@ impl Router { Request::Status(status_message) => { self.on_status_request(peer_id, request_id, status_message) } - Request::BlocksByRange(request) => self.send_beacon_processor_work( - BeaconWorkEvent::blocks_by_range_request(peer_id, request_id, request), + Request::BlocksByRange(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_blocks_by_range_request(peer_id, request_id, request), ), - Request::BlocksByRoot(request) => self.send_beacon_processor_work( - BeaconWorkEvent::blocks_by_roots_request(peer_id, request_id, request), + Request::BlocksByRoot(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_blocks_by_roots_request(peer_id, request_id, request), ), - Request::LightClientBootstrap(request) => self.send_beacon_processor_work( - BeaconWorkEvent::lightclient_bootstrap_request(peer_id, request_id, request), + Request::LightClientBootstrap(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_lightclient_bootstrap_request(peer_id, request_id, request), ), } } @@ -219,10 +223,10 @@ impl Router { match response { Response::Status(status_message) => { debug!(self.log, "Received Status Response"; "peer_id" => %peer_id, &status_message); - self.send_beacon_processor_work(BeaconWorkEvent::status_message( - peer_id, - status_message, - )) + self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_status_message(peer_id, status_message), + ) } Response::BlocksByRange(beacon_block) => { self.on_blocks_by_range_response(peer_id, request_id, beacon_block); @@ -247,36 +251,40 @@ impl Router { ) { match gossip_message { 
PubsubMessage::AggregateAndProofAttestation(aggregate_and_proof) => self - .send_beacon_processor_work(BeaconWorkEvent::aggregated_attestation( - message_id, - peer_id, - *aggregate_and_proof, - timestamp_now(), - )), - PubsubMessage::Attestation(subnet_attestation) => { - self.send_beacon_processor_work(BeaconWorkEvent::unaggregated_attestation( - message_id, - peer_id, - subnet_attestation.1, - subnet_attestation.0, - should_process, - timestamp_now(), - )) - } - PubsubMessage::BeaconBlock(block) => { - self.send_beacon_processor_work(BeaconWorkEvent::gossip_beacon_block( + .handle_beacon_processor_send_result( + self.network_beacon_processor.send_aggregated_attestation( + message_id, + peer_id, + *aggregate_and_proof, + timestamp_now(), + ), + ), + PubsubMessage::Attestation(subnet_attestation) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor.send_unaggregated_attestation( + message_id, + peer_id, + subnet_attestation.1, + subnet_attestation.0, + should_process, + timestamp_now(), + ), + ), + PubsubMessage::BeaconBlock(block) => self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_gossip_beacon_block( message_id, peer_id, self.network_globals.client(&peer_id), block, timestamp_now(), - )) - } + ), + ), PubsubMessage::VoluntaryExit(exit) => { debug!(self.log, "Received a voluntary exit"; "peer_id" => %peer_id); - self.send_beacon_processor_work(BeaconWorkEvent::gossip_voluntary_exit( - message_id, peer_id, exit, - )) + self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_gossip_voluntary_exit(message_id, peer_id, exit), + ) } PubsubMessage::ProposerSlashing(proposer_slashing) => { debug!( @@ -284,11 +292,13 @@ impl Router { "Received a proposer slashing"; "peer_id" => %peer_id ); - self.send_beacon_processor_work(BeaconWorkEvent::gossip_proposer_slashing( - message_id, - peer_id, - proposer_slashing, - )) + self.handle_beacon_processor_send_result( + 
self.network_beacon_processor.send_gossip_proposer_slashing( + message_id, + peer_id, + proposer_slashing, + ), + ) } PubsubMessage::AttesterSlashing(attester_slashing) => { debug!( @@ -296,11 +306,13 @@ impl Router { "Received a attester slashing"; "peer_id" => %peer_id ); - self.send_beacon_processor_work(BeaconWorkEvent::gossip_attester_slashing( - message_id, - peer_id, - attester_slashing, - )) + self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_gossip_attester_slashing( + message_id, + peer_id, + attester_slashing, + ), + ) } PubsubMessage::SignedContributionAndProof(contribution_and_proof) => { trace!( @@ -308,12 +320,14 @@ impl Router { "Received sync committee aggregate"; "peer_id" => %peer_id ); - self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_contribution( - message_id, - peer_id, - *contribution_and_proof, - timestamp_now(), - )) + self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_gossip_sync_contribution( + message_id, + peer_id, + *contribution_and_proof, + timestamp_now(), + ), + ) } PubsubMessage::SyncCommitteeMessage(sync_committtee_msg) => { trace!( @@ -321,13 +335,15 @@ impl Router { "Received sync committee signature"; "peer_id" => %peer_id ); - self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_signature( - message_id, - peer_id, - sync_committtee_msg.1, - sync_committtee_msg.0, - timestamp_now(), - )) + self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_gossip_sync_signature( + message_id, + peer_id, + sync_committtee_msg.1, + sync_committtee_msg.0, + timestamp_now(), + ), + ) } PubsubMessage::LightClientFinalityUpdate(light_client_finality_update) => { trace!( @@ -335,13 +351,14 @@ impl Router { "Received light client finality update"; "peer_id" => %peer_id ); - self.send_beacon_processor_work( - BeaconWorkEvent::gossip_light_client_finality_update( - message_id, - peer_id, - light_client_finality_update, - timestamp_now(), - ), + 
self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_gossip_light_client_finality_update( + message_id, + peer_id, + *light_client_finality_update, + timestamp_now(), + ), ) } PubsubMessage::LightClientOptimisticUpdate(light_client_optimistic_update) => { @@ -350,21 +367,25 @@ impl Router { "Received light client optimistic update"; "peer_id" => %peer_id ); - self.send_beacon_processor_work( - BeaconWorkEvent::gossip_light_client_optimistic_update( - message_id, - peer_id, - light_client_optimistic_update, - timestamp_now(), - ), + self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_gossip_light_client_optimistic_update( + message_id, + peer_id, + *light_client_optimistic_update, + timestamp_now(), + ), ) } PubsubMessage::BlsToExecutionChange(bls_to_execution_change) => self - .send_beacon_processor_work(BeaconWorkEvent::gossip_bls_to_execution_change( - message_id, - peer_id, - bls_to_execution_change, - )), + .handle_beacon_processor_send_result( + self.network_beacon_processor + .send_gossip_bls_to_execution_change( + message_id, + peer_id, + bls_to_execution_change, + ), + ), } } @@ -415,7 +436,10 @@ impl Router { request_id, ); - self.send_beacon_processor_work(BeaconWorkEvent::status_message(peer_id, status)) + self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_status_message(peer_id, status), + ) } /// Handle a `BlocksByRange` response from the peer. 
@@ -480,20 +504,22 @@ impl Router { }); } - fn send_beacon_processor_work(&mut self, work: BeaconWorkEvent) { - self.beacon_processor_send - .try_send(work) - .unwrap_or_else(|e| { - let work_type = match &*e { - mpsc::error::TrySendError::Closed(work) - | mpsc::error::TrySendError::Full(work) => work.work_type(), - }; - - if self.logger_debounce.elapsed() { - error!(&self.log, "Unable to send message to the beacon processor"; - "error" => %e, "type" => work_type) + fn handle_beacon_processor_send_result( + &mut self, + result: Result<(), crate::network_beacon_processor::Error>, + ) { + if let Err(e) = result { + let work_type = match &e { + mpsc::error::TrySendError::Closed(work) | mpsc::error::TrySendError::Full(work) => { + work.work_type() } - }) + }; + + if self.logger_debounce.elapsed() { + error!(&self.log, "Unable to send message to the beacon processor"; + "error" => %e, "type" => work_type) + } + } } } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 2c919233fc..c2719477f1 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,5 +1,5 @@ use super::sync::manager::RequestId as SyncId; -use crate::beacon_processor::InvalidBlockStorage; +use crate::network_beacon_processor::InvalidBlockStorage; use crate::persisted_dht::{clear_dht, load_dht, persist_dht}; use crate::router::{Router, RouterMessage}; use crate::subnet_service::SyncCommitteeService; @@ -9,6 +9,7 @@ use crate::{ NetworkConfig, }; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use beacon_processor::{work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend}; use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; @@ -224,6 +225,8 @@ impl NetworkService { config: &NetworkConfig, executor: task_executor::TaskExecutor, gossipsub_registry: Option<&'_ mut Registry>, + beacon_processor_send: BeaconProcessorSend, + beacon_processor_reprocess_tx: mpsc::Sender, ) -> 
error::Result<(Arc>, NetworkSenders)> { let network_log = executor.log().clone(); // build the channels for external comms @@ -311,6 +314,8 @@ impl NetworkService { network_senders.network_send(), executor.clone(), invalid_block_storage, + beacon_processor_send, + beacon_processor_reprocess_tx, network_log.clone(), )?; diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 83fcc8c9ac..9943da15a0 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -4,12 +4,15 @@ mod tests { use crate::persisted_dht::load_dht; use crate::{NetworkConfig, NetworkService}; use beacon_chain::test_utils::BeaconChainHarness; + use beacon_processor::{ + BeaconProcessorSend, MAX_SCHEDULED_WORK_QUEUE_LEN, MAX_WORK_EVENT_QUEUE_LEN, + }; use lighthouse_network::Enr; use slog::{o, Drain, Level, Logger}; use sloggers::{null::NullLoggerBuilder, Build}; use std::str::FromStr; use std::sync::Arc; - use tokio::runtime::Runtime; + use tokio::{runtime::Runtime, sync::mpsc}; use types::MinimalEthSpec; fn get_logger(actual_log: bool) -> Logger { @@ -67,10 +70,20 @@ mod tests { // Create a new network service which implicitly gets dropped at the // end of the block. 
- let _network_service = - NetworkService::start(beacon_chain.clone(), &config, executor, None) - .await - .unwrap(); + let (beacon_processor_send, _beacon_processor_receive) = + mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); + let (beacon_processor_reprocess_tx, _beacon_processor_reprocess_rx) = + mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); + let _network_service = NetworkService::start( + beacon_chain.clone(), + &config, + executor, + None, + BeaconProcessorSend(beacon_processor_send), + beacon_processor_reprocess_tx, + ) + .await + .unwrap(); drop(signal); }); diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 9f676ba017..a1c2404e5e 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -8,7 +8,7 @@ //! If a batch fails, the backfill sync cannot progress. In this scenario, we mark the backfill //! sync as failed, log an error and attempt to retry once a new peer joins the node. 
-use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; +use crate::network_beacon_processor::ChainSegmentProcessId; use crate::sync::manager::{BatchProcessResult, Id}; use crate::sync::network_context::SyncNetworkContext; use crate::sync::range_sync::{ @@ -537,8 +537,8 @@ impl BackFillSync { self.current_processing_batch = Some(batch_id); if let Err(e) = network - .processor_channel() - .try_send(BeaconWorkEvent::chain_segment(process_id, blocks)) + .beacon_processor() + .send_chain_segment(process_id, blocks) { crit!(self.log, "Failed to send backfill segment to processor."; "msg" => "process_batch", "error" => %e, "batch" => self.processing_target); diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index aa2694769c..4340aa41d8 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -2,6 +2,7 @@ use std::collections::hash_map::Entry; use std::collections::HashMap; use std::time::Duration; +use crate::network_beacon_processor::ChainSegmentProcessId; use beacon_chain::{BeaconChainTypes, BlockError}; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; @@ -11,7 +12,6 @@ use smallvec::SmallVec; use std::sync::Arc; use store::{Hash256, SignedBeaconBlock}; -use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent}; use crate::metrics; use self::parent_lookup::PARENT_FAIL_TOLERANCE; @@ -542,8 +542,8 @@ impl BlockLookups { BlockProcessResult::Ok | BlockProcessResult::Err(BlockError::BlockIsAlreadyKnown { .. 
}) => { // Check if the beacon processor is available - let beacon_processor_send = match cx.processor_channel_if_enabled() { - Some(channel) => channel, + let beacon_processor = match cx.beacon_processor_if_enabled() { + Some(beacon_processor) => beacon_processor, None => { return trace!( self.log, @@ -555,7 +555,7 @@ impl BlockLookups { let (chain_hash, blocks, hashes, request) = parent_lookup.parts_for_processing(); let process_id = ChainSegmentProcessId::ParentLookup(chain_hash); - match beacon_processor_send.try_send(WorkEvent::chain_segment(process_id, blocks)) { + match beacon_processor.send_chain_segment(process_id, blocks) { Ok(_) => { self.processing_parent_lookups .insert(chain_hash, (hashes, request)); @@ -664,11 +664,15 @@ impl BlockLookups { process_type: BlockProcessType, cx: &mut SyncNetworkContext, ) -> Result<(), ()> { - match cx.processor_channel_if_enabled() { - Some(beacon_processor_send) => { + match cx.beacon_processor_if_enabled() { + Some(beacon_processor) => { trace!(self.log, "Sending block for processing"; "block" => ?block_root, "process" => ?process_type); - let event = WorkEvent::rpc_beacon_block(block_root, block, duration, process_type); - if let Err(e) = beacon_processor_send.try_send(event) { + if let Err(e) = beacon_processor.send_rpc_beacon_block( + block_root, + block, + duration, + process_type, + ) { error!( self.log, "Failed to send sync block to processor"; diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 82334db0f8..c588f867bd 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use crate::beacon_processor::BeaconProcessorSend; +use crate::network_beacon_processor::NetworkBeaconProcessor; use crate::service::RequestId; use crate::sync::manager::RequestId as SyncId; use crate::NetworkMessage; @@ -9,18 +9,19 @@ use super::*; use 
beacon_chain::builder::Witness; use beacon_chain::eth1_chain::CachingEth1Backend; +use beacon_processor::WorkEvent; use lighthouse_network::{NetworkGlobals, Request}; use slog::{Drain, Level}; -use slot_clock::SystemTimeSlotClock; +use slot_clock::ManualSlotClock; use store::MemoryStore; use tokio::sync::mpsc; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use types::MinimalEthSpec as E; -type T = Witness, E, MemoryStore, MemoryStore>; +type T = Witness, E, MemoryStore, MemoryStore>; struct TestRig { - beacon_processor_rx: mpsc::Receiver>, + beacon_processor_rx: mpsc::Receiver>, network_rx: mpsc::UnboundedReceiver>, rng: XorShiftRng, } @@ -41,8 +42,10 @@ impl TestRig { } }; - let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(100); let (network_tx, network_rx) = mpsc::unbounded_channel(); + let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log)); + let (network_beacon_processor, beacon_processor_rx) = + NetworkBeaconProcessor::null_for_testing(globals); let rng = XorShiftRng::from_seed([42; 16]); let rig = TestRig { beacon_processor_rx, @@ -51,11 +54,9 @@ impl TestRig { }; let bl = BlockLookups::new(log.new(slog::o!("component" => "block_lookups"))); let cx = { - let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log)); SyncNetworkContext::new( network_tx, - globals, - BeaconProcessorSend(beacon_processor_tx), + Arc::new(network_beacon_processor), log.new(slog::o!("component" => "network_context")), ) }; @@ -102,7 +103,7 @@ impl TestRig { fn expect_block_process(&mut self) { match self.beacon_processor_rx.try_recv() { Ok(work) => { - assert_eq!(work.work_type(), crate::beacon_processor::RPC_BLOCK); + assert_eq!(work.work_type(), beacon_processor::RPC_BLOCK); } other => panic!("Expected block process, found {:?}", other), } @@ -112,7 +113,7 @@ impl TestRig { fn expect_parent_chain_process(&mut self) { match self.beacon_processor_rx.try_recv() { Ok(work) => { - assert_eq!(work.work_type(), 
crate::beacon_processor::CHAIN_SEGMENT); + assert_eq!(work.work_type(), beacon_processor::CHAIN_SEGMENT); } other => panic!("Expected chain segment process, found {:?}", other), } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index c24d4c192b..72542752c5 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -38,7 +38,7 @@ use super::block_lookups::BlockLookups; use super::network_context::SyncNetworkContext; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; -use crate::beacon_processor::{BeaconProcessorSend, ChainSegmentProcessId}; +use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, EngineState}; @@ -159,9 +159,6 @@ pub struct SyncManager { /// A reference to the underlying beacon chain. chain: Arc>, - /// A reference to the network globals and peer-db. - network_globals: Arc>, - /// A receiving channel sent by the message processor thread. 
input_channel: mpsc::UnboundedReceiver>, @@ -186,29 +183,22 @@ pub struct SyncManager { pub fn spawn( executor: task_executor::TaskExecutor, beacon_chain: Arc>, - network_globals: Arc>, network_send: mpsc::UnboundedSender>, - beacon_processor_send: BeaconProcessorSend, + beacon_processor: Arc>, + sync_recv: mpsc::UnboundedReceiver>, log: slog::Logger, -) -> mpsc::UnboundedSender> { +) { assert!( MAX_REQUEST_BLOCKS >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH, "Max blocks that can be requested in a single batch greater than max allowed blocks in a single request" ); - // generate the message channel - let (sync_send, sync_recv) = mpsc::unbounded_channel::>(); // create an instance of the SyncManager + let network_globals = beacon_processor.network_globals.clone(); let mut sync_manager = SyncManager { chain: beacon_chain.clone(), - network_globals: network_globals.clone(), input_channel: sync_recv, - network: SyncNetworkContext::new( - network_send, - network_globals.clone(), - beacon_processor_send, - log.clone(), - ), + network: SyncNetworkContext::new(network_send, beacon_processor, log.clone()), range_sync: RangeSync::new(beacon_chain.clone(), log.clone()), backfill_sync: BackFillSync::new(beacon_chain, network_globals, log.clone()), block_lookups: BlockLookups::new(log.clone()), @@ -218,10 +208,13 @@ pub fn spawn( // spawn the sync manager thread debug!(log, "Sync Manager started"); executor.spawn(async move { Box::pin(sync_manager.main()).await }, "sync"); - sync_send } impl SyncManager { + fn network_globals(&self) -> &NetworkGlobals { + self.network.network_globals() + } + /* Input Handling Functions */ /// A peer has connected which has blocks that are unknown to us. 
@@ -322,12 +315,12 @@ impl SyncManager { let rpr = new_state.as_str(); // Drop the write lock let update_sync_status = self - .network_globals + .network_globals() .peers .write() .update_sync_status(peer_id, new_state.clone()); if let Some(was_updated) = update_sync_status { - let is_connected = self.network_globals.peers.read().is_connected(peer_id); + let is_connected = self.network_globals().peers.read().is_connected(peer_id); if was_updated { debug!( self.log, @@ -383,7 +376,7 @@ impl SyncManager { let head = self.chain.best_slot(); let current_slot = self.chain.slot().unwrap_or_else(|_| Slot::new(0)); - let peers = self.network_globals.peers.read(); + let peers = self.network_globals().peers.read(); if current_slot >= head && current_slot.sub(head) <= (SLOT_IMPORT_TOLERANCE as u64) && head > 0 @@ -445,8 +438,8 @@ impl SyncManager { }, }; - let old_state = self.network_globals.set_sync_state(new_state); - let new_state = self.network_globals.sync_state.read(); + let old_state = self.network_globals().set_sync_state(new_state); + let new_state = self.network_globals().sync_state.read().clone(); if !new_state.eq(&old_state) { info!(self.log, "Sync state updated"; "old_state" => %old_state, "new_state" => %new_state); // If we have become synced - Subscribe to all the core subnet topics @@ -505,7 +498,7 @@ impl SyncManager { } SyncMessage::UnknownBlock(peer_id, block, block_root) => { // If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore - if !self.network_globals.sync_state.read().is_synced() { + if !self.network_globals().sync_state.read().is_synced() { let head_slot = self.chain.canonical_head.cached_head().head_slot(); let unknown_block_slot = block.slot(); @@ -519,7 +512,7 @@ impl SyncManager { return; } } - if self.network_globals.peers.read().is_connected(&peer_id) + if self.network_globals().peers.read().is_connected(&peer_id) && self.network.is_execution_engine_online() { self.block_lookups @@ -528,8 +521,8 @@ impl SyncManager { } 
SyncMessage::UnknownBlockHash(peer_id, block_hash) => { // If we are not synced, ignore this block. - if self.network_globals.sync_state.read().is_synced() - && self.network_globals.peers.read().is_connected(&peer_id) + if self.network_globals().sync_state.read().is_synced() + && self.network_globals().peers.read().is_connected(&peer_id) && self.network.is_execution_engine_online() { self.block_lookups diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 03c466eece..adc235130b 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -3,7 +3,7 @@ use super::manager::{Id, RequestId as SyncRequestId}; use super::range_sync::{BatchId, ChainId}; -use crate::beacon_processor::BeaconProcessorSend; +use crate::network_beacon_processor::NetworkBeaconProcessor; use crate::service::{NetworkMessage, RequestId}; use crate::status::ToStatusMessage; use beacon_chain::{BeaconChainTypes, EngineState}; @@ -20,9 +20,6 @@ pub struct SyncNetworkContext { /// The network channel to relay messages to the Network service. network_send: mpsc::UnboundedSender>, - /// Access to the network global vars. - network_globals: Arc>, - /// A sequential ID for all RPC requests. request_id: Id, @@ -36,8 +33,8 @@ pub struct SyncNetworkContext { /// `beacon_processor_send`. execution_engine_state: EngineState, - /// Channel to send work to the beacon processor. - beacon_processor_send: BeaconProcessorSend, + /// Sends work to the beacon processor via a channel. + network_beacon_processor: Arc>, /// Logger for the `SyncNetworkContext`. 
log: slog::Logger, @@ -46,25 +43,27 @@ pub struct SyncNetworkContext { impl SyncNetworkContext { pub fn new( network_send: mpsc::UnboundedSender>, - network_globals: Arc>, - beacon_processor_send: BeaconProcessorSend, + network_beacon_processor: Arc>, log: slog::Logger, ) -> Self { Self { network_send, execution_engine_state: EngineState::Online, // always assume `Online` at the start - network_globals, request_id: 1, range_requests: FnvHashMap::default(), backfill_requests: FnvHashMap::default(), - beacon_processor_send, + network_beacon_processor, log, } } + pub fn network_globals(&self) -> &NetworkGlobals { + &self.network_beacon_processor.network_globals + } + /// Returns the Client type of the peer if known pub fn client_type(&self, peer_id: &PeerId) -> Client { - self.network_globals + self.network_globals() .peers .read() .peer_info(peer_id) @@ -278,13 +277,13 @@ impl SyncNetworkContext { }) } - pub fn processor_channel_if_enabled(&self) -> Option<&BeaconProcessorSend> { + pub fn beacon_processor_if_enabled(&self) -> Option<&Arc>> { self.is_execution_engine_online() - .then_some(&self.beacon_processor_send) + .then_some(&self.network_beacon_processor) } - pub fn processor_channel(&self) -> &BeaconProcessorSend { - &self.beacon_processor_send + pub fn beacon_processor(&self) -> &Arc> { + &self.network_beacon_processor } fn next_id(&mut self) -> Id { diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 51ca9e2b07..af547885dc 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1,5 +1,5 @@ use super::batch::{BatchInfo, BatchProcessingResult, BatchState}; -use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; +use crate::network_beacon_processor::ChainSegmentProcessId; use crate::sync::{ manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult, }; @@ -294,8 +294,8 @@ 
impl SyncingChain { return Ok(KeepChain); } - let beacon_processor_send = match network.processor_channel_if_enabled() { - Some(channel) => channel, + let beacon_processor = match network.beacon_processor_if_enabled() { + Some(beacon_processor) => beacon_processor, None => return Ok(KeepChain), }; @@ -317,9 +317,7 @@ impl SyncingChain { let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id); self.current_processing_batch = Some(batch_id); - if let Err(e) = - beacon_processor_send.try_send(BeaconWorkEvent::chain_segment(process_id, blocks)) - { + if let Err(e) = beacon_processor.send_chain_segment(process_id, blocks) { crit!(self.log, "Failed to send chain segment to processor."; "msg" => "process_batch", "error" => %e, "batch" => self.processing_target); // This is unlikely to happen but it would stall syncing since the batch now has no diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 2c35c57d9e..05ad5204b9 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -371,22 +371,23 @@ where #[cfg(test)] mod tests { + use crate::network_beacon_processor::NetworkBeaconProcessor; use crate::service::RequestId; use crate::NetworkMessage; use super::*; - use crate::beacon_processor::{BeaconProcessorSend, WorkEvent as BeaconWorkEvent}; use beacon_chain::builder::Witness; use beacon_chain::eth1_chain::CachingEth1Backend; use beacon_chain::parking_lot::RwLock; use beacon_chain::EngineState; + use beacon_processor::WorkEvent as BeaconWorkEvent; use lighthouse_network::rpc::BlocksByRangeRequest; use lighthouse_network::Request; use lighthouse_network::{rpc::StatusMessage, NetworkGlobals}; use slog::{o, Drain}; use tokio::sync::mpsc; - use slot_clock::SystemTimeSlotClock; + use slot_clock::ManualSlotClock; use std::collections::HashSet; use std::sync::Arc; use store::MemoryStore; @@ -437,7 +438,7 @@ mod tests { } type 
TestBeaconChainType = - Witness, E, MemoryStore, MemoryStore>; + Witness, E, MemoryStore, MemoryStore>; fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { let decorator = slog_term::TermDecorator::new().build(); @@ -455,7 +456,7 @@ mod tests { struct TestRig { log: slog::Logger, /// To check what does sync send to the beacon processor. - beacon_processor_rx: mpsc::Receiver>, + beacon_processor_rx: mpsc::Receiver>, /// To set up different scenarios where sync is told about known/unkown blocks. chain: Arc, /// Needed by range to handle communication with the network. @@ -583,7 +584,7 @@ mod tests { fn expect_chain_segment(&mut self) { match self.beacon_processor_rx.try_recv() { Ok(work) => { - assert_eq!(work.work_type(), crate::beacon_processor::CHAIN_SEGMENT); + assert_eq!(work.work_type(), beacon_processor::CHAIN_SEGMENT); } other => panic!("Expected chain segment process, found {:?}", other), } @@ -593,17 +594,17 @@ mod tests { fn range(log_enabled: bool) -> (TestRig, RangeSync) { let chain = Arc::new(FakeStorage::default()); let log = build_log(slog::Level::Trace, log_enabled); - let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(10); let range_sync = RangeSync::::new( chain.clone(), log.new(o!("component" => "range")), ); let (network_tx, network_rx) = mpsc::unbounded_channel(); let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log)); + let (network_beacon_processor, beacon_processor_rx) = + NetworkBeaconProcessor::null_for_testing(globals.clone()); let cx = SyncNetworkContext::new( network_tx, - globals.clone(), - BeaconProcessorSend(beacon_processor_tx), + Arc::new(network_beacon_processor), log.new(o!("component" => "network_context")), ); let test_rig = TestRig { From 420e9c490e4bb9f67ca6983e861d5f7f3cc2be73 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 12 Jul 2023 07:05:58 +0000 Subject: [PATCH 04/75] Add state-root command and network support to lcli (#4492) ## Proposed Changes * Add `lcli 
state-root` command for computing the hash tree root of a `BeaconState`. * Add a `--network` flag which can be used instead of `--testnet-dir` to set the network, e.g. Mainnet, Goerli, Gnosis. * Use the new network flag in `transition-blocks`, `skip-slots`, and `block-root`, which previously only supported mainnet. * **BREAKING CHANGE** Remove the default value of `~/.lighthouse/testnet` from `--testnet-dir`. This may have made sense in previous versions where `lcli` was more testnet focussed, but IMO it is an unnecessary complication and foot-gun today. --- lcli/src/block_root.rs | 9 ++- lcli/src/main.rs | 136 ++++++++++++++++++++++++++++------ lcli/src/skip_slots.rs | 9 ++- lcli/src/state_root.rs | 76 +++++++++++++++++++ lcli/src/transition_blocks.rs | 9 ++- 5 files changed, 211 insertions(+), 28 deletions(-) create mode 100644 lcli/src/state_root.rs diff --git a/lcli/src/block_root.rs b/lcli/src/block_root.rs index a47b48a30a..a4237d855b 100644 --- a/lcli/src/block_root.rs +++ b/lcli/src/block_root.rs @@ -31,14 +31,19 @@ use clap::ArgMatches; use clap_utils::{parse_optional, parse_required}; use environment::Environment; use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; +use eth2_network_config::Eth2NetworkConfig; use std::path::PathBuf; use std::time::{Duration, Instant}; use types::{EthSpec, FullPayload, SignedBeaconBlock}; const HTTP_TIMEOUT: Duration = Duration::from_secs(5); -pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), String> { - let spec = &T::default_spec(); +pub fn run( + env: Environment, + network_config: Eth2NetworkConfig, + matches: &ArgMatches, +) -> Result<(), String> { + let spec = &network_config.chain_spec::()?; let executor = env.core_context().executor; /* diff --git a/lcli/src/main.rs b/lcli/src/main.rs index d072beaa4e..565a5325f0 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -15,11 +15,13 @@ mod new_testnet; mod parse_ssz; mod replace_state_pubkeys; mod skip_slots; +mod state_root; mod 
transition_blocks; use clap::{App, Arg, ArgMatches, SubCommand}; -use clap_utils::parse_path_with_default_in_home_dir; +use clap_utils::parse_optional; use environment::{EnvironmentBuilder, LoggerConfig}; +use eth2_network_config::Eth2NetworkConfig; use parse_ssz::run_parse_ssz; use std::path::PathBuf; use std::process; @@ -50,7 +52,16 @@ fn main() { .value_name("PATH") .takes_value(true) .global(true) - .help("The testnet dir. Defaults to ~/.lighthouse/testnet"), + .help("The testnet dir."), + ) + .arg( + Arg::with_name("network") + .long("network") + .value_name("NAME") + .takes_value(true) + .global(true) + .help("The network to use. Defaults to mainnet.") + .conflicts_with("testnet-dir") ) .subcommand( SubCommand::with_name("skip-slots") @@ -126,7 +137,7 @@ fn main() { .takes_value(true) .conflicts_with("beacon-url") .requires("block-path") - .help("Path to load a BeaconState from file as SSZ."), + .help("Path to load a BeaconState from as SSZ."), ) .arg( Arg::with_name("block-path") @@ -135,7 +146,7 @@ fn main() { .takes_value(true) .conflicts_with("beacon-url") .requires("pre-state-path") - .help("Path to load a SignedBeaconBlock from file as SSZ."), + .help("Path to load a SignedBeaconBlock from as SSZ."), ) .arg( Arg::with_name("post-state-output-path") @@ -808,14 +819,14 @@ fn main() { ) .subcommand( SubCommand::with_name("block-root") - .about("Computes the block root of some block") + .about("Computes the block root of some block.") .arg( Arg::with_name("block-path") .long("block-path") .value_name("PATH") .takes_value(true) .conflicts_with("beacon-url") - .help("Path to load a SignedBeaconBlock from file as SSZ."), + .help("Path to load a SignedBeaconBlock from as SSZ."), ) .arg( Arg::with_name("beacon-url") @@ -841,6 +852,41 @@ fn main() { .help("Number of repeat runs, useful for benchmarking."), ) ) + .subcommand( + SubCommand::with_name("state-root") + .about("Computes the state root of some state.") + .arg( + Arg::with_name("state-path") + 
.long("state-path") + .value_name("PATH") + .takes_value(true) + .conflicts_with("beacon-url") + .help("Path to load a BeaconState from as SSZ."), + ) + .arg( + Arg::with_name("beacon-url") + .long("beacon-url") + .value_name("URL") + .takes_value(true) + .help("URL to a beacon-API provider."), + ) + .arg( + Arg::with_name("state-id") + .long("state-id") + .value_name("BLOCK_ID") + .takes_value(true) + .requires("beacon-url") + .help("Identifier for a state as per beacon-API standards (slot, root, etc.)"), + ) + .arg( + Arg::with_name("runs") + .long("runs") + .value_name("INTEGER") + .takes_value(true) + .default_value("1") + .help("Number of repeat runs, useful for benchmarking."), + ) + ) .get_matches(); let result = matches @@ -887,17 +933,44 @@ fn run( .build() .map_err(|e| format!("should build env: {:?}", e))?; - let testnet_dir = parse_path_with_default_in_home_dir( - matches, - "testnet-dir", - PathBuf::from(directory::DEFAULT_ROOT_DIR).join("testnet"), - )?; + // Determine testnet-dir path or network name depending on CLI flags. + let (testnet_dir, network_name) = + if let Some(testnet_dir) = parse_optional::(matches, "testnet-dir")? { + (Some(testnet_dir), None) + } else { + let network_name = + parse_optional(matches, "network")?.unwrap_or_else(|| "mainnet".to_string()); + (None, Some(network_name)) + }; + + // Lazily load either the testnet dir or the network config, as required. + // Some subcommands like new-testnet need the testnet dir but not the network config. 
+ let get_testnet_dir = || testnet_dir.clone().ok_or("testnet-dir is required"); + let get_network_config = || { + if let Some(testnet_dir) = &testnet_dir { + Eth2NetworkConfig::load(testnet_dir.clone()).map_err(|e| { + format!( + "Unable to open testnet dir at {}: {}", + testnet_dir.display(), + e + ) + }) + } else { + let network_name = network_name.ok_or("no network name or testnet-dir provided")?; + Eth2NetworkConfig::constant(&network_name)?.ok_or("invalid network name".into()) + } + }; match matches.subcommand() { - ("transition-blocks", Some(matches)) => transition_blocks::run::(env, matches) - .map_err(|e| format!("Failed to transition blocks: {}", e)), + ("transition-blocks", Some(matches)) => { + let network_config = get_network_config()?; + transition_blocks::run::(env, network_config, matches) + .map_err(|e| format!("Failed to transition blocks: {}", e)) + } ("skip-slots", Some(matches)) => { - skip_slots::run::(env, matches).map_err(|e| format!("Failed to skip slots: {}", e)) + let network_config = get_network_config()?; + skip_slots::run::(env, network_config, matches) + .map_err(|e| format!("Failed to skip slots: {}", e)) } ("pretty-ssz", Some(matches)) => { run_parse_ssz::(matches).map_err(|e| format!("Failed to pretty print hex: {}", e)) @@ -906,22 +979,33 @@ fn run( deploy_deposit_contract::run::(env, matches) .map_err(|e| format!("Failed to run deploy-deposit-contract command: {}", e)) } - ("eth1-genesis", Some(matches)) => eth1_genesis::run::(env, testnet_dir, matches) - .map_err(|e| format!("Failed to run eth1-genesis command: {}", e)), - ("interop-genesis", Some(matches)) => interop_genesis::run::(testnet_dir, matches) - .map_err(|e| format!("Failed to run interop-genesis command: {}", e)), + ("eth1-genesis", Some(matches)) => { + let testnet_dir = get_testnet_dir()?; + eth1_genesis::run::(env, testnet_dir, matches) + .map_err(|e| format!("Failed to run eth1-genesis command: {}", e)) + } + ("interop-genesis", Some(matches)) => { + let 
testnet_dir = get_testnet_dir()?; + interop_genesis::run::(testnet_dir, matches) + .map_err(|e| format!("Failed to run interop-genesis command: {}", e)) + } ("change-genesis-time", Some(matches)) => { + let testnet_dir = get_testnet_dir()?; change_genesis_time::run::(testnet_dir, matches) .map_err(|e| format!("Failed to run change-genesis-time command: {}", e)) } ("create-payload-header", Some(matches)) => create_payload_header::run::(matches) .map_err(|e| format!("Failed to run create-payload-header command: {}", e)), ("replace-state-pubkeys", Some(matches)) => { + let testnet_dir = get_testnet_dir()?; replace_state_pubkeys::run::(testnet_dir, matches) .map_err(|e| format!("Failed to run replace-state-pubkeys command: {}", e)) } - ("new-testnet", Some(matches)) => new_testnet::run::(testnet_dir, matches) - .map_err(|e| format!("Failed to run new_testnet command: {}", e)), + ("new-testnet", Some(matches)) => { + let testnet_dir = get_testnet_dir()?; + new_testnet::run::(testnet_dir, matches) + .map_err(|e| format!("Failed to run new_testnet command: {}", e)) + } ("check-deposit-data", Some(matches)) => check_deposit_data::run(matches) .map_err(|e| format!("Failed to run check-deposit-data command: {}", e)), ("generate-bootnode-enr", Some(matches)) => generate_bootnode_enr::run::(matches) @@ -932,8 +1016,16 @@ fn run( .map_err(|e| format!("Failed to run mnemonic-validators command: {}", e)), ("indexed-attestations", Some(matches)) => indexed_attestations::run::(matches) .map_err(|e| format!("Failed to run indexed-attestations command: {}", e)), - ("block-root", Some(matches)) => block_root::run::(env, matches) - .map_err(|e| format!("Failed to run block-root command: {}", e)), + ("block-root", Some(matches)) => { + let network_config = get_network_config()?; + block_root::run::(env, network_config, matches) + .map_err(|e| format!("Failed to run block-root command: {}", e)) + } + ("state-root", Some(matches)) => { + let network_config = get_network_config()?; + 
state_root::run::(env, network_config, matches) + .map_err(|e| format!("Failed to run state-root command: {}", e)) + } (other, _) => Err(format!("Unknown subcommand {}. See --help.", other)), } } diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index e3b2a5acbf..31fe9fe648 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -49,6 +49,7 @@ use clap::ArgMatches; use clap_utils::{parse_optional, parse_required}; use environment::Environment; use eth2::{types::StateId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; +use eth2_network_config::Eth2NetworkConfig; use ssz::Encode; use state_processing::state_advance::{complete_state_advance, partial_state_advance}; use std::fs::File; @@ -59,8 +60,12 @@ use types::{BeaconState, CloneConfig, EthSpec, Hash256}; const HTTP_TIMEOUT: Duration = Duration::from_secs(10); -pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), String> { - let spec = &T::default_spec(); +pub fn run( + env: Environment, + network_config: Eth2NetworkConfig, + matches: &ArgMatches, +) -> Result<(), String> { + let spec = &network_config.chain_spec::()?; let executor = env.core_context().executor; let output_path: Option = parse_optional(matches, "output-path")?; diff --git a/lcli/src/state_root.rs b/lcli/src/state_root.rs new file mode 100644 index 0000000000..efcee2827a --- /dev/null +++ b/lcli/src/state_root.rs @@ -0,0 +1,76 @@ +use crate::transition_blocks::load_from_ssz_with; +use clap::ArgMatches; +use clap_utils::{parse_optional, parse_required}; +use environment::Environment; +use eth2::{types::StateId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; +use eth2_network_config::Eth2NetworkConfig; +use std::path::PathBuf; +use std::time::{Duration, Instant}; +use types::{BeaconState, EthSpec}; + +const HTTP_TIMEOUT: Duration = Duration::from_secs(10); + +pub fn run( + env: Environment, + network_config: Eth2NetworkConfig, + matches: &ArgMatches, +) -> Result<(), String> { + let executor = 
env.core_context().executor; + + let spec = &network_config.chain_spec::()?; + + let state_path: Option = parse_optional(matches, "state-path")?; + let beacon_url: Option = parse_optional(matches, "beacon-url")?; + let runs: usize = parse_required(matches, "runs")?; + + info!( + "Using {} network ({} spec)", + spec.config_name.as_deref().unwrap_or("unknown"), + T::spec_name() + ); + info!("Doing {} runs", runs); + + let state = match (state_path, beacon_url) { + (Some(state_path), None) => { + info!("State path: {:?}", state_path); + load_from_ssz_with(&state_path, spec, BeaconState::from_ssz_bytes)? + } + (None, Some(beacon_url)) => { + let state_id: StateId = parse_required(matches, "state-id")?; + let client = BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(HTTP_TIMEOUT)); + executor + .handle() + .ok_or("shutdown in progress")? + .block_on(async move { + client + .get_debug_beacon_states::(state_id) + .await + .map_err(|e| format!("Failed to download state: {:?}", e)) + }) + .map_err(|e| format!("Failed to complete task: {:?}", e))? + .ok_or_else(|| format!("Unable to locate state at {:?}", state_id))? + .data + } + _ => return Err("must supply either --state-path or --beacon-url".into()), + }; + + /* + * Perform the core "runs". 
+ */ + let mut state_root = None; + for i in 0..runs { + let mut state = state.clone(); + let timer = Instant::now(); + state_root = Some( + state + .update_tree_hash_cache() + .map_err(|e| format!("error computing state root: {e:?}"))?, + ); + info!("Run {}: {:?}", i, timer.elapsed()); + } + + if let Some(state_root) = state_root { + info!("State root is {:?}", state_root); + } + Ok(()) +} diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 34a4560761..85705177dc 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -71,6 +71,7 @@ use eth2::{ types::{BlockId, StateId}, BeaconNodeHttpClient, SensitiveUrl, Timeouts, }; +use eth2_network_config::Eth2NetworkConfig; use ssz::Encode; use state_processing::{ block_signature_verifier::BlockSignatureVerifier, per_block_processing, per_slot_processing, @@ -94,8 +95,12 @@ struct Config { exclude_post_block_thc: bool, } -pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), String> { - let spec = &T::default_spec(); +pub fn run( + env: Environment, + network_config: Eth2NetworkConfig, + matches: &ArgMatches, +) -> Result<(), String> { + let spec = &network_config.chain_spec::()?; let executor = env.core_context().executor; /* From 62c9170755c43395d228fb2f40e08e947d3f2ca8 Mon Sep 17 00:00:00 2001 From: Jack McPherson Date: Wed, 12 Jul 2023 07:06:00 +0000 Subject: [PATCH 05/75] Remove hidden re-exports to appease Rust 1.73 (#4495) ## Issue Addressed #4494 ## Proposed Changes - Remove explicit re-exports of various types to appease the new compiler lint ## Additional Info It seems `warn(hidden_glob_reexports)` is the main culprit. 
--- beacon_node/execution_layer/src/lib.rs | 8 ++++---- beacon_node/http_api/tests/fork_tests.rs | 7 ++----- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 0e9df7a50d..2720569c8d 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -38,11 +38,11 @@ use tokio::{ }; use tokio_stream::wrappers::WatchStream; use tree_hash::TreeHash; -use types::{AbstractExecPayload, BeaconStateError, ExecPayload, Withdrawals}; +use types::{AbstractExecPayload, BeaconStateError, ExecPayload}; use types::{ - BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ExecutionPayload, - ExecutionPayloadCapella, ExecutionPayloadMerge, ForkName, ForkVersionedResponse, - ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256, + BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionPayloadCapella, ExecutionPayloadMerge, + ForkVersionedResponse, ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, + Slot, }; mod block_hash; diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 8a3ba887b3..0ab3c706e2 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -326,11 +326,8 @@ async fn sync_committee_indices_across_fork() { /// Assert that an HTTP API error has the given status code and indexed errors for the given indices. fn assert_server_indexed_error(error: eth2::Error, status_code: u16, indices: Vec) { - let eth2::Error::ServerIndexedMessage(IndexedErrorMessage { - code, - failures, - .. - }) = error else { + let eth2::Error::ServerIndexedMessage(IndexedErrorMessage { code, failures, .. 
}) = error + else { panic!("wrong error, expected ServerIndexedMessage, got: {error:?}") }; assert_eq!(code, status_code); From 0c7eed5e580b0f083042860761809282f2f7362b Mon Sep 17 00:00:00 2001 From: Tyler Date: Wed, 12 Jul 2023 22:02:26 +0000 Subject: [PATCH 06/75] bump proc-macro2 (#4464) ## Issue Addressed Addresses issue: #4459 ## Proposed Changes `cargo update -p proc-macro2` proc-macro2 v1.0.58 -> v1.0.63 ## Additional Info See: #4459 / https://github.com/rust-lang/rust/issues/113152 --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 532f7ff204..3eaa645e6d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6323,9 +6323,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.58" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8" +checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb" dependencies = [ "unicode-ident", ] From 6c375205fb4ae78b5b532352dea1e75584f6fa44 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 17 Jul 2023 00:14:12 +0000 Subject: [PATCH 07/75] Fix HTTP state API bug and add `--epochs-per-migration` (#4236) ## Issue Addressed Fix an issue observed by `@zlan` on Discord where Lighthouse would sometimes return this error when looking up states via the API: > {"code":500,"message":"UNHANDLED_ERROR: ForkChoiceError(MissingProtoArrayBlock(0xc9cf1495421b6ef3215d82253b388d77321176a1dcef0db0e71a0cd0ffc8cdb7))","stacktraces":[]} ## Proposed Changes The error stems from a faulty assumption in the HTTP API logic: that any state in the hot database must have its block in fork choice. This isn't true because the state's hot database may update much less frequently than the fork choice store, e.g. 
if reconstructing states (where freezer migration pauses), or if the freezer migration runs slowly. There could also be a race between loading the hot state and checking fork choice, e.g. even if the finalization migration of DB+fork choice were atomic, the update could happen between the 1st and 2nd calls. To address this I've changed the HTTP API logic to use the finalized block's execution status as a fallback where it is safe to do so. In the case where a block is non-canonical and prior to finalization (permanently orphaned) we default `execution_optimistic` to `true`. ## Additional Info I've also added a new CLI flag to reduce the frequency of the finalization migration as this is useful for several purposes: - Spacing out database writes (less frequent, larger batches) - Keeping a limited chain history with high availability, e.g. the last month in the hot database. This new flag made it _substantially_ easier to test this change. It was extracted from `tree-states` (where it's called `--db-migration-period`), which is why this PR also carries the `tree-states` label. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 35 ++++++++- beacon_node/beacon_chain/src/chain_config.rs | 3 + beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/migrate.rs | 61 ++++++++++++++- beacon_node/beacon_chain/src/test_utils.rs | 9 ++- beacon_node/client/src/builder.rs | 5 +- beacon_node/http_api/src/state_id.rs | 33 ++++++-- .../http_api/tests/interactive_tests.rs | 75 ++++++++++++++++++- beacon_node/src/cli.rs | 10 +++ beacon_node/src/config.rs | 6 ++ lighthouse/tests/beacon_node.rs | 18 +++++ 11 files changed, 240 insertions(+), 16 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 01343ff3b1..78f2c3f03b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -197,6 +197,17 @@ pub struct PrePayloadAttributes { pub parent_block_number: u64, } +/// Information about a state/block at a specific slot. +#[derive(Debug, Clone, Copy)] +pub struct FinalizationAndCanonicity { + /// True if the slot of the state or block is finalized. + /// + /// This alone DOES NOT imply that the state/block is finalized, use `self.is_finalized()`. + pub slot_is_finalized: bool, + /// True if the state or block is canonical at its slot. + pub canonical: bool, +} + /// Define whether a forkchoiceUpdate needs to be checked for an override (`Yes`) or has already /// been checked (`AlreadyApplied`). It is safe to specify `Yes` even if re-orgs are disabled. #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] @@ -426,6 +437,12 @@ pub struct BeaconChain { type BeaconBlockAndState = (BeaconBlock, BeaconState); +impl FinalizationAndCanonicity { + pub fn is_finalized(self) -> bool { + self.slot_is_finalized && self.canonical + } +} + impl BeaconChain { /// Checks if a block is finalized. /// The finalization check is done with the block slot. 
The block root is used to verify that @@ -455,16 +472,30 @@ impl BeaconChain { state_root: &Hash256, state_slot: Slot, ) -> Result { + self.state_finalization_and_canonicity(state_root, state_slot) + .map(FinalizationAndCanonicity::is_finalized) + } + + /// Fetch the finalization and canonicity status of the state with `state_root`. + pub fn state_finalization_and_canonicity( + &self, + state_root: &Hash256, + state_slot: Slot, + ) -> Result { let finalized_slot = self .canonical_head .cached_head() .finalized_checkpoint() .epoch .start_slot(T::EthSpec::slots_per_epoch()); - let is_canonical = self + let slot_is_finalized = state_slot <= finalized_slot; + let canonical = self .state_root_at_slot(state_slot)? .map_or(false, |canonical_root| state_root == &canonical_root); - Ok(state_slot <= finalized_slot && is_canonical) + Ok(FinalizationAndCanonicity { + slot_is_finalized, + canonical, + }) } /// Persists the head tracker and fork choice. diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index cc7a957ecc..efbc9905b7 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -83,6 +83,8 @@ pub struct ChainConfig { pub enable_backfill_rate_limiting: bool, /// Whether to use `ProgressiveBalancesCache` in unrealized FFG progression calculation. pub progressive_balances_mode: ProgressiveBalancesMode, + /// Number of epochs between each migration of data from the hot database to the freezer. 
+ pub epochs_per_migration: u64, } impl Default for ChainConfig { @@ -114,6 +116,7 @@ impl Default for ChainConfig { always_prepare_payload: false, enable_backfill_rate_limiting: true, progressive_balances_mode: ProgressiveBalancesMode::Checked, + epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION, } } } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index c5cf74e179..85ff0f20a0 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -73,6 +73,7 @@ pub use execution_layer::EngineState; pub use execution_payload::NotifyExecutionLayer; pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters}; pub use metrics::scrape_for_metrics; +pub use migrate::MigratorConfig; pub use parking_lot; pub use slot_clock; pub use state_processing::per_block_processing::errors::{ diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 66f082742e..8306b66d7b 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -25,10 +25,15 @@ const MIN_COMPACTION_PERIOD_SECONDS: u64 = 7200; /// Compact after a large finality gap, if we respect `MIN_COMPACTION_PERIOD_SECONDS`. const COMPACTION_FINALITY_DISTANCE: u64 = 1024; +/// Default number of epochs to wait between finalization migrations. +pub const DEFAULT_EPOCHS_PER_MIGRATION: u64 = 1; + /// The background migrator runs a thread to perform pruning and migrate state from the hot /// to the cold database. pub struct BackgroundMigrator, Cold: ItemStore> { db: Arc>, + /// Record of when the last migration ran, for enforcing `epochs_per_migration`. + prev_migration: Arc>, #[allow(clippy::type_complexity)] tx_thread: Option, thread::JoinHandle<()>)>>, /// Genesis block root, for persisting the `PersistedBeaconChain`. 
@@ -36,9 +41,22 @@ pub struct BackgroundMigrator, Cold: ItemStore> log: Logger, } -#[derive(Debug, Default, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct MigratorConfig { pub blocking: bool, + /// Run migrations at most once per `epochs_per_migration`. + /// + /// If set to 0 or 1, then run every finalization. + pub epochs_per_migration: u64, +} + +impl Default for MigratorConfig { + fn default() -> Self { + Self { + blocking: false, + epochs_per_migration: DEFAULT_EPOCHS_PER_MIGRATION, + } + } } impl MigratorConfig { @@ -46,6 +64,19 @@ impl MigratorConfig { self.blocking = true; self } + + pub fn epochs_per_migration(mut self, epochs_per_migration: u64) -> Self { + self.epochs_per_migration = epochs_per_migration; + self + } +} + +/// Record of when the last migration ran. +pub struct PrevMigration { + /// The epoch at which the last finalization migration ran. + epoch: Epoch, + /// The number of epochs to wait between runs. + epochs_per_migration: u64, } /// Pruning can be successful, or in rare cases deferred to a later point. @@ -92,6 +123,7 @@ pub struct FinalizationNotification { finalized_state_root: BeaconStateHash, finalized_checkpoint: Checkpoint, head_tracker: Arc, + prev_migration: Arc>, genesis_block_root: Hash256, } @@ -103,6 +135,11 @@ impl, Cold: ItemStore> BackgroundMigrator Self { + // Estimate last migration run from DB split slot. + let prev_migration = Arc::new(Mutex::new(PrevMigration { + epoch: db.get_split_slot().epoch(E::slots_per_epoch()), + epochs_per_migration: config.epochs_per_migration, + })); let tx_thread = if config.blocking { None } else { @@ -111,6 +148,7 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator prev_migration.epoch, + "new_finalized_epoch" => epoch, + "epochs_per_migration" => prev_migration.epochs_per_migration, + ); + return; + } + + // Update the previous migration epoch immediately to avoid holding the lock. 
If the + // migration doesn't succeed then the next migration will be retried at the next scheduled + // run. + prev_migration.epoch = epoch; + drop(prev_migration); + debug!(log, "Database consolidation started"); let finalized_state_root = notif.finalized_state_root; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 6520c9ba9c..5fc05c5551 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -516,18 +516,23 @@ where let validator_keypairs = self .validator_keypairs .expect("cannot build without validator keypairs"); + let chain_config = self.chain_config.unwrap_or_default(); let mut builder = BeaconChainBuilder::new(self.eth_spec_instance) .logger(log.clone()) .custom_spec(spec) .store(self.store.expect("cannot build without store")) - .store_migrator_config(MigratorConfig::default().blocking()) + .store_migrator_config( + MigratorConfig::default() + .blocking() + .epochs_per_migration(chain_config.epochs_per_migration), + ) .task_executor(self.runtime.task_executor.clone()) .execution_layer(self.execution_layer) .dummy_eth1_backend() .expect("should build dummy backend") .shutdown_sender(shutdown_tx) - .chain_config(self.chain_config.unwrap_or_default()) + .chain_config(chain_config) .event_handler(Some(ServerSentEventHandler::new_with_capacity( log.clone(), 5, diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index f3ed7f65a9..b1a507eaa5 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -11,7 +11,7 @@ use beacon_chain::{ slot_clock::{SlotClock, SystemTimeSlotClock}, state_advance_timer::spawn_state_advance_timer, store::{HotColdDB, ItemStore, LevelDB, StoreConfig}, - BeaconChain, BeaconChainTypes, Eth1ChainBackend, ServerSentEventHandler, + BeaconChain, BeaconChainTypes, Eth1ChainBackend, MigratorConfig, ServerSentEventHandler, }; use beacon_processor::{ 
work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessor, BeaconProcessorSend, @@ -184,6 +184,9 @@ where .store(store) .task_executor(context.executor.clone()) .custom_spec(spec.clone()) + .store_migrator_config( + MigratorConfig::default().epochs_per_migration(chain_config.epochs_per_migration), + ) .chain_config(chain_config) .graffiti(graffiti) .event_handler(event_handler) diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 9e4aadef17..5e86053771 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -70,15 +70,32 @@ impl StateId { .map_err(BeaconChainError::DBError) .map_err(warp_utils::reject::beacon_chain_error)? { - let execution_optimistic = chain - .canonical_head - .fork_choice_read_lock() - .is_optimistic_or_invalid_block_no_fallback(&hot_summary.latest_block_root) - .map_err(BeaconChainError::ForkChoiceError) - .map_err(warp_utils::reject::beacon_chain_error)?; - let finalized = chain - .is_finalized_state(root, hot_summary.slot) + let finalization_status = chain + .state_finalization_and_canonicity(root, hot_summary.slot) .map_err(warp_utils::reject::beacon_chain_error)?; + let finalized = finalization_status.is_finalized(); + let fork_choice = chain.canonical_head.fork_choice_read_lock(); + let execution_optimistic = if finalization_status.slot_is_finalized + && !finalization_status.canonical + { + // This block is permanently orphaned and has likely been pruned from fork + // choice. If it isn't found in fork choice, mark it optimistic to be on the + // safe side. + fork_choice + .is_optimistic_or_invalid_block_no_fallback( + &hot_summary.latest_block_root, + ) + .unwrap_or(true) + } else { + // This block is either old and finalized, or recent and unfinalized, so + // it's safe to fallback to the optimistic status of the finalized block. 
+ chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_or_invalid_block(&hot_summary.latest_block_root) + .map_err(BeaconChainError::ForkChoiceError) + .map_err(warp_utils::reject::beacon_chain_error)? + }; return Ok((*root, execution_optimistic, finalized)); } else if let Some(_cold_state_slot) = chain .store diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index da92419744..d7ea7c2628 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -2,8 +2,9 @@ use beacon_chain::{ chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, + ChainConfig, }; -use eth2::types::DepositContractData; +use eth2::types::{DepositContractData, StateId}; use execution_layer::{ForkchoiceState, PayloadAttributes}; use http_api::test_utils::InteractiveTester; use parking_lot::Mutex; @@ -17,7 +18,7 @@ use std::time::Duration; use tree_hash::TreeHash; use types::{ Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, - MainnetEthSpec, ProposerPreparationData, Slot, + MainnetEthSpec, MinimalEthSpec, ProposerPreparationData, Slot, }; type E = MainnetEthSpec; @@ -48,6 +49,76 @@ async fn deposit_contract_custom_network() { assert_eq!(result, expected); } +// Test that state lookups by root function correctly for states that are finalized but still +// present in the hot database, and have had their block pruned from fork choice. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn state_by_root_pruned_from_fork_choice() { + type E = MinimalEthSpec; + + let validator_count = 24; + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + + let tester = InteractiveTester::::new_with_initializer_and_mutator( + Some(spec.clone()), + validator_count, + Some(Box::new(move |builder| { + builder + .deterministic_keypairs(validator_count) + .fresh_ephemeral_store() + .chain_config(ChainConfig { + epochs_per_migration: 1024, + ..ChainConfig::default() + }) + })), + None, + ) + .await; + + let client = &tester.client; + let harness = &tester.harness; + + // Create some chain depth and finalize beyond fork choice's pruning depth. + let num_epochs = 8_u64; + let num_initial = num_epochs * E::slots_per_epoch(); + harness.advance_slot(); + harness + .extend_chain_with_sync( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + SyncCommitteeStrategy::NoValidators, + ) + .await; + + // Should now be finalized. + let finalized_epoch = harness.finalized_checkpoint().epoch; + assert_eq!(finalized_epoch, num_epochs - 2); + + // The split slot should still be at 0. + assert_eq!(harness.chain.store.get_split_slot(), 0); + + // States that are between the split and the finalized slot should be able to be looked up by + // state root. + for slot in 0..finalized_epoch.start_slot(E::slots_per_epoch()).as_u64() { + let state_root = harness + .chain + .state_root_at_slot(Slot::new(slot)) + .unwrap() + .unwrap(); + let response = client + .get_debug_beacon_states::(StateId::Root(state_root)) + .await + .unwrap() + .unwrap(); + + assert!(response.finalized.unwrap()); + assert!(!response.execution_optimistic.unwrap()); + + let mut state = response.data; + assert_eq!(state.update_tree_hash_cache().unwrap(), state_root); + } +} + /// Data structure for tracking fork choice updates received by the mock execution layer. 
#[derive(Debug, Default)] struct ForkChoiceUpdates { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index fc53f57888..e46c3d8ca1 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -533,6 +533,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { [default: 8192 (mainnet) or 64 (minimal)]") .takes_value(true) ) + .arg( + Arg::with_name("epochs-per-migration") + .long("epochs-per-migration") + .value_name("N") + .help("The number of epochs to wait between running the migration of data from the \ + hot DB to the cold DB. Less frequent runs can be useful for minimizing disk \ + writes") + .default_value("1") + .takes_value(true) + ) .arg( Arg::with_name("block-cache-size") .long("block-cache-size") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 948c70dd41..4abf649bfb 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -400,6 +400,12 @@ pub fn get_config( client_config.store.prune_payloads = prune_payloads; } + if let Some(epochs_per_migration) = + clap_utils::parse_optional(cli_args, "epochs-per-migration")? 
+ { + client_config.chain.epochs_per_migration = epochs_per_migration; + } + /* * Zero-ports * diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 4badcef3cf..bc5f610881 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1733,6 +1733,24 @@ fn no_reconstruct_historic_states_flag() { .run_with_zero_port() .with_config(|config| assert!(!config.chain.reconstruct_historic_states)); } +#[test] +fn epochs_per_migration_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.epochs_per_migration, + beacon_node::beacon_chain::migrate::DEFAULT_EPOCHS_PER_MIGRATION + ) + }); +} +#[test] +fn epochs_per_migration_override() { + CommandLineTest::new() + .flag("epochs-per-migration", Some("128")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.epochs_per_migration, 128)); +} // Tests for Slasher flags. // Using `--slasher-max-db-size` to work around https://github.com/sigp/lighthouse/issues/2342 From 5cd738c882e9fdd625492c52976c838e8c0a89a1 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 17 Jul 2023 00:14:13 +0000 Subject: [PATCH 08/75] Use unique arg names for eth1-sim (#4463) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Issue Addressed When trying to run `eth1-sim` locally, the simulator doesn't start for me, and panicked due to duplicate arg names for `proposer-nodes` (using same arg names as `nodes`). 
Not sure why this isn't failing on CI but failing on mine 🤔 ``` thread 'main' panicked at 'Argument short must be unique thread 'main' panicked at 'Argument long must be unique ``` --- testing/simulator/src/cli.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index 5dc2d5ec84..ff80201051 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -25,8 +25,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("4") .help("Number of beacon nodes")) .arg(Arg::with_name("proposer-nodes") - .short("n") - .long("nodes") + .short("p") + .long("proposer_nodes") .takes_value(true) .default_value("2") .help("Number of proposer-only beacon nodes")) @@ -64,8 +64,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("4") .help("Number of beacon nodes")) .arg(Arg::with_name("proposer-nodes") - .short("n") - .long("nodes") + .short("p") + .long("proposer_nodes") .takes_value(true) .default_value("2") .help("Number of proposer-only beacon nodes")) From 1a5de8b0f0ccb147e0df410ffcc9d9e2a57da863 Mon Sep 17 00:00:00 2001 From: Jack McPherson Date: Mon, 17 Jul 2023 00:14:14 +0000 Subject: [PATCH 09/75] Remove instances of required arguments with default values (#4489) ## Issue Addressed #4488 ## Proposed Changes - Remove all instances of the `required` modifier where we have a default value specified for a subcommand ## Additional Info N/A --- lcli/src/main.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 565a5325f0..8fbf5638b4 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -40,7 +40,6 @@ fn main() { .long("spec") .value_name("STRING") .takes_value(true) - .required(true) .possible_values(&["minimal", "mainnet", "gnosis"]) .default_value("mainnet") .global(true), @@ -372,7 +371,6 @@ fn main() { .index(2) .value_name("BIP39_MNENMONIC") .takes_value(true) - .required(true) .default_value( "replace nephew 
blur decorate waste convince soup column \ orient excite play baby", @@ -393,7 +391,6 @@ fn main() { .help("The block hash used when generating an execution payload. This \ value is used for `execution_payload_header.block_hash` as well as \ `execution_payload_header.random`") - .required(true) .default_value( "0x0000000000000000000000000000000000000000000000000000000000000000", ), @@ -411,7 +408,6 @@ fn main() { .value_name("INTEGER") .takes_value(true) .help("The base fee per gas field in the execution payload generated.") - .required(true) .default_value("1000000000"), ) .arg( @@ -420,7 +416,6 @@ fn main() { .value_name("INTEGER") .takes_value(true) .help("The gas limit field in the execution payload generated.") - .required(true) .default_value("30000000"), ) .arg( From 03674c719992d600b7af111e5f6fa890d77a97bd Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 17 Jul 2023 00:14:15 +0000 Subject: [PATCH 10/75] Update mev-rs and remove patches (#4496) ## Issue Addressed Fixes occasional compilation errors with mev-rs (see #4456). 
## Proposed Changes - Update `mev-rs` to the latest version, which allows us to remove hacky `[patch]` sections - Update the `axum` version used in `watch` so LH only uses a single version --- Cargo.lock | 96 ++++++++++--------- Cargo.toml | 7 -- beacon_node/execution_layer/Cargo.toml | 6 +- .../src/test_utils/mock_builder.rs | 72 +++++++------- watch/Cargo.toml | 2 +- watch/src/server/mod.rs | 3 +- 6 files changed, 88 insertions(+), 98 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3eaa645e6d..a1f5c7e2af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -245,6 +245,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "anvil-rpc" +version = "0.1.0" +source = "git+https://github.com/foundry-rs/foundry?rev=b45456717ffae1af65acdc71099f8cb95e6683a0#b45456717ffae1af65acdc71099f8cb95e6683a0" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "anyhow" version = "1.0.71" @@ -494,9 +503,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.17" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acee9fd5073ab6b045a275b3e709c163dd36c90685219cb21804a147b58dba43" +checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" dependencies = [ "async-trait", "axum-core", @@ -512,22 +521,23 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite 0.2.9", + "rustversion", "serde", "serde_json", + "serde_path_to_error", "serde_urlencoded", "sync_wrapper", "tokio", "tower", - "tower-http", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.2.9" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e5939e02c56fecd5c017c37df4238c0a839fa76b7f97acdd7efb804fd181cc" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" dependencies = [ "async-trait", "bytes", @@ -535,6 +545,7 @@ dependencies = [ "http", "http-body", "mime", + 
"rustversion", "tower-layer", "tower-service", ] @@ -593,7 +604,7 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "beacon-api-client" version = "0.1.0" -source = "git+https://github.com/ralexstokes/beacon-api-client#30679e9e25d61731cde54e14cd8a3688a39d8e5b" +source = "git+https://github.com/ralexstokes/beacon-api-client?rev=93d7e8c#93d7e8c38fe9782c4862909663e7b57c44f805a9" dependencies = [ "ethereum-consensus", "http", @@ -2492,7 +2503,7 @@ dependencies = [ [[package]] name = "ethereum-consensus" version = "0.1.1" -source = "git+https://github.com/ralexstokes//ethereum-consensus?rev=9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d#9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" +source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=e380108#e380108d15fcc40349927fdf3d11c71f9edb67c2" dependencies = [ "async-stream", "blst", @@ -2505,8 +2516,9 @@ dependencies = [ "rand 0.8.5", "serde", "serde_json", + "serde_yaml", "sha2 0.9.9", - "ssz-rs", + "ssz_rs", "thiserror", "tokio", "tokio-stream", @@ -2779,7 +2791,7 @@ dependencies = [ "serde_json", "slog", "slot_clock", - "ssz-rs", + "ssz_rs", "ssz_types", "state_processing", "strum", @@ -3503,12 +3515,6 @@ dependencies = [ "pin-project-lite 0.2.9", ] -[[package]] -name = "http-range-header" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" - [[package]] name = "http_api" version = "0.1.0" @@ -4969,9 +4975,9 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" -version = "0.5.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" +checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" [[package]] name = "md-5" @@ -5060,16 +5066,20 @@ dependencies = [ [[package]] name = 
"mev-rs" -version = "0.2.1" -source = "git+https://github.com/ralexstokes//mev-rs?rev=7813d4a4a564e0754e9aaab2d95520ba437c3889#7813d4a4a564e0754e9aaab2d95520ba437c3889" +version = "0.3.0" +source = "git+https://github.com/ralexstokes/mev-rs?rev=216657016d5c0889b505857c89ae42c7aa2764af#216657016d5c0889b505857c89ae42c7aa2764af" dependencies = [ + "anvil-rpc", "async-trait", "axum", "beacon-api-client", "ethereum-consensus", "hyper", + "parking_lot 0.12.1", + "reqwest", "serde", - "ssz-rs", + "serde_json", + "ssz_rs", "thiserror", "tokio", "tracing", @@ -7368,6 +7378,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b1b6471d7496b051e03f1958802a73f88b947866f5146f329e47e36554f4e55" +dependencies = [ + "itoa", + "serde", +] + [[package]] name = "serde_repr" version = "0.1.12" @@ -7865,23 +7885,24 @@ dependencies = [ ] [[package]] -name = "ssz-rs" -version = "0.8.0" -source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1" +name = "ssz_rs" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "057291e5631f280978fa9c8009390663ca4613359fc1318e36a8c24c392f6d1f" dependencies = [ "bitvec 1.0.1", "hex", "num-bigint", "serde", "sha2 0.9.9", - "ssz-rs-derive", - "thiserror", + "ssz_rs_derive", ] [[package]] -name = "ssz-rs-derive" -version = "0.8.0" -source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1" +name = "ssz_rs_derive" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140" dependencies = [ "proc-macro2", "quote", @@ -8640,25 +8661,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "tower-http" -version = "0.3.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" -dependencies = [ - "bitflags", - "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", - "pin-project-lite 0.2.9", - "tower", - "tower-layer", - "tower-service", -] - [[package]] name = "tower-layer" version = "0.3.2" diff --git a/Cargo.toml b/Cargo.toml index 775842a8c7..8b820d2a21 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -92,13 +92,6 @@ resolver = "2" warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" } arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b99989b561ddce62e4cf2887b0f8860ae991" } -[patch."https://github.com/ralexstokes/mev-rs"] -mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" } -[patch."https://github.com/ralexstokes/ethereum-consensus"] -ethereum-consensus = { git = "https://github.com/ralexstokes//ethereum-consensus", rev = "9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" } -[patch."https://github.com/ralexstokes/ssz-rs"] -ssz-rs = { git = "https://github.com/ralexstokes//ssz-rs", rev = "adf1a0b14cef90b9536f28ef89da1fab316465e1" } - [profile.maxperf] inherits = "release" lto = "fat" diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 2cb28346f5..8492a5159e 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -41,9 +41,9 @@ lazy_static = "1.4.0" ethers-core = "1.0.2" builder_client = { path = "../builder_client" } fork_choice = { path = "../../consensus/fork_choice" } -mev-rs = { git = "https://github.com/ralexstokes/mev-rs" } -ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus" } -ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs" } +mev-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = 
"216657016d5c0889b505857c89ae42c7aa2764af" } +ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "e380108" } +ssz_rs = "0.9.0" tokio-stream = { version = "0.1.9", features = [ "sync" ] } strum = "0.24.0" keccak-hash = "0.10.0" diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 668d1fb3b1..9471be636d 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -11,11 +11,17 @@ use ethereum_consensus::{ }; use fork_choice::ForkchoiceUpdateParameters; use mev_rs::{ - bellatrix::{BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix}, - capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella}, - sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError, - BlindedBlockProviderServer, BuilderBid, ExecutionPayload as ServerPayload, - SignedBlindedBeaconBlock, SignedBuilderBid, SignedValidatorRegistration, + blinded_block_provider::Server as BlindedBlockProviderServer, + signing::{sign_builder_message, verify_signed_builder_message}, + types::{ + bellatrix::{ + BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix, + }, + capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella}, + BidRequest, BuilderBid, ExecutionPayload as ServerPayload, SignedBlindedBeaconBlock, + SignedBuilderBid, SignedValidatorRegistration, + }, + Error as MevError, }; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; @@ -47,7 +53,7 @@ pub enum Operation { } impl Operation { - fn apply(self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { + fn apply(self, bid: &mut B) -> Result<(), MevError> { match self { Operation::FeeRecipient(fee_recipient) => { *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)? 
@@ -73,7 +79,7 @@ pub trait BidStuff { fn prev_randao_mut(&mut self) -> &mut Hash32; fn block_number_mut(&mut self) -> &mut u64; fn timestamp_mut(&mut self) -> &mut u64; - fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError>; + fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError>; fn sign_builder_message( &mut self, @@ -134,11 +140,9 @@ impl BidStuff for BuilderBid { } } - fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError> { + fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError> { match self { - Self::Bellatrix(_) => Err(BlindedBlockProviderError::Custom( - "withdrawals_root called on bellatrix bid".to_string(), - )), + Self::Bellatrix(_) => Err(MevError::InvalidFork), Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root), } } @@ -274,7 +278,7 @@ impl MockBuilder { *self.invalidate_signatures.write() = false; } - fn apply_operations(&self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { + fn apply_operations(&self, bid: &mut B) -> Result<(), MevError> { let mut guard = self.operations.write(); while let Some(op) = guard.pop() { op.apply(bid)?; @@ -288,7 +292,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { async fn register_validators( &self, registrations: &mut [SignedValidatorRegistration], - ) -> Result<(), BlindedBlockProviderError> { + ) -> Result<(), MevError> { for registration in registrations { let pubkey = registration.message.public_key.clone(); let message = &mut registration.message; @@ -307,10 +311,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { Ok(()) } - async fn fetch_best_bid( - &self, - bid_request: &BidRequest, - ) -> Result { + async fn fetch_best_bid(&self, bid_request: &BidRequest) -> Result { let slot = Slot::new(bid_request.slot); let fork = self.spec.fork_name_at_slot::(slot); let signed_cached_data = self @@ -336,7 +337,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { .map_err(convert_err)? 
.block_hash(); if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { - return Err(BlindedBlockProviderError::Custom(format!( + return Err(custom_err(format!( "head mismatch: {} {}", head_execution_hash, bid_request.parent_hash ))); @@ -396,7 +397,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { .get_debug_beacon_states(StateId::Head) .await .map_err(convert_err)? - .ok_or_else(|| BlindedBlockProviderError::Custom("missing head state".to_string()))? + .ok_or_else(|| custom_err("missing head state".to_string()))? .data; let prev_randao = head_state .get_randao_mix(head_state.current_epoch()) @@ -409,10 +410,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![])) } ForkName::Base | ForkName::Altair => { - return Err(BlindedBlockProviderError::Custom(format!( - "Unsupported fork: {}", - fork - ))); + return Err(MevError::InvalidFork); } }; @@ -452,12 +450,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, public_key: self.builder_sk.public_key(), }), - ForkName::Base | ForkName::Altair => { - return Err(BlindedBlockProviderError::Custom(format!( - "Unsupported fork: {}", - fork - ))) - } + ForkName::Base | ForkName::Altair => return Err(MevError::InvalidFork), }; *message.gas_limit_mut() = cached_data.gas_limit; @@ -475,7 +468,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { async fn open_bid( &self, signed_block: &mut SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let node = match signed_block { SignedBlindedBeaconBlock::Bellatrix(block) => { block.message.body.execution_payload_header.hash_tree_root() @@ -496,9 +489,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { } } -pub fn from_ssz_rs( - ssz_rs_data: &T, -) -> Result { +pub fn from_ssz_rs(ssz_rs_data: &T) -> Result { U::from_ssz_bytes( ssz_rs::serialize(ssz_rs_data) .map_err(convert_err)? 
@@ -507,12 +498,17 @@ pub fn from_ssz_rs( .map_err(convert_err) } -pub fn to_ssz_rs( - ssz_data: &T, -) -> Result { +pub fn to_ssz_rs(ssz_data: &T) -> Result { ssz_rs::deserialize::(&ssz_data.as_ssz_bytes()).map_err(convert_err) } -fn convert_err(e: E) -> BlindedBlockProviderError { - BlindedBlockProviderError::Custom(format!("{e:?}")) +fn convert_err(e: E) -> MevError { + custom_err(format!("{e:?}")) +} + +// This is a bit of a hack since the `Custom` variant was removed from `mev_rs::Error`. +fn custom_err(s: String) -> MevError { + MevError::Consensus(ethereum_consensus::state_transition::Error::Io( + std::io::Error::new(std::io::ErrorKind::Other, s), + )) } diff --git a/watch/Cargo.toml b/watch/Cargo.toml index d1793a9d06..1a003167dc 100644 --- a/watch/Cargo.toml +++ b/watch/Cargo.toml @@ -21,7 +21,7 @@ types = { path = "../consensus/types" } eth2 = { path = "../common/eth2" } beacon_node = { path = "../beacon_node"} tokio = { version = "1.14.0", features = ["time"] } -axum = "0.5.15" +axum = "0.6.18" hyper = "0.14.20" serde = "1.0.116" serde_json = "1.0.58" diff --git a/watch/src/server/mod.rs b/watch/src/server/mod.rs index 09d5ec6aac..d8ae0eb6c6 100644 --- a/watch/src/server/mod.rs +++ b/watch/src/server/mod.rs @@ -5,7 +5,6 @@ use crate::config::Config as FullConfig; use crate::database::{self, PgPool}; use crate::suboptimal_attestations::{attestation_routes, blockprint_attestation_routes}; use axum::{ - handler::Handler, http::{StatusCode, Uri}, routing::get, Extension, Json, Router, @@ -104,7 +103,7 @@ pub fn start_server( } let app = routes - .fallback(route_not_found.into_service()) + .fallback(route_not_found) .layer(Extension(pool)) .layer(Extension(slots_per_epoch)); From 5569d97a070b3fbd68f2515cd02bc527a9b5cf2d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 17 Jul 2023 00:14:16 +0000 Subject: [PATCH 11/75] Remove wget dependency (#4497) ## Proposed Changes Replace `wget` in the EF-tests makefile with `curl`. 
On macOS `curl` is pre-installed, and I found myself making this change to avoid installing `wget`. The `-L` flag is used to follow redirects which is useful if a repo gets renamed, and more similar to `wget`'s default behaviour. --- testing/ef_tests/Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index f7562f477a..81a1739eb1 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -13,7 +13,7 @@ BLS_TARBALL = $(patsubst %,%-$(BLS_TEST_TAG).tar.gz,$(BLS_TEST)) BLS_OUTPUT_DIR := $(OUTPUT_DIR)/$(BLS_TEST_REPO_NAME) BLS_BASE_URL := https://github.com/ethereum/$(BLS_TEST_REPO_NAME)/releases/download/$(BLS_TEST_TAG) -WGET := $(if $(LIGHTHOUSE_GITHUB_TOKEN),wget --header="Authorization: $(LIGHTHOUSE_GITHUB_TOKEN)",wget) +CURL := $(if $(LIGHTHOUSE_GITHUB_TOKEN),curl -L --header "Authorization: $(LIGHTHOUSE_GITHUB_TOKEN)",curl -L) all: make $(OUTPUT_DIR) @@ -27,11 +27,11 @@ $(OUTPUT_DIR): $(TARBALLS) $(BLS_OUTPUT_DIR): mkdir $(BLS_OUTPUT_DIR) - $(WGET) $(BLS_BASE_URL)/$(BLS_TEST).tar.gz -O $(BLS_TARBALL) + $(CURL) $(BLS_BASE_URL)/$(BLS_TEST).tar.gz -o $(BLS_TARBALL) tar -xzf $(BLS_TARBALL) -C $(BLS_OUTPUT_DIR) %-$(TESTS_TAG).tar.gz: - $(WGET) $(BASE_URL)/$*.tar.gz -O $@ + $(CURL) $(BASE_URL)/$*.tar.gz -o $@ clean-test-files: rm -rf $(OUTPUT_DIR) $(BLS_OUTPUT_DIR) From 68d5a6cf993fc1f8d09eb190db71c0df735a90b2 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 17 Jul 2023 00:14:17 +0000 Subject: [PATCH 12/75] Clean up local testnet files without prompting (#4498) ## Issue Addressed Addresses an issue where CI could fail due to a nonexistent file error: ``` Run ./clean.sh rm: cannot remove '/home/runner/.lighthouse/local-testnet/geth_datadir4/geth/fastcache.tmp.1549331618': No such file or directory Error: Process completed with exit code 1. ``` This seems to happen quite frequently now, I'm not sure exactly why but perhaps it's worth trying to suppress the prompt? 
https://github.com/sigp/lighthouse/actions/runs/5455027574/jobs/9925916159?pr=4463 --- scripts/local_testnet/clean.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/local_testnet/clean.sh b/scripts/local_testnet/clean.sh index 6db8753d02..cd915e470d 100755 --- a/scripts/local_testnet/clean.sh +++ b/scripts/local_testnet/clean.sh @@ -9,5 +9,5 @@ set -Eeuo pipefail source ./vars.env if [ -d $DATADIR ]; then - rm -r $DATADIR + rm -rf $DATADIR fi From d4a61756caba0e52242fddfff095fb077afdb831 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 17 Jul 2023 00:14:18 +0000 Subject: [PATCH 13/75] CI fix: add retries to eth1 sim tests (#4501) ## Issue Addressed This PR attempts to work around the recent frequent eth1 simulator failures caused by missing eth logs from Anvil. > FailedToInsertDeposit(NonConsecutive { log_index: 1, expected: 0 }) This usually occurs at the beginning of the tests, and it guarantees a timeout after a few hours if this log shows up, and this is currently causing our CIs to fail quite frequently. Example failure here: https://github.com/sigp/lighthouse/actions/runs/5525760195/jobs/10079736914 ## Proposed Changes The quick fix applied here adds a timeout to node startup and restarts the node again. - Add a 60-second timeout to beacon node startup in eth1 simulator tests. It takes ~10 seconds on my machine, but could take longer on CI runners. - Wrap the startup code in a retry function that allows for 3 retries before returning an error. ## Additional Info We should probably raise an issue under the Anvil GitHub repo so this can be further investigated. 
--- Cargo.lock | 1 + testing/node_test_rig/Cargo.toml | 1 + testing/node_test_rig/src/lib.rs | 19 ++-- testing/simulator/src/eth1_sim.rs | 168 ++++++++++++++++++------------ testing/simulator/src/main.rs | 1 + testing/simulator/src/retry.rs | 63 +++++++++++ 6 files changed, 183 insertions(+), 70 deletions(-) create mode 100644 testing/simulator/src/retry.rs diff --git a/Cargo.lock b/Cargo.lock index a1f5c7e2af..4bb3ab99e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5531,6 +5531,7 @@ dependencies = [ "execution_layer", "sensitive_url", "tempfile", + "tokio", "types", "validator_client", "validator_dir", diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index ea5d005c16..ac77349c58 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ -14,3 +14,4 @@ validator_client = { path = "../../validator_client" } validator_dir = { path = "../../common/validator_dir", features = ["insecure_keys"] } sensitive_url = { path = "../../common/sensitive_url" } execution_layer = { path = "../../beacon_node/execution_layer" } +tokio = { version = "1.14.0", features = ["time"] } diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index d4fd115bec..62db67b8c5 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -10,6 +10,7 @@ use std::path::PathBuf; use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; use tempfile::{Builder as TempBuilder, TempDir}; +use tokio::time::timeout; use types::EthSpec; use validator_client::ProductionValidatorClient; use validator_dir::insecure_keys::build_deterministic_validator_dirs; @@ -24,6 +25,8 @@ pub use validator_client::Config as ValidatorConfig; /// The global timeout for HTTP requests to the beacon node. const HTTP_TIMEOUT: Duration = Duration::from_secs(4); +/// The timeout for a beacon node to start up. 
+const STARTUP_TIMEOUT: Duration = Duration::from_secs(60); /// Provides a beacon node that is running in the current process on a given tokio executor (it /// is _local_ to this process). @@ -51,12 +54,16 @@ impl LocalBeaconNode { client_config.set_data_dir(datadir.path().into()); client_config.network.network_dir = PathBuf::from(datadir.path()).join("network"); - ProductionBeaconNode::new(context, client_config) - .await - .map(move |client| Self { - client: client.into_inner(), - datadir, - }) + timeout( + STARTUP_TIMEOUT, + ProductionBeaconNode::new(context, client_config), + ) + .await + .map_err(|_| format!("Beacon node startup timed out after {:?}", STARTUP_TIMEOUT))? + .map(move |client| Self { + client: client.into_inner(), + datadir, + }) } } diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 3e764d27d0..57c944cf1a 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -1,14 +1,16 @@ use crate::local_network::{EXECUTION_PORT, TERMINAL_BLOCK, TERMINAL_DIFFICULTY}; -use crate::{checks, LocalNetwork, E}; +use crate::{checks, LocalNetwork}; use clap::ArgMatches; use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; use eth1_test_rig::AnvilEth1Instance; +use crate::retry::with_retry; use execution_layer::http::deposit_methods::Eth1Id; use futures::prelude::*; +use node_test_rig::environment::RuntimeContext; use node_test_rig::{ environment::{EnvironmentBuilder, LoggerConfig}, - testing_client_config, testing_validator_config, ClientGenesis, ValidatorFiles, + testing_client_config, testing_validator_config, ClientConfig, ClientGenesis, ValidatorFiles, }; use rayon::prelude::*; use sensitive_url::SensitiveUrl; @@ -107,71 +109,24 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let context = env.core_context(); let main_future = async { - /* - * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit - * validators. 
- */ - let anvil_eth1_instance = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?; - let deposit_contract = anvil_eth1_instance.deposit_contract; - let chain_id = anvil_eth1_instance.anvil.chain_id(); - let anvil = anvil_eth1_instance.anvil; - let eth1_endpoint = SensitiveUrl::parse(anvil.endpoint().as_str()) - .expect("Unable to parse anvil endpoint."); - let deposit_contract_address = deposit_contract.address(); - - // Start a timer that produces eth1 blocks on an interval. - tokio::spawn(async move { - let mut interval = tokio::time::interval(eth1_block_time); - loop { - interval.tick().await; - let _ = anvil.evm_mine().await; - } - }); - - // Submit deposits to the deposit contract. - tokio::spawn(async move { - for i in 0..total_validator_count { - println!("Submitting deposit for validator {}...", i); - let _ = deposit_contract - .deposit_deterministic_async::(i, deposit_amount) - .await; - } - }); - - let mut beacon_config = testing_client_config(); - - beacon_config.genesis = ClientGenesis::DepositContract; - beacon_config.eth1.endpoint = Eth1Endpoint::NoAuth(eth1_endpoint); - beacon_config.eth1.deposit_contract_address = deposit_contract_address; - beacon_config.eth1.deposit_contract_deploy_block = 0; - beacon_config.eth1.lowest_cached_block_number = 0; - beacon_config.eth1.follow_distance = 1; - beacon_config.eth1.node_far_behind_seconds = 20; - beacon_config.dummy_eth1_backend = false; - beacon_config.sync_eth1_chain = true; - beacon_config.eth1.auto_update_interval_millis = eth1_block_time.as_millis() as u64; - beacon_config.eth1.chain_id = Eth1Id::from(chain_id); - beacon_config.network.target_peers = node_count + proposer_nodes - 1; - - beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None); - - if post_merge_sim { - let el_config = execution_layer::Config { - execution_endpoints: vec![SensitiveUrl::parse(&format!( - "http://localhost:{}", - EXECUTION_PORT - )) - .unwrap()], - ..Default::default() - }; - - 
beacon_config.execution_layer = Some(el_config); - } - /* * Create a new `LocalNetwork` with one beacon node. */ - let network = LocalNetwork::new(context.clone(), beacon_config.clone()).await?; + let max_retries = 3; + let (network, beacon_config) = with_retry(max_retries, || { + Box::pin(create_local_network( + LocalNetworkParams { + eth1_block_time, + total_validator_count, + deposit_amount, + node_count, + proposer_nodes, + post_merge_sim, + }, + context.clone(), + )) + }) + .await?; /* * One by one, add beacon nodes to the network. @@ -341,3 +296,88 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { Ok(()) } + +struct LocalNetworkParams { + eth1_block_time: Duration, + total_validator_count: usize, + deposit_amount: u64, + node_count: usize, + proposer_nodes: usize, + post_merge_sim: bool, +} + +async fn create_local_network( + LocalNetworkParams { + eth1_block_time, + total_validator_count, + deposit_amount, + node_count, + proposer_nodes, + post_merge_sim, + }: LocalNetworkParams, + context: RuntimeContext, +) -> Result<(LocalNetwork, ClientConfig), String> { + /* + * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit + * validators. + */ + let anvil_eth1_instance = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?; + let deposit_contract = anvil_eth1_instance.deposit_contract; + let chain_id = anvil_eth1_instance.anvil.chain_id(); + let anvil = anvil_eth1_instance.anvil; + let eth1_endpoint = + SensitiveUrl::parse(anvil.endpoint().as_str()).expect("Unable to parse anvil endpoint."); + let deposit_contract_address = deposit_contract.address(); + + // Start a timer that produces eth1 blocks on an interval. + tokio::spawn(async move { + let mut interval = tokio::time::interval(eth1_block_time); + loop { + interval.tick().await; + let _ = anvil.evm_mine().await; + } + }); + + // Submit deposits to the deposit contract. 
+ tokio::spawn(async move { + for i in 0..total_validator_count { + println!("Submitting deposit for validator {}...", i); + let _ = deposit_contract + .deposit_deterministic_async::(i, deposit_amount) + .await; + } + }); + + let mut beacon_config = testing_client_config(); + + beacon_config.genesis = ClientGenesis::DepositContract; + beacon_config.eth1.endpoint = Eth1Endpoint::NoAuth(eth1_endpoint); + beacon_config.eth1.deposit_contract_address = deposit_contract_address; + beacon_config.eth1.deposit_contract_deploy_block = 0; + beacon_config.eth1.lowest_cached_block_number = 0; + beacon_config.eth1.follow_distance = 1; + beacon_config.eth1.node_far_behind_seconds = 20; + beacon_config.dummy_eth1_backend = false; + beacon_config.sync_eth1_chain = true; + beacon_config.eth1.auto_update_interval_millis = eth1_block_time.as_millis() as u64; + beacon_config.eth1.chain_id = Eth1Id::from(chain_id); + beacon_config.network.target_peers = node_count + proposer_nodes - 1; + + beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None); + + if post_merge_sim { + let el_config = execution_layer::Config { + execution_endpoints: vec![SensitiveUrl::parse(&format!( + "http://localhost:{}", + EXECUTION_PORT + )) + .unwrap()], + ..Default::default() + }; + + beacon_config.execution_layer = Some(el_config); + } + + let network = LocalNetwork::new(context, beacon_config.clone()).await?; + Ok((network, beacon_config)) +} diff --git a/testing/simulator/src/main.rs b/testing/simulator/src/main.rs index a19777c5ab..e8af9c1806 100644 --- a/testing/simulator/src/main.rs +++ b/testing/simulator/src/main.rs @@ -21,6 +21,7 @@ mod cli; mod eth1_sim; mod local_network; mod no_eth1_sim; +mod retry; mod sync_sim; use cli::cli_app; diff --git a/testing/simulator/src/retry.rs b/testing/simulator/src/retry.rs new file mode 100644 index 0000000000..a4eb52cea1 --- /dev/null +++ b/testing/simulator/src/retry.rs @@ -0,0 +1,63 @@ +use std::fmt::Debug; +use std::future::Future; +use 
std::pin::Pin; + +/// Executes the function with a specified number of retries if the function returns an error. +/// Once it exceeds `max_retries` and still fails, the error is returned. +pub async fn with_retry(max_retries: usize, mut func: F) -> Result +where + F: FnMut() -> Pin>>>, + E: Debug, +{ + let mut retry_count = 0; + loop { + let result = Box::pin(func()).await; + if result.is_ok() || retry_count >= max_retries { + break result; + } + retry_count += 1; + + if let Err(e) = result { + eprintln!( + "Operation failed with error {:?}, retrying {} of {}", + e, retry_count, max_retries + ); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::collections::VecDeque; + + async fn my_async_func(is_ok: bool) -> Result<(), ()> { + if is_ok { + Ok(()) + } else { + Err(()) + } + } + + #[tokio::test] + async fn test_with_retry_ok() { + let res = with_retry(3, || Box::pin(my_async_func(true))).await; + assert!(res.is_ok()); + } + + #[tokio::test] + async fn test_with_retry_2nd_ok() { + let mut mock_results = VecDeque::from([false, true]); + let res = with_retry(3, || { + Box::pin(my_async_func(mock_results.pop_front().unwrap())) + }) + .await; + assert!(res.is_ok()); + } + + #[tokio::test] + async fn test_with_retry_fail() { + let res = with_retry(3, || Box::pin(my_async_func(false))).await; + assert!(res.is_err()); + } +} From f2223feb21bb10dbdebce097c5cda384b1ff44a3 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 17 Jul 2023 00:14:19 +0000 Subject: [PATCH 14/75] Rust 1.71 lints (#4503) ## Issue Addressed N/A ## Proposed Changes Add lints for rust 1.71 [3789134](https://github.com/sigp/lighthouse/pull/4503/commits/3789134ae2b06cedbe8becf1db49b2d23689ef94) is probably the one that needs most attention as it changes beacon state code. I changed the `is_in_inactivity_leak ` function to return a `ArithError` as not all consumers of that function work well with a `BeaconState::Error`. 
--- .../beacon_chain/src/attestation_rewards.rs | 2 +- beacon_node/execution_layer/src/lib.rs | 42 +++++++++---------- .../http_api/src/block_packing_efficiency.rs | 2 +- consensus/state_processing/src/lib.rs | 2 +- .../block_signature_verifier.rs | 2 +- .../src/per_block_processing/tests.rs | 2 +- .../altair/inactivity_updates.rs | 2 +- .../altair/rewards_and_penalties.rs | 2 +- .../state_processing/src/verify_operation.rs | 8 ++-- consensus/types/src/beacon_state.rs | 20 +++++---- .../types/src/beacon_state/committee_cache.rs | 2 +- .../types/src/beacon_state/tree_hash_cache.rs | 2 +- consensus/types/src/chain_spec.rs | 2 +- consensus/types/src/execution_payload.rs | 4 +- consensus/types/src/lib.rs | 2 +- consensus/types/src/participation_list.rs | 2 +- consensus/types/src/subnet_id.rs | 2 +- consensus/types/src/test_utils/mod.rs | 2 +- consensus/types/src/test_utils/test_random.rs | 2 +- slasher/tests/backend.rs | 2 +- 20 files changed, 56 insertions(+), 50 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index a4a661197f..460bf18bcf 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -86,7 +86,7 @@ impl BeaconChain { let ideal_reward = reward_numerator .safe_div(active_increments)? .safe_div(WEIGHT_DENOMINATOR)?; - if !state.is_in_inactivity_leak(previous_epoch, spec) { + if !state.is_in_inactivity_leak(previous_epoch, spec)? 
{ ideal_rewards_hashmap .insert((flag_index, effective_balance), (ideal_reward, penalty)); } else { diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 2720569c8d..d72686baf5 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -163,7 +163,7 @@ impl> BlockProposalContents ExecutionLayer { BlockProposalContents::Payload { payload: relay.data.message.header, block_value: relay.data.message.value, - _phantom: PhantomData::default(), + _phantom: PhantomData, }, )), Err(reason) if !reason.payload_invalid() => { @@ -913,7 +913,7 @@ impl ExecutionLayer { BlockProposalContents::Payload { payload: relay.data.message.header, block_value: relay.data.message.value, - _phantom: PhantomData::default(), + _phantom: PhantomData, }, )), // If the payload is valid then use it. The local EE failed @@ -922,7 +922,7 @@ impl ExecutionLayer { BlockProposalContents::Payload { payload: relay.data.message.header, block_value: relay.data.message.value, - _phantom: PhantomData::default(), + _phantom: PhantomData, }, )), Err(reason) => { @@ -1129,7 +1129,7 @@ impl ExecutionLayer { Ok(BlockProposalContents::Payload { payload: execution_payload.into(), block_value, - _phantom: PhantomData::default(), + _phantom: PhantomData, }) }) .await @@ -2018,6 +2018,22 @@ async fn timed_future, T>(metric: &str, future: F) -> (T, (result, duration) } +fn noop( + _: &ExecutionLayer, + _: ExecutionPayloadRef, +) -> Option> { + None +} + +#[cfg(test)] +/// Returns the duration since the unix epoch. +fn timestamp_now() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_else(|_| Duration::from_secs(0)) + .as_secs() +} + #[cfg(test)] mod test { use super::*; @@ -2164,19 +2180,3 @@ mod test { .await; } } - -fn noop( - _: &ExecutionLayer, - _: ExecutionPayloadRef, -) -> Option> { - None -} - -#[cfg(test)] -/// Returns the duration since the unix epoch. 
-fn timestamp_now() -> u64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or_else(|_| Duration::from_secs(0)) - .as_secs() -} diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index 1b924f3828..e099e130a8 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -75,7 +75,7 @@ impl PackingEfficiencyHandler { available_attestations: HashSet::new(), included_attestations: HashMap::new(), committee_store: CommitteeStore::new(), - _phantom: PhantomData::default(), + _phantom: PhantomData, }; handler.compute_epoch(start_epoch, &starting_state, spec)?; diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index 7340206a34..a3ee725406 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -2,7 +2,7 @@ #![cfg_attr( not(test), deny( - clippy::integer_arithmetic, + clippy::arithmetic_side_effects, clippy::disallowed_methods, clippy::indexing_slicing, clippy::unwrap_used, diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 709302eec1..22309334fa 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -1,4 +1,4 @@ -#![allow(clippy::integer_arithmetic)] +#![allow(clippy::arithmetic_side_effects)] use super::signature_sets::{Error as SignatureSetError, *}; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index ddb9ca6ad5..16fa2462f5 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ 
b/consensus/state_processing/src/per_block_processing/tests.rs @@ -960,7 +960,7 @@ async fn fork_spanning_exit() { spec.bellatrix_fork_epoch = Some(Epoch::new(4)); spec.shard_committee_period = 0; - let harness = BeaconChainHarness::builder(MainnetEthSpec::default()) + let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) .deterministic_keypairs(VALIDATOR_COUNT) .mock_execution_layer() diff --git a/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs index 967f642e85..a895567d12 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs @@ -34,7 +34,7 @@ pub fn process_inactivity_updates( .safe_add_assign(spec.inactivity_score_bias)?; } // Decrease the score of all validators for forgiveness when not during a leak - if !state.is_in_inactivity_leak(previous_epoch, spec) { + if !state.is_in_inactivity_leak(previous_epoch, spec)? { let inactivity_score = state.get_inactivity_score_mut(index)?; inactivity_score .safe_sub_assign(min(spec.inactivity_score_recovery_rate, *inactivity_score))?; diff --git a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs index e2aa67a619..19d57130c9 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs @@ -77,7 +77,7 @@ pub fn get_flag_index_deltas( let mut delta = Delta::default(); if unslashed_participating_indices.contains(index)? { - if !state.is_in_inactivity_leak(previous_epoch, spec) { + if !state.is_in_inactivity_leak(previous_epoch, spec)? { let reward_numerator = base_reward .safe_mul(weight)? 
.safe_mul(unslashed_participating_increments)?; diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index 864844080f..b3924cd973 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -138,7 +138,7 @@ impl VerifyOperation for SignedVoluntaryExit { Ok(SigVerifiedOp::new(self, state)) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { smallvec![self.message.epoch] } @@ -156,7 +156,7 @@ impl VerifyOperation for AttesterSlashing { Ok(SigVerifiedOp::new(self, state)) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { smallvec![ self.attestation_1.data.target.epoch, @@ -177,7 +177,7 @@ impl VerifyOperation for ProposerSlashing { Ok(SigVerifiedOp::new(self, state)) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { // Only need a single epoch because the slots of the two headers must be equal. smallvec![self @@ -200,7 +200,7 @@ impl VerifyOperation for SignedBlsToExecutionChange { Ok(SigVerifiedOp::new(self, state)) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { smallvec![] } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 1fa4dee3a0..6a205e307a 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -442,7 +442,7 @@ impl BeaconState { } /// Specialised deserialisation method that uses the `ChainSpec` as context. 
- #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { // Slot is after genesis_time (u64) and genesis_validators_root (Hash256). let slot_start = ::ssz_fixed_len() + ::ssz_fixed_len(); @@ -1734,16 +1734,22 @@ impl BeaconState { previous_epoch: Epoch, val_index: usize, ) -> Result { - self.get_validator(val_index).map(|val| { - val.is_active_at(previous_epoch) - || (val.slashed && previous_epoch + Epoch::new(1) < val.withdrawable_epoch) - }) + let val = self.get_validator(val_index)?; + Ok(val.is_active_at(previous_epoch) + || (val.slashed && previous_epoch.safe_add(Epoch::new(1))? < val.withdrawable_epoch)) } /// Passing `previous_epoch` to this function rather than computing it internally provides /// a tangible speed improvement in state processing. - pub fn is_in_inactivity_leak(&self, previous_epoch: Epoch, spec: &ChainSpec) -> bool { - (previous_epoch - self.finalized_checkpoint().epoch) > spec.min_epochs_to_inactivity_penalty + pub fn is_in_inactivity_leak( + &self, + previous_epoch: Epoch, + spec: &ChainSpec, + ) -> Result { + Ok( + (previous_epoch.safe_sub(self.finalized_checkpoint().epoch)?) + > spec.min_epochs_to_inactivity_penalty, + ) } /// Get the `SyncCommittee` associated with the next slot. 
Useful because sync committees diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 8afef1183b..2db8fbe763 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -1,4 +1,4 @@ -#![allow(clippy::integer_arithmetic)] +#![allow(clippy::arithmetic_side_effects)] use super::BeaconState; use crate::*; diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index d1d63e3c80..69cd6fbb87 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -1,4 +1,4 @@ -#![allow(clippy::integer_arithmetic)] +#![allow(clippy::arithmetic_side_effects)] #![allow(clippy::disallowed_methods)] #![allow(clippy::indexing_slicing)] diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 5957182230..fbb6a3d857 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -451,7 +451,7 @@ impl ChainSpec { Hash256::from(domain) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] pub const fn attestation_subnet_prefix_bits(&self) -> u32 { let attestation_subnet_count_bits = self.attestation_subnet_count.ilog2(); self.attestation_subnet_extra_bits as u32 + attestation_subnet_count_bits diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 77ef6407e8..690138da67 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -106,7 +106,7 @@ impl ExecutionPayload { } } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] /// Returns the maximum size of an execution payload. 
pub fn max_execution_payload_merge_size() -> usize { // Fixed part @@ -117,7 +117,7 @@ impl ExecutionPayload { + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] /// Returns the maximum size of an execution payload. pub fn max_execution_payload_capella_size() -> usize { // Fixed part diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index aefb45490a..874d7cd2bd 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -3,7 +3,7 @@ #![cfg_attr( not(test), deny( - clippy::integer_arithmetic, + clippy::arithmetic_side_effects, clippy::disallowed_methods, clippy::indexing_slicing ) diff --git a/consensus/types/src/participation_list.rs b/consensus/types/src/participation_list.rs index 89a56cb87d..be119fbef2 100644 --- a/consensus/types/src/participation_list.rs +++ b/consensus/types/src/participation_list.rs @@ -1,4 +1,4 @@ -#![allow(clippy::integer_arithmetic)] +#![allow(clippy::arithmetic_side_effects)] use crate::{Hash256, ParticipationFlags, Unsigned, VariableList}; use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, Error, TreeHashCache}; diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 6793fe5574..eb25b57b0d 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -72,7 +72,7 @@ impl SubnetId { .into()) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] /// Computes the set of subnets the node should be subscribed to during the current epoch, /// along with the first epoch in which these subscriptions are no longer valid. 
pub fn compute_subnets_for_epoch( diff --git a/consensus/types/src/test_utils/mod.rs b/consensus/types/src/test_utils/mod.rs index c0333bcfd6..d172342ee6 100644 --- a/consensus/types/src/test_utils/mod.rs +++ b/consensus/types/src/test_utils/mod.rs @@ -1,4 +1,4 @@ -#![allow(clippy::integer_arithmetic)] +#![allow(clippy::arithmetic_side_effects)] use std::fmt::Debug; diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index 43396dedc0..51b79d8d53 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -28,7 +28,7 @@ pub trait TestRandom { impl TestRandom for PhantomData { fn random_for_test(_rng: &mut impl RngCore) -> Self { - PhantomData::default() + PhantomData } } diff --git a/slasher/tests/backend.rs b/slasher/tests/backend.rs index 9e68107de7..fd1a6ae14f 100644 --- a/slasher/tests/backend.rs +++ b/slasher/tests/backend.rs @@ -1,4 +1,4 @@ -#![cfg(all(feature = "lmdb"))] +#![cfg(feature = "lmdb")] use slasher::{config::MDBX_DATA_FILENAME, Config, DatabaseBackend, DatabaseBackendOverride}; use std::fs::File; From a25ec16a6751574c009fa30dbeb62a6ca1ca030d Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 17 Jul 2023 00:14:20 +0000 Subject: [PATCH 15/75] Speed up CI by installing foundry with Github action (#4505) ## Issue Addressed Speed up CI by installing foundry with the GitHub action instead of building Anvil from source. Building anvil from source on GitHub-hosted runners currently takes about 10 mins. Using the `foundry-toolchain` action to install only takes about 2 seconds. 
--- .github/workflows/test-suite.yml | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index e6b75ea7b1..e3342ac370 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -58,8 +58,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install anvil - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Run tests in release run: make test-release release-tests-windows: @@ -78,9 +78,8 @@ jobs: run: | choco install python protoc visualstudio2019-workload-vctools -y npm config set msvs_version 2019 - - name: Install anvil - # Extra feature to work around https://github.com/foundry-rs/foundry/issues/5115 - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil --features ethers/ipc + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Install make run: choco install -y make - uses: KyleMayes/install-llvm-action@v1 @@ -141,8 +140,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install anvil - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Run tests in debug run: make test-debug state-transition-vectors-ubuntu: @@ -197,8 +196,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install anvil - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Run the beacon chain sim that starts from an eth1 contract run: cargo 
run --release --bin simulator eth1-sim merge-transition-ubuntu: @@ -213,8 +212,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install anvil - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Run the beacon chain sim and go through the merge transition run: cargo run --release --bin simulator eth1-sim --post-merge no-eth1-simulator-ubuntu: @@ -243,8 +242,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install anvil - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Run the syncing simulator run: cargo run --release --bin simulator syncing-sim doppelganger-protection-test: From 4435a222219888c41e4bfe79c310367c16b73ef0 Mon Sep 17 00:00:00 2001 From: Divma Date: Mon, 17 Jul 2023 05:31:53 +0000 Subject: [PATCH 16/75] Cleanup unreachable code in `lcli::generate_bootnode_enr` and some tests (#4485) ## Issue Addressed n/a Noticed this while working on something else ## Proposed Changes - leverage the appropriate types to avoid a bunch of `unwrap` and errors ## Additional Info n/a --- .../src/discovery/enr_ext.rs | 19 ++++++++++++------- .../lighthouse_network/src/discovery/mod.rs | 10 ++++++---- .../lighthouse_network/src/types/globals.rs | 5 ++--- lcli/src/generate_bootnode_enr.rs | 14 ++++++-------- 4 files changed, 26 insertions(+), 22 deletions(-) diff --git a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs index 3df7f7c16f..5ce0c55cac 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs @@ -1,7 +1,10 @@ //! 
ENR extension trait to support libp2p integration. use crate::{Enr, Multiaddr, PeerId}; use discv5::enr::{CombinedKey, CombinedPublicKey}; -use libp2p::core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol}; +use libp2p::{ + core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol}, + identity::secp256k1, +}; use tiny_keccak::{Hasher, Keccak}; /// Extend ENR for libp2p types. @@ -36,6 +39,8 @@ pub trait CombinedKeyPublicExt { pub trait CombinedKeyExt { /// Converts a libp2p key into an ENR combined key. fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result; + /// Converts a [`secp256k1::Keypair`] into an ENR [`CombinedKey`]. + fn from_secp256k1(key: &secp256k1::Keypair) -> CombinedKey; } impl EnrExt for Enr { @@ -220,12 +225,7 @@ impl CombinedKeyPublicExt for CombinedPublicKey { impl CombinedKeyExt for CombinedKey { fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result { match key { - Keypair::Secp256k1(key) => { - let secret = - discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes()) - .expect("libp2p key must be valid"); - Ok(CombinedKey::Secp256k1(secret)) - } + Keypair::Secp256k1(key) => Ok(CombinedKey::from_secp256k1(key)), Keypair::Ed25519(key) => { let ed_keypair = discv5::enr::ed25519_dalek::SigningKey::from_bytes( &(key.encode()[..32]) @@ -237,6 +237,11 @@ impl CombinedKeyExt for CombinedKey { Keypair::Ecdsa(_) => Err("Ecdsa keypairs not supported"), } } + fn from_secp256k1(key: &secp256k1::Keypair) -> Self { + let secret = discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes()) .expect("libp2p key must be valid"); + CombinedKey::Secp256k1(secret) + } } // helper function to convert a peer_id to a node_id. 
This is only possible for secp256k1/ed25519 libp2p diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 3ee74ebf01..d4d0baef6b 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1101,6 +1101,7 @@ mod tests { use super::*; use crate::rpc::methods::{MetaData, MetaDataV2}; use enr::EnrBuilder; + use libp2p::identity::secp256k1; use slog::{o, Drain}; use types::{BitVector, MinimalEthSpec, SubnetId}; @@ -1119,10 +1120,10 @@ mod tests { } async fn build_discovery() -> Discovery { - let keypair = libp2p::identity::Keypair::generate_secp256k1(); + let keypair = secp256k1::Keypair::generate(); let mut config = NetworkConfig::default(); config.set_listening_addr(crate::ListenAddress::unused_v4_ports()); - let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap(); + let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); let enr: Enr = build_enr::(&enr_key, &config, &EnrForkId::default()).unwrap(); let log = build_log(slog::Level::Debug, false); let globals = NetworkGlobals::new( @@ -1138,6 +1139,7 @@ mod tests { false, &log, ); + let keypair = Keypair::Secp256k1(keypair); Discovery::new(&keypair, &config, Arc::new(globals), &log) .await .unwrap() @@ -1184,8 +1186,8 @@ mod tests { fn make_enr(subnet_ids: Vec) -> Enr { let mut builder = EnrBuilder::new("v4"); - let keypair = libp2p::identity::Keypair::generate_secp256k1(); - let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap(); + let keypair = secp256k1::Keypair::generate(); + let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); // set the "attnets" field on our ENR let mut bitfield = BitVector::::new(); diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index 295616f36b..97eaaa0051 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ 
b/beacon_node/lighthouse_network/src/types/globals.rs @@ -134,9 +134,8 @@ impl NetworkGlobals { log: &slog::Logger, ) -> NetworkGlobals { use crate::CombinedKeyExt; - let keypair = libp2p::identity::Keypair::generate_secp256k1(); - let enr_key: discv5::enr::CombinedKey = - discv5::enr::CombinedKey::from_libp2p(&keypair).unwrap(); + let keypair = libp2p::identity::secp256k1::Keypair::generate(); + let enr_key: discv5::enr::CombinedKey = discv5::enr::CombinedKey::from_secp256k1(&keypair); let enr = discv5::enr::EnrBuilder::new("v4").build(&enr_key).unwrap(); NetworkGlobals::new( enr, diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index 8662a80476..0584cd6549 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -1,6 +1,7 @@ use clap::ArgMatches; use lighthouse_network::{ - discovery::{build_enr, CombinedKey, CombinedKeyExt, Keypair, ENR_FILENAME}, + discovery::{build_enr, CombinedKey, CombinedKeyExt, ENR_FILENAME}, + libp2p::identity::secp256k1, NetworkConfig, NETWORK_KEY_FILENAME, }; use std::fs::File; @@ -29,8 +30,8 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { config.enr_udp4_port = Some(udp_port); config.enr_tcp6_port = Some(tcp_port); - let local_keypair = Keypair::generate_secp256k1(); - let enr_key = CombinedKey::from_libp2p(&local_keypair)?; + let secp256k1_keypair = secp256k1::Keypair::generate(); + let enr_key = CombinedKey::from_secp256k1(&secp256k1_keypair); let enr_fork_id = EnrForkId { fork_digest: ChainSpec::compute_fork_digest(genesis_fork_version, Hash256::zero()), next_fork_version: genesis_fork_version, @@ -47,13 +48,10 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { .write_all(enr.to_base64().as_bytes()) .map_err(|e| format!("Unable to write ENR to {}: {:?}", ENR_FILENAME, e))?; - let secret_bytes = match local_keypair { - Keypair::Secp256k1(key) => key.secret().to_bytes(), - _ => return Err("Key is not a secp256k1 key".into()), - }; - let mut 
key_file = File::create(output_dir.join(NETWORK_KEY_FILENAME)) .map_err(|e| format!("Unable to create {}: {:?}", NETWORK_KEY_FILENAME, e))?; + + let secret_bytes = secp256k1_keypair.secret().to_bytes(); key_file .write_all(&secret_bytes) .map_err(|e| format!("Unable to write key to {}: {:?}", NETWORK_KEY_FILENAME, e))?; From fc7f1ba6b94ac3300338aad4d5168aaac5df3fac Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 18 Jul 2023 01:48:40 +0000 Subject: [PATCH 17/75] Phase 0 attestation rewards via Beacon API (#4474) ## Issue Addressed Addresses #4026. Beacon-API spec [here](https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Beacon/getAttestationsRewards). Endpoint: `POST /eth/v1/beacon/rewards/attestations/{epoch}` This endpoint already supports post-Altair epochs. This PR adds support for phase 0 rewards calculation. ## Proposed Changes - [x] Attestation rewards API to support phase 0 rewards calculation, re-using logic from `state_processing`. Refactored `get_attestation_deltas` slightly to support computing deltas for a subset of validators. - [x] Add `inclusion_delay` to `ideal_rewards` (`beacon-API` spec update to follow) - [x] Add `inactivity` penalties to both `ideal_rewards` and `total_rewards` (`beacon-API` spec update to follow) - [x] Add tests to compute attestation rewards and compare results with beacon states ## Additional Notes - The extra penalty for missing attestations or being slashed during an inactivity leak is currently not included in the API response (for both phase 0 and Altair) in the spec. - I went with adding `inactivity` as a separate component rather than combining them with the 4 rewards, because this is how it was grouped in [the phase 0 spec](https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#get_attestation_deltas). 
During inactivity leak, all rewards include the optimal reward, and inactivity penalties are calculated separately (see below code snippet from the spec), so it would be quite confusing if we merge them. This would also work better with Altair, because there's no "cancelling" of rewards and inactivity penalties are more separate. - Altair calculation logic (to include inactivity penalties) to be updated in a follow-up PR. ```python def get_attestation_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: """ Return attestation reward/penalty deltas for each validator. """ source_rewards, source_penalties = get_source_deltas(state) target_rewards, target_penalties = get_target_deltas(state) head_rewards, head_penalties = get_head_deltas(state) inclusion_delay_rewards, _ = get_inclusion_delay_deltas(state) _, inactivity_penalties = get_inactivity_penalty_deltas(state) rewards = [ source_rewards[i] + target_rewards[i] + head_rewards[i] + inclusion_delay_rewards[i] for i in range(len(state.validators)) ] penalties = [ source_penalties[i] + target_penalties[i] + head_penalties[i] + inactivity_penalties[i] for i in range(len(state.validators)) ] return rewards, penalties ``` ## Example API Response
Click me ```json { "ideal_rewards": [ { "effective_balance": "1000000000", "head": "6638", "target": "6638", "source": "6638", "inclusion_delay": "9783", "inactivity": "0" }, { "effective_balance": "2000000000", "head": "13276", "target": "13276", "source": "13276", "inclusion_delay": "19565", "inactivity": "0" }, { "effective_balance": "3000000000", "head": "19914", "target": "19914", "source": "19914", "inclusion_delay": "29349", "inactivity": "0" }, { "effective_balance": "4000000000", "head": "26553", "target": "26553", "source": "26553", "inclusion_delay": "39131", "inactivity": "0" }, { "effective_balance": "5000000000", "head": "33191", "target": "33191", "source": "33191", "inclusion_delay": "48914", "inactivity": "0" }, { "effective_balance": "6000000000", "head": "39829", "target": "39829", "source": "39829", "inclusion_delay": "58697", "inactivity": "0" }, { "effective_balance": "7000000000", "head": "46468", "target": "46468", "source": "46468", "inclusion_delay": "68480", "inactivity": "0" }, { "effective_balance": "8000000000", "head": "53106", "target": "53106", "source": "53106", "inclusion_delay": "78262", "inactivity": "0" }, { "effective_balance": "9000000000", "head": "59744", "target": "59744", "source": "59744", "inclusion_delay": "88046", "inactivity": "0" }, { "effective_balance": "10000000000", "head": "66383", "target": "66383", "source": "66383", "inclusion_delay": "97828", "inactivity": "0" }, { "effective_balance": "11000000000", "head": "73021", "target": "73021", "source": "73021", "inclusion_delay": "107611", "inactivity": "0" }, { "effective_balance": "12000000000", "head": "79659", "target": "79659", "source": "79659", "inclusion_delay": "117394", "inactivity": "0" }, { "effective_balance": "13000000000", "head": "86298", "target": "86298", "source": "86298", "inclusion_delay": "127176", "inactivity": "0" }, { "effective_balance": "14000000000", "head": "92936", "target": "92936", "source": "92936", "inclusion_delay": "136959", 
"inactivity": "0" }, { "effective_balance": "15000000000", "head": "99574", "target": "99574", "source": "99574", "inclusion_delay": "146742", "inactivity": "0" }, { "effective_balance": "16000000000", "head": "106212", "target": "106212", "source": "106212", "inclusion_delay": "156525", "inactivity": "0" }, { "effective_balance": "17000000000", "head": "112851", "target": "112851", "source": "112851", "inclusion_delay": "166307", "inactivity": "0" }, { "effective_balance": "18000000000", "head": "119489", "target": "119489", "source": "119489", "inclusion_delay": "176091", "inactivity": "0" }, { "effective_balance": "19000000000", "head": "126127", "target": "126127", "source": "126127", "inclusion_delay": "185873", "inactivity": "0" }, { "effective_balance": "20000000000", "head": "132766", "target": "132766", "source": "132766", "inclusion_delay": "195656", "inactivity": "0" }, { "effective_balance": "21000000000", "head": "139404", "target": "139404", "source": "139404", "inclusion_delay": "205439", "inactivity": "0" }, { "effective_balance": "22000000000", "head": "146042", "target": "146042", "source": "146042", "inclusion_delay": "215222", "inactivity": "0" }, { "effective_balance": "23000000000", "head": "152681", "target": "152681", "source": "152681", "inclusion_delay": "225004", "inactivity": "0" }, { "effective_balance": "24000000000", "head": "159319", "target": "159319", "source": "159319", "inclusion_delay": "234787", "inactivity": "0" }, { "effective_balance": "25000000000", "head": "165957", "target": "165957", "source": "165957", "inclusion_delay": "244570", "inactivity": "0" }, { "effective_balance": "26000000000", "head": "172596", "target": "172596", "source": "172596", "inclusion_delay": "254352", "inactivity": "0" }, { "effective_balance": "27000000000", "head": "179234", "target": "179234", "source": "179234", "inclusion_delay": "264136", "inactivity": "0" }, { "effective_balance": "28000000000", "head": "185872", "target": "185872", 
"source": "185872", "inclusion_delay": "273918", "inactivity": "0" }, { "effective_balance": "29000000000", "head": "192510", "target": "192510", "source": "192510", "inclusion_delay": "283701", "inactivity": "0" }, { "effective_balance": "30000000000", "head": "199149", "target": "199149", "source": "199149", "inclusion_delay": "293484", "inactivity": "0" }, { "effective_balance": "31000000000", "head": "205787", "target": "205787", "source": "205787", "inclusion_delay": "303267", "inactivity": "0" }, { "effective_balance": "32000000000", "head": "212426", "target": "212426", "source": "212426", "inclusion_delay": "313050", "inactivity": "0" } ], "total_rewards": [ { "validator_index": "0", "head": "212426", "target": "212426", "source": "212426", "inclusion_delay": "313050", "inactivity": "0" }, { "validator_index": "32", "head": "212426", "target": "212426", "source": "212426", "inclusion_delay": "313050", "inactivity": "0" }, { "validator_index": "63", "head": "-357771", "target": "-357771", "source": "-357771", "inclusion_delay": "0", "inactivity": "0" } ] } ```
--- Cargo.lock | 1 + beacon_node/beacon_chain/Cargo.toml | 1 + .../beacon_chain/src/attestation_rewards.rs | 238 ++++++++++++++++-- beacon_node/beacon_chain/src/errors.rs | 4 +- beacon_node/beacon_chain/tests/rewards.rs | 192 +++++++++++++- beacon_node/http_api/src/lib.rs | 8 +- .../src/lighthouse/attestation_rewards.rs | 19 +- consensus/state_processing/src/common/base.rs | 12 + .../src/per_epoch_processing/base.rs | 2 +- .../base/rewards_and_penalties.rs | 101 +++++--- .../ef_tests/src/cases/epoch_processing.rs | 2 +- testing/ef_tests/src/cases/rewards.rs | 2 +- 12 files changed, 513 insertions(+), 69 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4bb3ab99e2..fda8cd761f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -630,6 +630,7 @@ dependencies = [ "eth1", "eth2", "ethereum_hashing", + "ethereum_serde_utils", "ethereum_ssz", "ethereum_ssz_derive", "execution_layer", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 7f884f561b..b537327fb3 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -27,6 +27,7 @@ operation_pool = { path = "../operation_pool" } rayon = "1.4.1" serde = "1.0.116" serde_derive = "1.0.116" +ethereum_serde_utils = "0.5.0" slog = { version = "2.5.2", features = ["max_level_trace"] } sloggers = { version = "2.1.1", features = ["json"] } slot_clock = { path = "../../common/slot_clock" } diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index 460bf18bcf..9fc21b668f 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -3,7 +3,8 @@ use eth2::lighthouse::attestation_rewards::{IdealAttestationRewards, TotalAttest use eth2::lighthouse::StandardAttestationRewards; use participation_cache::ParticipationCache; use safe_arith::SafeArith; -use slog::{debug, Logger}; +use serde_utils::quoted_u64::Quoted; +use slog::debug; use 
state_processing::{ common::altair::BaseRewardPerIncrement, per_epoch_processing::altair::{participation_cache, rewards_and_penalties::get_flag_weight}, @@ -15,32 +16,111 @@ use store::consts::altair::{ }; use types::consts::altair::WEIGHT_DENOMINATOR; -use types::{Epoch, EthSpec}; +use types::{BeaconState, Epoch, EthSpec}; use eth2::types::ValidatorId; +use state_processing::common::base::get_base_reward_from_effective_balance; +use state_processing::per_epoch_processing::base::rewards_and_penalties::{ + get_attestation_component_delta, get_attestation_deltas_all, get_attestation_deltas_subset, + get_inactivity_penalty_delta, get_inclusion_delay_delta, +}; +use state_processing::per_epoch_processing::base::validator_statuses::InclusionInfo; +use state_processing::per_epoch_processing::base::{ + TotalBalances, ValidatorStatus, ValidatorStatuses, +}; impl BeaconChain { pub fn compute_attestation_rewards( &self, epoch: Epoch, validators: Vec, - log: Logger, ) -> Result { - debug!(log, "computing attestation rewards"; "epoch" => epoch, "validator_count" => validators.len()); + debug!(self.log, "computing attestation rewards"; "epoch" => epoch, "validator_count" => validators.len()); // Get state - let spec = &self.spec; - let state_slot = (epoch + 1).end_slot(T::EthSpec::slots_per_epoch()); let state_root = self .state_root_at_slot(state_slot)? .ok_or(BeaconChainError::NoStateForSlot(state_slot))?; - let mut state = self + let state = self .get_state(&state_root, Some(state_slot))? 
.ok_or(BeaconChainError::MissingBeaconState(state_root))?; + match state { + BeaconState::Base(_) => self.compute_attestation_rewards_base(state, validators), + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { + self.compute_attestation_rewards_altair(state, validators) + } + } + } + + fn compute_attestation_rewards_base( + &self, + mut state: BeaconState, + validators: Vec, + ) -> Result { + let spec = &self.spec; + let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; + validator_statuses.process_attestations(&state)?; + + let ideal_rewards = + self.compute_ideal_rewards_base(&state, &validator_statuses.total_balances)?; + + let indices_to_attestation_delta = if validators.is_empty() { + get_attestation_deltas_all(&state, &validator_statuses, spec)? + .into_iter() + .enumerate() + .collect() + } else { + let validator_indices = Self::validators_ids_to_indices(&mut state, validators)?; + get_attestation_deltas_subset(&state, &validator_statuses, &validator_indices, spec)? 
+ }; + + let mut total_rewards = vec![]; + + for (index, delta) in indices_to_attestation_delta.into_iter() { + let head_delta = delta.head_delta; + let head = (head_delta.rewards as i64).safe_sub(head_delta.penalties as i64)?; + + let target_delta = delta.target_delta; + let target = (target_delta.rewards as i64).safe_sub(target_delta.penalties as i64)?; + + let source_delta = delta.source_delta; + let source = (source_delta.rewards as i64).safe_sub(source_delta.penalties as i64)?; + + // No penalties associated with inclusion delay + let inclusion_delay = delta.inclusion_delay_delta.rewards; + let inactivity = delta.inactivity_penalty_delta.penalties.wrapping_neg() as i64; + + let rewards = TotalAttestationRewards { + validator_index: index as u64, + head, + target, + source, + inclusion_delay: Some(Quoted { + value: inclusion_delay, + }), + inactivity, + }; + + total_rewards.push(rewards); + } + + Ok(StandardAttestationRewards { + ideal_rewards, + total_rewards, + }) + } + + fn compute_attestation_rewards_altair( + &self, + mut state: BeaconState, + validators: Vec, + ) -> Result { + let spec = &self.spec; + // Calculate ideal_rewards let participation_cache = ParticipationCache::new(&state, spec)?; @@ -71,7 +151,7 @@ impl BeaconChain { let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?; - for effective_balance_eth in 0..=32 { + for effective_balance_eth in 1..=self.max_effective_balance_increment_steps()? { let effective_balance = effective_balance_eth.safe_mul(spec.effective_balance_increment)?; let base_reward = @@ -101,20 +181,12 @@ impl BeaconChain { let validators = if validators.is_empty() { participation_cache.eligible_validator_indices().to_vec() } else { - validators - .into_iter() - .map(|validator| match validator { - ValidatorId::Index(i) => Ok(i as usize), - ValidatorId::PublicKey(pubkey) => state - .get_validator_index(&pubkey)? 
- .ok_or(BeaconChainError::ValidatorPubkeyUnknown(pubkey)), - }) - .collect::, _>>()? + Self::validators_ids_to_indices(&mut state, validators)? }; for validator_index in &validators { let eligible = state.is_eligible_validator(previous_epoch, *validator_index)?; - let mut head_reward = 0u64; + let mut head_reward = 0i64; let mut target_reward = 0i64; let mut source_reward = 0i64; @@ -132,7 +204,7 @@ impl BeaconChain { .map_err(|_| BeaconChainError::AttestationRewardsError)?; if voted_correctly { if flag_index == TIMELY_HEAD_FLAG_INDEX { - head_reward += ideal_reward; + head_reward += *ideal_reward as i64; } else if flag_index == TIMELY_TARGET_FLAG_INDEX { target_reward += *ideal_reward as i64; } else if flag_index == TIMELY_SOURCE_FLAG_INDEX { @@ -152,6 +224,9 @@ impl BeaconChain { head: head_reward, target: target_reward, source: source_reward, + inclusion_delay: None, + // TODO: altair calculation logic needs to be updated to include inactivity penalty + inactivity: 0, }); } @@ -173,6 +248,9 @@ impl BeaconChain { head: 0, target: 0, source: 0, + inclusion_delay: None, + // TODO: altair calculation logic needs to be updated to include inactivity penalty + inactivity: 0, }); match *flag_index { TIMELY_SOURCE_FLAG_INDEX => entry.source += ideal_reward, @@ -192,4 +270,126 @@ impl BeaconChain { total_rewards, }) } + + fn max_effective_balance_increment_steps(&self) -> Result { + let spec = &self.spec; + let max_steps = spec + .max_effective_balance + .safe_div(spec.effective_balance_increment)?; + Ok(max_steps) + } + + fn validators_ids_to_indices( + state: &mut BeaconState, + validators: Vec, + ) -> Result, BeaconChainError> { + let indices = validators + .into_iter() + .map(|validator| match validator { + ValidatorId::Index(i) => Ok(i as usize), + ValidatorId::PublicKey(pubkey) => state + .get_validator_index(&pubkey)? 
+ .ok_or(BeaconChainError::ValidatorPubkeyUnknown(pubkey)), + }) + .collect::, _>>()?; + Ok(indices) + } + + fn compute_ideal_rewards_base( + &self, + state: &BeaconState, + total_balances: &TotalBalances, + ) -> Result, BeaconChainError> { + let spec = &self.spec; + let previous_epoch = state.previous_epoch(); + let finality_delay = previous_epoch + .safe_sub(state.finalized_checkpoint().epoch)? + .as_u64(); + + let ideal_validator_status = ValidatorStatus { + is_previous_epoch_attester: true, + is_slashed: false, + inclusion_info: Some(InclusionInfo { + delay: 1, + ..Default::default() + }), + ..Default::default() + }; + + let mut ideal_attestation_rewards_list = Vec::new(); + + for effective_balance_step in 1..=self.max_effective_balance_increment_steps()? { + let effective_balance = + effective_balance_step.safe_mul(spec.effective_balance_increment)?; + let base_reward = get_base_reward_from_effective_balance::( + effective_balance, + total_balances.current_epoch(), + spec, + )?; + + // compute ideal head rewards + let head = get_attestation_component_delta( + true, + total_balances.previous_epoch_attesters(), + total_balances, + base_reward, + finality_delay, + spec, + )? + .rewards; + + // compute ideal target rewards + let target = get_attestation_component_delta( + true, + total_balances.previous_epoch_target_attesters(), + total_balances, + base_reward, + finality_delay, + spec, + )? + .rewards; + + // compute ideal source rewards + let source = get_attestation_component_delta( + true, + total_balances.previous_epoch_head_attesters(), + total_balances, + base_reward, + finality_delay, + spec, + )? + .rewards; + + // compute ideal inclusion delay rewards + let inclusion_delay = + get_inclusion_delay_delta(&ideal_validator_status, base_reward, spec)? + .0 + .rewards; + + // compute inactivity penalty + let inactivity = get_inactivity_penalty_delta( + &ideal_validator_status, + base_reward, + finality_delay, + spec, + )? 
+ .penalties + .wrapping_neg() as i64; + + let ideal_attestation_rewards = IdealAttestationRewards { + effective_balance, + head, + target, + source, + inclusion_delay: Some(Quoted { + value: inclusion_delay, + }), + inactivity, + }; + + ideal_attestation_rewards_list.push(ideal_attestation_rewards); + } + + Ok(ideal_attestation_rewards_list) + } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 50bcf42653..8714131150 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -24,7 +24,7 @@ use state_processing::{ }, signature_sets::Error as SignatureSetError, state_advance::Error as StateAdvanceError, - BlockProcessingError, BlockReplayError, SlotProcessingError, + BlockProcessingError, BlockReplayError, EpochProcessingError, SlotProcessingError, }; use std::time::Duration; use task_executor::ShutdownReason; @@ -60,6 +60,7 @@ pub enum BeaconChainError { MissingBeaconBlock(Hash256), MissingBeaconState(Hash256), SlotProcessingError(SlotProcessingError), + EpochProcessingError(EpochProcessingError), StateAdvanceError(StateAdvanceError), UnableToAdvanceState(String), NoStateForAttestation { @@ -217,6 +218,7 @@ pub enum BeaconChainError { } easy_from_to!(SlotProcessingError, BeaconChainError); +easy_from_to!(EpochProcessingError, BeaconChainError); easy_from_to!(AttestationValidationError, BeaconChainError); easy_from_to!(SyncCommitteeMessageValidationError, BeaconChainError); easy_from_to!(ExitValidationError, BeaconChainError); diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index b61bea1242..be271804b9 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -9,19 +9,22 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy, RelativeSyncCommittee}, types::{Epoch, EthSpec, Keypair, MinimalEthSpec}, }; +use 
eth2::lighthouse::attestation_rewards::TotalAttestationRewards; +use eth2::lighthouse::StandardAttestationRewards; +use eth2::types::ValidatorId; use lazy_static::lazy_static; +use types::beacon_state::Error as BeaconStateError; +use types::{BeaconState, ChainSpec}; pub const VALIDATOR_COUNT: usize = 64; +type E = MinimalEthSpec; + lazy_static! { static ref KEYPAIRS: Vec = generate_deterministic_keypairs(VALIDATOR_COUNT); } -fn get_harness() -> BeaconChainHarness> { - let mut spec = E::default_spec(); - - spec.altair_fork_epoch = Some(Epoch::new(0)); // We use altair for all tests - +fn get_harness(spec: ChainSpec) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(E::default()) .spec(spec) .keypairs(KEYPAIRS.to_vec()) @@ -35,8 +38,11 @@ fn get_harness() -> BeaconChainHarness> { #[tokio::test] async fn test_sync_committee_rewards() { - let num_block_produced = MinimalEthSpec::slots_per_epoch(); - let harness = get_harness::(); + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + + let harness = get_harness(spec); + let num_block_produced = E::slots_per_epoch(); let latest_block_root = harness .extend_chain( @@ -119,3 +125,175 @@ async fn test_sync_committee_rewards() { mismatches.join(",") ); } + +#[tokio::test] +async fn test_verify_attestation_rewards_base() { + let harness = get_harness(E::default_spec()); + + // epoch 0 (N), only two thirds of validators vote. 
+ let two_thirds = (VALIDATOR_COUNT / 3) * 2; + let two_thirds_validators: Vec = (0..two_thirds).collect(); + harness + .extend_chain( + E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(two_thirds_validators), + ) + .await; + + let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + + // extend slots to beginning of epoch N + 2 + harness.extend_slots(E::slots_per_epoch() as usize).await; + + // compute reward deltas for all validators in epoch N + let StandardAttestationRewards { + ideal_rewards, + total_rewards, + } = harness + .chain + .compute_attestation_rewards(Epoch::new(0), vec![]) + .unwrap(); + + // assert no inactivity penalty for both ideal rewards and individual validators + assert!(ideal_rewards.iter().all(|reward| reward.inactivity == 0)); + assert!(total_rewards.iter().all(|reward| reward.inactivity == 0)); + + // apply attestation rewards to initial balances + let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); + + // verify expected balances against actual balances + let balances: Vec = harness.get_current_state().balances().clone().into(); + assert_eq!(expected_balances, balances); +} + +#[tokio::test] +async fn test_verify_attestation_rewards_base_inactivity_leak() { + let spec = E::default_spec(); + let harness = get_harness(spec.clone()); + + let half = VALIDATOR_COUNT / 2; + let half_validators: Vec = (0..half).collect(); + // target epoch is the epoch where the chain enters inactivity leak + let target_epoch = &spec.min_epochs_to_inactivity_penalty + 1; + + // advance until beginning of epoch N + 1 and get balances + harness + .extend_chain( + (E::slots_per_epoch() * (target_epoch + 1)) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(half_validators.clone()), + ) + .await; + let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + + // extend slots to beginning of 
epoch N + 2 + harness.advance_slot(); + harness + .extend_chain( + E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(half_validators), + ) + .await; + let _slot = harness.get_current_slot(); + + // compute reward deltas for all validators in epoch N + let StandardAttestationRewards { + ideal_rewards, + total_rewards, + } = harness + .chain + .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) + .unwrap(); + + // assert inactivity penalty for both ideal rewards and individual validators + assert!(ideal_rewards.iter().all(|reward| reward.inactivity < 0)); + assert!(total_rewards.iter().all(|reward| reward.inactivity < 0)); + + // apply attestation rewards to initial balances + let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); + + // verify expected balances against actual balances + let balances: Vec = harness.get_current_state().balances().clone().into(); + assert_eq!(expected_balances, balances); +} + +#[tokio::test] +async fn test_verify_attestation_rewards_base_subset_only() { + let harness = get_harness(E::default_spec()); + + // epoch 0 (N), only two thirds of validators vote. 
+ let two_thirds = (VALIDATOR_COUNT / 3) * 2; + let two_thirds_validators: Vec = (0..two_thirds).collect(); + harness + .extend_chain( + E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(two_thirds_validators), + ) + .await; + + // a small subset of validators to compute attestation rewards for + let validators_subset = [0, VALIDATOR_COUNT / 2, VALIDATOR_COUNT - 1]; + + // capture balances before transitioning to N + 2 + let initial_balances = get_validator_balances(harness.get_current_state(), &validators_subset); + + // extend slots to beginning of epoch N + 2 + harness.extend_slots(E::slots_per_epoch() as usize).await; + + let validators_subset_ids: Vec = validators_subset + .into_iter() + .map(|idx| ValidatorId::Index(idx as u64)) + .collect(); + + // compute reward deltas for the subset of validators in epoch N + let StandardAttestationRewards { + ideal_rewards: _, + total_rewards, + } = harness + .chain + .compute_attestation_rewards(Epoch::new(0), validators_subset_ids) + .unwrap(); + + // apply attestation rewards to initial balances + let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); + + // verify expected balances against actual balances + let balances = get_validator_balances(harness.get_current_state(), &validators_subset); + assert_eq!(expected_balances, balances); +} + +/// Apply a vec of `TotalAttestationRewards` to initial balances, and return +fn apply_attestation_rewards( + initial_balances: &[u64], + attestation_rewards: Vec, +) -> Vec { + initial_balances + .iter() + .zip(attestation_rewards) + .map(|(&initial_balance, rewards)| { + let expected_balance = initial_balance as i64 + + rewards.head + + rewards.source + + rewards.target + + rewards.inclusion_delay.map(|q| q.value).unwrap_or(0) as i64 + + rewards.inactivity; + expected_balance as u64 + }) + .collect::>() +} + +fn get_validator_balances(state: BeaconState, validators: &[usize]) -> Vec { + validators 
+ .iter() + .flat_map(|&id| { + state + .balances() + .get(id) + .cloned() + .ok_or(BeaconStateError::BalancesOutOfBounds(id)) + }) + .collect() +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 27bcc4d8a1..1f93f46110 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2034,15 +2034,11 @@ pub fn serve( .and(warp::path::param::()) .and(warp::path::end()) .and(warp::body::json()) - .and(log_filter.clone()) .and_then( - |chain: Arc>, - epoch: Epoch, - validators: Vec, - log: Logger| { + |chain: Arc>, epoch: Epoch, validators: Vec| { blocking_json_task(move || { let attestation_rewards = chain - .compute_attestation_rewards(epoch, validators, log) + .compute_attestation_rewards(epoch, validators) .map_err(|e| match e { BeaconChainError::MissingBeaconState(root) => { warp_utils::reject::custom_not_found(format!( diff --git a/common/eth2/src/lighthouse/attestation_rewards.rs b/common/eth2/src/lighthouse/attestation_rewards.rs index bebd1c661b..fa3f93d06f 100644 --- a/common/eth2/src/lighthouse/attestation_rewards.rs +++ b/common/eth2/src/lighthouse/attestation_rewards.rs @@ -1,4 +1,5 @@ use serde::{Deserialize, Serialize}; +use serde_utils::quoted_u64::Quoted; // Details about the rewards paid for attestations // All rewards in GWei @@ -17,6 +18,12 @@ pub struct IdealAttestationRewards { // Ideal attester's reward for source vote in gwei #[serde(with = "serde_utils::quoted_u64")] pub source: u64, + // Ideal attester's inclusion_delay reward in gwei (phase0 only) + #[serde(skip_serializing_if = "Option::is_none")] + pub inclusion_delay: Option>, + // Ideal attester's inactivity penalty in gwei + #[serde(with = "serde_utils::quoted_i64")] + pub inactivity: i64, } #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] @@ -25,16 +32,20 @@ pub struct TotalAttestationRewards { #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, // attester's reward for head vote in gwei - 
#[serde(with = "serde_utils::quoted_u64")] - pub head: u64, + #[serde(with = "serde_utils::quoted_i64")] + pub head: i64, // attester's reward for target vote in gwei #[serde(with = "serde_utils::quoted_i64")] pub target: i64, // attester's reward for source vote in gwei #[serde(with = "serde_utils::quoted_i64")] pub source: i64, - // TBD attester's inclusion_delay reward in gwei (phase0 only) - // pub inclusion_delay: u64, + // attester's inclusion_delay reward in gwei (phase0 only) + #[serde(skip_serializing_if = "Option::is_none")] + pub inclusion_delay: Option>, + // attester's inactivity penalty in gwei + #[serde(with = "serde_utils::quoted_i64")] + pub inactivity: i64, } #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] diff --git a/consensus/state_processing/src/common/base.rs b/consensus/state_processing/src/common/base.rs index b5cb382721..a8d04ad6cd 100644 --- a/consensus/state_processing/src/common/base.rs +++ b/consensus/state_processing/src/common/base.rs @@ -17,3 +17,15 @@ pub fn get_base_reward( .safe_div(spec.base_rewards_per_epoch) .map_err(Into::into) } + +pub fn get_base_reward_from_effective_balance( + effective_balance: u64, + total_active_balance: u64, + spec: &ChainSpec, +) -> Result { + effective_balance + .safe_mul(spec.base_reward_factor)? + .safe_div(total_active_balance.integer_sqrt())? + .safe_div(spec.base_rewards_per_epoch) + .map_err(Into::into) +} diff --git a/consensus/state_processing/src/per_epoch_processing/base.rs b/consensus/state_processing/src/per_epoch_processing/base.rs index 680563ce74..c5864bd1ef 100644 --- a/consensus/state_processing/src/per_epoch_processing/base.rs +++ b/consensus/state_processing/src/per_epoch_processing/base.rs @@ -36,7 +36,7 @@ pub fn process_epoch( justification_and_finalization_state.apply_changes_to_state(state); // Rewards and Penalties. 
- process_rewards_and_penalties(state, &mut validator_statuses, spec)?; + process_rewards_and_penalties(state, &validator_statuses, spec)?; // Registry Updates. process_registry_updates(state, spec)?; diff --git a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs index e7a4d9c4dc..74c96d8aee 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs @@ -45,7 +45,7 @@ impl AttestationDelta { /// Apply attester and proposer rewards. pub fn process_rewards_and_penalties( state: &mut BeaconState, - validator_statuses: &mut ValidatorStatuses, + validator_statuses: &ValidatorStatuses, spec: &ChainSpec, ) -> Result<(), Error> { if state.current_epoch() == T::genesis_epoch() { @@ -59,7 +59,7 @@ pub fn process_rewards_and_penalties( return Err(Error::ValidatorStatusesInconsistent); } - let deltas = get_attestation_deltas(state, validator_statuses, spec)?; + let deltas = get_attestation_deltas_all(state, validator_statuses, spec)?; // Apply the deltas, erroring on overflow above but not on overflow below (saturating at 0 // instead). @@ -73,10 +73,41 @@ pub fn process_rewards_and_penalties( } /// Apply rewards for participation in attestations during the previous epoch. -pub fn get_attestation_deltas( +pub fn get_attestation_deltas_all( state: &BeaconState, validator_statuses: &ValidatorStatuses, spec: &ChainSpec, +) -> Result, Error> { + get_attestation_deltas(state, validator_statuses, None, spec) +} + +/// Apply rewards for participation in attestations during the previous epoch, and only compute +/// rewards for a subset of validators. 
+pub fn get_attestation_deltas_subset( + state: &BeaconState, + validator_statuses: &ValidatorStatuses, + validators_subset: &Vec, + spec: &ChainSpec, +) -> Result, Error> { + get_attestation_deltas(state, validator_statuses, Some(validators_subset), spec).map(|deltas| { + deltas + .into_iter() + .enumerate() + .filter(|(index, _)| validators_subset.contains(index)) + .collect() + }) +} + +/// Apply rewards for participation in attestations during the previous epoch. +/// If `maybe_validators_subset` specified, only the deltas for the specified validator subset is +/// returned, otherwise deltas for all validators are returned. +/// +/// Returns a vec of validator indices to `AttestationDelta`. +fn get_attestation_deltas( + state: &BeaconState, + validator_statuses: &ValidatorStatuses, + maybe_validators_subset: Option<&Vec>, + spec: &ChainSpec, ) -> Result, Error> { let previous_epoch = state.previous_epoch(); let finality_delay = state @@ -88,6 +119,13 @@ pub fn get_attestation_deltas( let total_balances = &validator_statuses.total_balances; + // Ignore validator if a subset is specified and validator is not in the subset + let include_validator_delta = |idx| match maybe_validators_subset.as_ref() { + None => true, + Some(validators_subset) if validators_subset.contains(&idx) => true, + Some(_) => false, + }; + for (index, validator) in validator_statuses.statuses.iter().enumerate() { // Ignore ineligible validators. All sub-functions of the spec do this except for // `get_inclusion_delay_deltas`. 
It's safe to do so here because any validator that is in @@ -99,41 +137,46 @@ pub fn get_attestation_deltas( let base_reward = get_base_reward(state, index, total_balances.current_epoch(), spec)?; - let source_delta = - get_source_delta(validator, base_reward, total_balances, finality_delay, spec)?; - let target_delta = - get_target_delta(validator, base_reward, total_balances, finality_delay, spec)?; - let head_delta = - get_head_delta(validator, base_reward, total_balances, finality_delay, spec)?; let (inclusion_delay_delta, proposer_delta) = get_inclusion_delay_delta(validator, base_reward, spec)?; - let inactivity_penalty_delta = - get_inactivity_penalty_delta(validator, base_reward, finality_delay, spec)?; - let delta = deltas - .get_mut(index) - .ok_or(Error::DeltaOutOfBounds(index))?; - delta.source_delta.combine(source_delta)?; - delta.target_delta.combine(target_delta)?; - delta.head_delta.combine(head_delta)?; - delta.inclusion_delay_delta.combine(inclusion_delay_delta)?; - delta - .inactivity_penalty_delta - .combine(inactivity_penalty_delta)?; + if include_validator_delta(index) { + let source_delta = + get_source_delta(validator, base_reward, total_balances, finality_delay, spec)?; + let target_delta = + get_target_delta(validator, base_reward, total_balances, finality_delay, spec)?; + let head_delta = + get_head_delta(validator, base_reward, total_balances, finality_delay, spec)?; + let inactivity_penalty_delta = + get_inactivity_penalty_delta(validator, base_reward, finality_delay, spec)?; + + let delta = deltas + .get_mut(index) + .ok_or(Error::DeltaOutOfBounds(index))?; + delta.source_delta.combine(source_delta)?; + delta.target_delta.combine(target_delta)?; + delta.head_delta.combine(head_delta)?; + delta.inclusion_delay_delta.combine(inclusion_delay_delta)?; + delta + .inactivity_penalty_delta + .combine(inactivity_penalty_delta)?; + } if let Some((proposer_index, proposer_delta)) = proposer_delta { - deltas - .get_mut(proposer_index) - 
.ok_or(Error::ValidatorStatusesInconsistent)? - .inclusion_delay_delta - .combine(proposer_delta)?; + if include_validator_delta(proposer_index) { + deltas + .get_mut(proposer_index) + .ok_or(Error::ValidatorStatusesInconsistent)? + .inclusion_delay_delta + .combine(proposer_delta)?; + } } } Ok(deltas) } -fn get_attestation_component_delta( +pub fn get_attestation_component_delta( index_in_unslashed_attesting_indices: bool, attesting_balance: u64, total_balances: &TotalBalances, @@ -216,7 +259,7 @@ fn get_head_delta( ) } -fn get_inclusion_delay_delta( +pub fn get_inclusion_delay_delta( validator: &ValidatorStatus, base_reward: u64, spec: &ChainSpec, @@ -242,7 +285,7 @@ fn get_inclusion_delay_delta( } } -fn get_inactivity_penalty_delta( +pub fn get_inactivity_penalty_delta( validator: &ValidatorStatus, base_reward: u64, finality_delay: u64, diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 31542ba447..5e71187156 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -120,7 +120,7 @@ impl EpochTransition for RewardsAndPenalties { BeaconState::Base(_) => { let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; validator_statuses.process_attestations(state)?; - base::process_rewards_and_penalties(state, &mut validator_statuses, spec) + base::process_rewards_and_penalties(state, &validator_statuses, spec) } BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { altair::process_rewards_and_penalties( diff --git a/testing/ef_tests/src/cases/rewards.rs b/testing/ef_tests/src/cases/rewards.rs index c59ceabe0b..ee0fc265e1 100644 --- a/testing/ef_tests/src/cases/rewards.rs +++ b/testing/ef_tests/src/cases/rewards.rs @@ -118,7 +118,7 @@ impl Case for RewardsTest { let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; validator_statuses.process_attestations(&state)?; - let deltas = 
base::rewards_and_penalties::get_attestation_deltas( + let deltas = base::rewards_and_penalties::get_attestation_deltas_all( &state, &validator_statuses, spec, From 071dd4cd9c0ba4ef62f37d52895b83b15adbb36c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 21 Jul 2023 04:46:52 +0000 Subject: [PATCH 18/75] Add self-hosted runners v2 (#4506) ## Issue Addressed NA ## Proposed Changes Carries on from #4115, with the following modifications: 1. Self-hosted runners are only enabled if `github.repository == sigp/lighthouse`. - This allows forks to still have Github-hosted CI. - This gives us a method to switch back to Github-runners if we have extended downtime on self-hosted. 1. Does not remove any existing dependency builds for Github-hosted runners (e.g., installing the latest Rust). 1. Adds the `WATCH_HOST` environment variable which defines where we expect to find the postgres db in the `watch` tests. This should be set to `host.docker.internal` for the tests to pass on self-hosted runners. ## Additional Info NA Co-authored-by: antondlr --- .github/workflows/test-suite.yml | 24 +++++++++++++++++++----- watch/tests/tests.rs | 11 +++++++++++ 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index e3342ac370..ff7a9cf2f1 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -17,6 +17,10 @@ env: PINNED_NIGHTLY: nightly-2023-04-16 # Prevent Github API rate limiting. LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # Enable self-hosted runners for the sigp repo only. + SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }} + # Self-hosted runners need to reference a different host for `./watch` tests. 
+ WATCH_HOST: ${{ github.repository == 'sigp/lighthouse' && 'host.docker.internal' || 'localhost' }} jobs: target-branch-check: name: target-branch-check @@ -48,11 +52,13 @@ jobs: run: make cargo-fmt release-tests-ubuntu: name: release-tests-ubuntu - runs-on: ubuntu-latest + # Use self-hosted runners only on the sigp repo. + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "large"]') || 'ubuntu-latest' }} needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust + if: env.SELF_HOSTED_RUNNERS == false run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 @@ -64,11 +70,12 @@ jobs: run: make test-release release-tests-windows: name: release-tests-windows - runs-on: windows-2019 + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows"]') || 'windows-2019' }} needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust + if: env.SELF_HOSTED_RUNNERS == false run: rustup update stable - name: Use Node.js uses: actions/setup-node@v2 @@ -83,6 +90,7 @@ jobs: - name: Install make run: choco install -y make - uses: KyleMayes/install-llvm-action@v1 + if: env.SELF_HOSTED_RUNNERS == false with: version: "15.0" directory: ${{ runner.temp }}/llvm @@ -92,11 +100,13 @@ jobs: run: make test-release beacon-chain-tests: name: beacon-chain-tests - runs-on: ubuntu-latest + # Use self-hosted runners only on the sigp repo. 
+ runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "large"]') || 'ubuntu-latest' }} needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust + if: env.SELF_HOSTED_RUNNERS == false run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 @@ -130,11 +140,13 @@ jobs: run: make test-slasher debug-tests-ubuntu: name: debug-tests-ubuntu - runs-on: ubuntu-22.04 + # Use self-hosted runners only on the sigp repo. + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "large"]') || 'ubuntu-latest' }} needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust + if: env.SELF_HOSTED_RUNNERS == false run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 @@ -160,11 +172,13 @@ jobs: run: make run-state-transition-tests ef-tests-ubuntu: name: ef-tests-ubuntu - runs-on: ubuntu-latest + # Use self-hosted runners only on the sigp repo. 
+ runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "small"]') || 'ubuntu-latest' }} needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust + if: env.SELF_HOSTED_RUNNERS == false run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs index acdda8c306..28700ccdce 100644 --- a/watch/tests/tests.rs +++ b/watch/tests/tests.rs @@ -22,6 +22,7 @@ use watch::{ }; use log::error; +use std::env; use std::net::SocketAddr; use std::time::Duration; use tokio::{runtime, task::JoinHandle}; @@ -36,6 +37,11 @@ const VALIDATOR_COUNT: usize = 32; const SLOTS_PER_EPOCH: u64 = 32; const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); +/// Set this environment variable to use a different hostname for connecting to +/// the database. Can be set to `host.docker.internal` for docker-in-docker +/// setups. +const WATCH_HOST_ENV_VARIABLE: &str = "WATCH_HOST"; + fn build_test_config(config: &DatabaseConfig) -> PostgresConfig { let mut postgres_config = PostgresConfig::new(); postgres_config @@ -71,6 +77,10 @@ pub async fn create_test_database(config: &DatabaseConfig) { .expect("Database creation failed"); } +pub fn get_host_from_env() -> String { + env::var(WATCH_HOST_ENV_VARIABLE).unwrap_or_else(|_| "localhost".to_string()) +} + struct TesterBuilder { pub harness: BeaconChainHarness>, pub config: Config, @@ -107,6 +117,7 @@ impl TesterBuilder { database: DatabaseConfig { dbname: random_dbname(), port: database_port, + host: get_host_from_env(), ..Default::default() }, server: ServerConfig { From 85a3340d0edde3c7cab6bd88a874ceacce0a5018 Mon Sep 17 00:00:00 2001 From: Aoi Kurokawa Date: Mon, 31 Jul 2023 01:53:03 +0000 Subject: [PATCH 19/75] Implement liveness BeaconAPI (#4343) ## Issue Addressed #4243 ## Proposed Changes - create a new endpoint for liveness/{endpoint} ## Additional Info This is 
my first PR. --- beacon_node/http_api/src/lib.rs | 40 ++++++++++++++++ beacon_node/http_api/tests/tests.rs | 71 +++++++++++++++++++++++++++++ common/eth2/src/lib.rs | 18 ++++++++ common/eth2/src/types.rs | 7 +++ 4 files changed, 136 insertions(+) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 1f93f46110..8c87c315eb 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3301,6 +3301,45 @@ pub fn serve( }, ); + // POST vaidator/liveness/{epoch} + let post_validator_liveness_epoch = eth_v1 + .and(warp::path("validator")) + .and(warp::path("liveness")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and_then( + |epoch: Epoch, indices: Vec, chain: Arc>| { + blocking_json_task(move || { + // Ensure the request is for either the current, previous or next epoch. + let current_epoch = chain + .epoch() + .map_err(warp_utils::reject::beacon_chain_error)?; + let prev_epoch = current_epoch.saturating_sub(Epoch::new(1)); + let next_epoch = current_epoch.saturating_add(Epoch::new(1)); + + if epoch < prev_epoch || epoch > next_epoch { + return Err(warp_utils::reject::custom_bad_request(format!( + "request epoch {} is more than one epoch from the current epoch {}", + epoch, current_epoch + ))); + } + + let liveness: Vec = indices + .iter() + .cloned() + .map(|index| { + let is_live = chain.validator_seen_at_epoch(index as usize, epoch); + api_types::StandardLivenessResponseData { index, is_live } + }) + .collect(); + + Ok(api_types::GenericResponse::from(liveness)) + }) + }, + ); + // POST lighthouse/liveness let post_lighthouse_liveness = warp::path("lighthouse") .and(warp::path("liveness")) @@ -3963,6 +4002,7 @@ pub fn serve( .uor(post_validator_sync_committee_subscriptions) .uor(post_validator_prepare_beacon_proposer) .uor(post_validator_register_validator) + .uor(post_validator_liveness_epoch) .uor(post_lighthouse_liveness) 
.uor(post_lighthouse_database_reconstruct) .uor(post_lighthouse_database_historical_blocks) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 741ee1ffc0..dc8ca49d20 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -2980,6 +2980,69 @@ impl ApiTester { self } + pub async fn test_post_validator_liveness_epoch(self) -> Self { + let epoch = self.chain.epoch().unwrap(); + let head_state = self.chain.head_beacon_state_cloned(); + let indices = (0..head_state.validators().len()) + .map(|i| i as u64) + .collect::>(); + + // Construct the expected response + let expected: Vec = head_state + .validators() + .iter() + .enumerate() + .map(|(index, _)| StandardLivenessResponseData { + index: index as u64, + is_live: false, + }) + .collect(); + + let result = self + .client + .post_validator_liveness_epoch(epoch, indices.clone()) + .await + .unwrap() + .data; + + assert_eq!(result, expected); + + // Attest to the current slot + self.client + .post_beacon_pool_attestations(self.attestations.as_slice()) + .await + .unwrap(); + + let result = self + .client + .post_validator_liveness_epoch(epoch, indices.clone()) + .await + .unwrap() + .data; + + let committees = head_state + .get_beacon_committees_at_slot(self.chain.slot().unwrap()) + .unwrap(); + let attesting_validators: Vec = committees + .into_iter() + .flat_map(|committee| committee.committee.iter().cloned()) + .collect(); + // All attesters should now be considered live + let expected = expected + .into_iter() + .map(|mut a| { + if attesting_validators.contains(&(a.index as usize)) { + a.is_live = true; + } + a + }) + .collect::>(); + + assert_eq!(result, expected); + + self + } + // Helper function for tests that require a valid RANDAO signature. 
async fn get_test_randao(&self, slot: Slot, epoch: Epoch) -> (u64, SignatureBytes) { let fork = self.chain.canonical_head.cached_head().head_fork(); @@ -4870,6 +4933,14 @@ async fn builder_works_post_capella() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_liveness_epoch() { + ApiTester::new() + .await + .test_post_validator_liveness_epoch() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index e34916beba..5fcddbc46d 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1636,6 +1636,24 @@ impl BeaconNodeHttpClient { .await } + /// `POST validator/liveness/{epoch}` + pub async fn post_validator_liveness_epoch( + &self, + epoch: Epoch, + indices: Vec, + ) -> Result>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("validator") + .push("liveness") + .push(&epoch.to_string()); + + self.post_with_timeout_and_response(path, &indices, self.timeouts.liveness) + .await + } + /// `POST validator/duties/attester/{epoch}` pub async fn post_validator_duties_attester( &self, diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 5f2e1ada7b..f451d3b8f2 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1225,6 +1225,13 @@ impl FromStr for Accept { } } +#[derive(PartialEq, Debug, Serialize, Deserialize)] +pub struct StandardLivenessResponseData { + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + pub is_live: bool, +} + #[derive(Debug, Serialize, Deserialize)] pub struct LivenessRequestData { pub epoch: Epoch, From eafe08780c50bb02bd8a2e9829b894102375bad4 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 31 Jul 2023 01:53:04 +0000 Subject: [PATCH 20/75] Restore upstream arbitrary (#4372) ## Proposed Changes Remove patch for `arbitrary` in favour of upstream, now that the `arithmetic_side_effects` lint no longer triggers in derive macro code. 
## Additional Info ~~Blocked on Rust 1.71.0, to be released 13 July 23~~ --- Cargo.lock | 10 ++++++---- Cargo.toml | 1 - 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fda8cd761f..ec8ec6f5fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -263,7 +263,8 @@ checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" [[package]] name = "arbitrary" version = "1.3.0" -source = "git+https://github.com/michaelsproul/arbitrary?rev=f002b99989b561ddce62e4cf2887b0f8860ae991#f002b99989b561ddce62e4cf2887b0f8860ae991" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2d098ff73c1ca148721f37baad5ea6a465a13f9573aba8641fbbbae8164a54e" dependencies = [ "derive_arbitrary", ] @@ -1768,12 +1769,13 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.3.0" -source = "git+https://github.com/michaelsproul/arbitrary?rev=f002b99989b561ddce62e4cf2887b0f8860ae991#f002b99989b561ddce62e4cf2887b0f8860ae991" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.16", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 8b820d2a21..cb09d26a5d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -90,7 +90,6 @@ resolver = "2" [patch] [patch.crates-io] warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" } -arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b99989b561ddce62e4cf2887b0f8860ae991" } [profile.maxperf] inherits = "release" From b96cfcaaa4ca02d4531537a143df6b2de3b29887 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 31 Jul 2023 01:53:05 +0000 Subject: [PATCH 21/75] Fix bug in lcli transition-blocks and improve pretty-ssz (#4513) ## Proposed Changes - Fix bad `state_root` reuse in `lcli transition-blocks` that resulted in invalid results 
at skipped slots. - Modernise `lcli pretty-ssz` to include fork-generic decoders for `SignedBeaconBlock` and `BeaconState` which respect the `--network`/`--testnet-dir` flag. ## Additional Info Breaking change: the underscore names like `signed_block_merge` are removed in favour of the fork-generic name `SignedBeaconBlock`, and fork-specific names which match the superstruct variants, e.g. `SignedBeaconBlockMerge`. --- lcli/src/main.rs | 4 +- lcli/src/parse_ssz.rs | 71 +++++++++++++++----- lcli/src/transition_blocks.rs | 11 ++- scripts/local_testnet/start_local_testnet.sh | 2 +- 4 files changed, 64 insertions(+), 24 deletions(-) diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 8fbf5638b4..38fec2ebb4 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -968,7 +968,9 @@ fn run( .map_err(|e| format!("Failed to skip slots: {}", e)) } ("pretty-ssz", Some(matches)) => { - run_parse_ssz::(matches).map_err(|e| format!("Failed to pretty print hex: {}", e)) + let network_config = get_network_config()?; + run_parse_ssz::(network_config, matches) + .map_err(|e| format!("Failed to pretty print hex: {}", e)) } ("deploy-deposit-contract", Some(matches)) => { deploy_deposit_contract::run::(env, matches) diff --git a/lcli/src/parse_ssz.rs b/lcli/src/parse_ssz.rs index 5d988ee181..5c306f4fdc 100644 --- a/lcli/src/parse_ssz.rs +++ b/lcli/src/parse_ssz.rs @@ -1,5 +1,6 @@ use clap::ArgMatches; use clap_utils::parse_required; +use eth2_network_config::Eth2NetworkConfig; use serde::Serialize; use snap::raw::Decoder; use ssz::Decode; @@ -26,7 +27,10 @@ impl FromStr for OutputFormat { } } -pub fn run_parse_ssz(matches: &ArgMatches) -> Result<(), String> { +pub fn run_parse_ssz( + network_config: Eth2NetworkConfig, + matches: &ArgMatches, +) -> Result<(), String> { let type_str = matches.value_of("type").ok_or("No type supplied")?; let filename = matches.value_of("ssz-file").ok_or("No file supplied")?; let format = parse_required(matches, "format")?; @@ -44,44 +48,79 @@ pub fn 
run_parse_ssz(matches: &ArgMatches) -> Result<(), String> { bytes }; - info!("Using {} spec", T::spec_name()); - info!("Type: {:?}", type_str); + let spec = &network_config.chain_spec::()?; + info!( + "Using {} network config ({} preset)", + spec.config_name.as_deref().unwrap_or("unknown"), + T::spec_name() + ); + info!("Type: {type_str}"); + // More fork-specific decoders may need to be added in future, but shouldn't be 100% necessary, + // as the fork-generic decoder will always be available (requires correct --network flag). match type_str { - "signed_block_base" => decode_and_print::>(&bytes, format)?, - "signed_block_altair" => decode_and_print::>(&bytes, format)?, - "signed_block_merge" => decode_and_print::>(&bytes, format)?, - "block_base" => decode_and_print::>(&bytes, format)?, - "block_altair" => decode_and_print::>(&bytes, format)?, - "block_merge" => decode_and_print::>(&bytes, format)?, - "state_base" => decode_and_print::>(&bytes, format)?, - "state_altair" => decode_and_print::>(&bytes, format)?, - "state_merge" => decode_and_print::>(&bytes, format)?, + "SignedBeaconBlock" => decode_and_print::>( + &bytes, + |bytes| SignedBeaconBlock::from_ssz_bytes(bytes, spec), + format, + )?, + "SignedBeaconBlockBase" | "SignedBeaconBlockPhase0" => { + decode_and_print(&bytes, SignedBeaconBlockBase::::from_ssz_bytes, format)? + } + "SignedBeaconBlockAltair" => { + decode_and_print(&bytes, SignedBeaconBlockAltair::::from_ssz_bytes, format)? + } + "SignedBeaconBlockMerge" | "SignedBeaconBlockBellatrix" => { + decode_and_print(&bytes, SignedBeaconBlockMerge::::from_ssz_bytes, format)? + } + "SignedBeaconBlockCapella" => decode_and_print( + &bytes, + SignedBeaconBlockCapella::::from_ssz_bytes, + format, + )?, + "BeaconState" => decode_and_print::>( + &bytes, + |bytes| BeaconState::from_ssz_bytes(bytes, spec), + format, + )?, + "BeaconStateBase" | "BeaconStatePhase0" => { + decode_and_print(&bytes, BeaconStateBase::::from_ssz_bytes, format)? 
+ } + "BeaconStateAltair" => { + decode_and_print(&bytes, BeaconStateAltair::::from_ssz_bytes, format)? + } + "BeaconStateMerge" | "BeaconStateBellatrix" => { + decode_and_print(&bytes, BeaconStateMerge::::from_ssz_bytes, format)? + } + "BeaconStateCapella" => { + decode_and_print(&bytes, BeaconStateCapella::::from_ssz_bytes, format)? + } other => return Err(format!("Unknown type: {}", other)), }; Ok(()) } -fn decode_and_print( +fn decode_and_print( bytes: &[u8], + decoder: impl FnOnce(&[u8]) -> Result, output_format: OutputFormat, ) -> Result<(), String> { - let item = T::from_ssz_bytes(bytes).map_err(|e| format!("SSZ decode failed: {:?}", e))?; + let item = decoder(bytes).map_err(|e| format!("SSZ decode failed: {e:?}"))?; match output_format { OutputFormat::Json => { println!( "{}", serde_json::to_string(&item) - .map_err(|e| format!("Unable to write object to JSON: {:?}", e))? + .map_err(|e| format!("Unable to write object to JSON: {e:?}"))? ); } OutputFormat::Yaml => { println!( "{}", serde_yaml::to_string(&item) - .map_err(|e| format!("Unable to write object to YAML: {:?}", e))? + .map_err(|e| format!("Unable to write object to YAML: {e:?}"))? 
); } } diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 85705177dc..23b0ae2620 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -73,9 +73,10 @@ use eth2::{ }; use eth2_network_config::Eth2NetworkConfig; use ssz::Encode; +use state_processing::state_advance::complete_state_advance; use state_processing::{ - block_signature_verifier::BlockSignatureVerifier, per_block_processing, per_slot_processing, - BlockSignatureStrategy, ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, + block_signature_verifier::BlockSignatureVerifier, per_block_processing, BlockSignatureStrategy, + ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, }; use std::borrow::Cow; use std::fs::File; @@ -332,10 +333,8 @@ fn do_transition( // Transition the parent state to the block slot. let t = Instant::now(); - for i in pre_state.slot().as_u64()..block.slot().as_u64() { - per_slot_processing(&mut pre_state, Some(state_root), spec) - .map_err(|e| format!("Failed to advance slot on iteration {}: {:?}", i, e))?; - } + complete_state_advance(&mut pre_state, Some(state_root), block.slot(), spec) + .map_err(|e| format!("Unable to perform complete advance: {e:?}"))?; debug!("Slot processing: {:?}", t.elapsed()); let t = Instant::now(); diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index 64111d5627..4b8357b993 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -103,7 +103,7 @@ echo "executing: ./setup.sh >> $LOG_DIR/setup.log" ./setup.sh >> $LOG_DIR/setup.log 2>&1 # Update future hardforks time in the EL genesis file based on the CL genesis time -GENESIS_TIME=$(lcli pretty-ssz state_merge $TESTNET_DIR/genesis.ssz | jq | grep -Po 'genesis_time": "\K.*\d') +GENESIS_TIME=$(lcli pretty-ssz --testnet-dir $TESTNET_DIR BeaconState $TESTNET_DIR/genesis.ssz | jq | grep -Po 'genesis_time": "\K.*\d') echo $GENESIS_TIME 
CAPELLA_TIME=$((GENESIS_TIME + (CAPELLA_FORK_EPOCH * 32 * SECONDS_PER_SLOT))) echo $CAPELLA_TIME From b5337c0ea57adb30bd933ea2a7201fce12372f6f Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 31 Jul 2023 01:53:06 +0000 Subject: [PATCH 22/75] Fix incorrect ideal rewards calculation (#4520) ## Issue Addressed The PR fixes a bug where the the ideal rewards for source and head were incorrectly set. Output from testing a validator that performed optimally in a Phase 0 epoch , note the `source` and `target` under ideal rewards is incorrect (compared to the actual `total_rewards` below): ```json { "ideal_rewards": [ ... { "effective_balance": "32000000000", "head": "18771", "target": "18770", "source": "18729", "inclusion_delay": "17083", "inactivity": "0" } ], "total_rewards": [ { "validator_index": "0", "head": "18729", "target": "18770", "source": "18771", "inclusion_delay": "17083", "inactivity": "0" } ] ``` --- beacon_node/beacon_chain/src/attestation_rewards.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index 9fc21b668f..94bd28f98f 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -330,7 +330,7 @@ impl BeaconChain { // compute ideal head rewards let head = get_attestation_component_delta( true, - total_balances.previous_epoch_attesters(), + total_balances.previous_epoch_head_attesters(), total_balances, base_reward, finality_delay, @@ -352,7 +352,7 @@ impl BeaconChain { // compute ideal source rewards let source = get_attestation_component_delta( true, - total_balances.previous_epoch_head_attesters(), + total_balances.previous_epoch_attesters(), total_balances, base_reward, finality_delay, From 117802cef130bedac96a0b5441bb46e6b03ad491 Mon Sep 17 00:00:00 2001 From: Gua00va Date: Mon, 31 Jul 2023 01:53:07 +0000 Subject: [PATCH 23/75] Add Eth Version Header 
(#4528) ## Issue Addressed Closes #4525 ## Proposed Changes `GET /eth/v1/validator/blinded_blocks` endpoint and `GET /eth/v1/validator/blocks` now send `Eth-Version` header. Co-authored-by: Gua00va <105484243+Gua00va@users.noreply.github.com> --- beacon_node/http_api/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 8c87c315eb..b45c4285aa 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2697,6 +2697,7 @@ pub fn serve( fork_versioned_response(endpoint_version, fork_name, block) .map(|response| warp::reply::json(&response).into_response()) + .map(|res| add_consensus_version_header(res, fork_name)) }, ); @@ -2754,6 +2755,7 @@ pub fn serve( // Pose as a V2 endpoint so we return the fork `version`. fork_versioned_response(V2, fork_name, block) .map(|response| warp::reply::json(&response).into_response()) + .map(|res| add_consensus_version_header(res, fork_name)) }, ); From 8654f20028704512d472f96c42512cc3d206d319 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 31 Jul 2023 01:53:08 +0000 Subject: [PATCH 24/75] Development feature flag - Disable backfill (#4537) Often when testing I have to create a hack which is annoying to maintain. I think it might be handy to add a custom compile-time flag that developers can use if they want to test things locally without having to backfill a bunch of blocks. There is probably an argument to have a feature called "backfill" which is enabled by default and can be disabled. I didn't go this route because I think it's counter-intuitive to have a feature that enables a core and necessary behaviour. 
--- beacon_node/network/Cargo.toml | 6 +++++- beacon_node/network/src/service.rs | 6 ++++++ beacon_node/network/src/sync/manager.rs | 3 +++ 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index a5cc12bbc5..c37b0fa45d 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -48,4 +48,8 @@ operation_pool = { path = "../operation_pool" } execution_layer = { path = "../execution_layer" } beacon_processor = { path = "../beacon_processor" } parking_lot = "0.12.0" -environment = { path = "../../lighthouse/environment" } \ No newline at end of file +environment = { path = "../../lighthouse/environment" } + +[features] +# NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill +disable-backfill = [] diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index c2719477f1..b517d57df3 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -232,6 +232,12 @@ impl NetworkService { // build the channels for external comms let (network_senders, network_recievers) = NetworkSenders::new(); + #[cfg(feature = "disable-backfill")] + warn!( + network_log, + "Backfill is disabled. DO NOT RUN IN PRODUCTION" + ); + // try and construct UPnP port mappings if required. if let Some(upnp_config) = crate::nat::UPnPConfig::from_config(config) { let upnp_log = network_log.new(o!("service" => "UPnP")); diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 72542752c5..670e88eac5 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -395,6 +395,7 @@ impl SyncManager { // If we would otherwise be synced, first check if we need to perform or // complete a backfill sync. 
+ #[cfg(not(feature = "disable-backfill"))] if matches!(sync_state, SyncState::Synced) { // Determine if we need to start/resume/restart a backfill sync. match self.backfill_sync.start(&mut self.network) { @@ -419,6 +420,7 @@ impl SyncManager { } Some((RangeSyncType::Finalized, start_slot, target_slot)) => { // If there is a backfill sync in progress pause it. + #[cfg(not(feature = "disable-backfill"))] self.backfill_sync.pause(); SyncState::SyncingFinalized { @@ -428,6 +430,7 @@ } Some((RangeSyncType::Head, start_slot, target_slot)) => { // If there is a backfill sync in progress pause it. + #[cfg(not(feature = "disable-backfill"))] self.backfill_sync.pause(); SyncState::SyncingHead {
} unused_port = {path = "../../common/unused_port"} store = { path = "../store" } +bytes = "1.1.0" [dev-dependencies] environment = { path = "../../lighthouse/environment" } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index b45c4285aa..7d1475809a 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -29,6 +29,7 @@ use beacon_chain::{ BeaconChainTypes, ProduceBlockVerification, WhenSlotSkipped, }; pub use block_id::BlockId; +use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode, @@ -1236,6 +1237,41 @@ pub fn serve( }, ); + let post_beacon_blocks_ssz = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(warp::path::end()) + .and(warp::body::bytes()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .and_then( + |block_bytes: Bytes, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| async move { + let block = match SignedBeaconBlock::::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) { + Ok(data) => data, + Err(e) => { + return Err(warp_utils::reject::custom_bad_request(format!("{:?}", e))) + } + }; + publish_blocks::publish_block( + None, + ProvenancedBlock::local(Arc::new(block)), + chain, + &network_tx, + log, + BroadcastValidation::default(), + ) + .await + .map(|()| warp::reply().into_response()) + }, + ); + let post_beacon_blocks_v2 = eth_v2 .and(warp::path("beacon")) .and(warp::path("blocks")) @@ -1274,6 +1310,57 @@ pub fn serve( }, ); + let post_beacon_blocks_v2_ssz = eth_v2 + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::body::bytes()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .then( + |validation_level: api_types::BroadcastValidationQuery, + block_bytes: Bytes, + chain: Arc>, + network_tx: 
UnboundedSender>, + log: Logger| async move { + let block = match SignedBeaconBlock::::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) { + Ok(data) => data, + Err(_) => { + return warp::reply::with_status( + StatusCode::BAD_REQUEST, + eth2::StatusCode::BAD_REQUEST, + ) + .into_response(); + } + }; + match publish_blocks::publish_block( + None, + ProvenancedBlock::local(Arc::new(block)), + chain, + &network_tx, + log, + validation_level.broadcast_validation, + ) + .await + { + Ok(()) => warp::reply().into_response(), + Err(e) => match warp_utils::reject::handle_rejection(e).await { + Ok(reply) => reply.into_response(), + Err(_) => warp::reply::with_status( + StatusCode::INTERNAL_SERVER_ERROR, + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response(), + }, + } + }, + ); + /* * beacon/blocks */ @@ -3984,7 +4071,10 @@ pub fn serve( .boxed() .uor( warp::post().and( - post_beacon_blocks + warp::header::exact("Content-Type", "application/octet-stream") + // Routes which expect `application/octet-stream` go within this `and`. + .and(post_beacon_blocks_ssz.uor(post_beacon_blocks_v2_ssz)) + .uor(post_beacon_blocks) .uor(post_beacon_blinded_blocks) .uor(post_beacon_blocks_v2) .uor(post_beacon_blinded_blocks_v2) diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 4819dd99e7..457276d702 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -175,6 +175,48 @@ pub async fn gossip_full_pass() { .block_is_known_to_fork_choice(&block.canonical_root())); } +// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn gossip_full_pass_ssz() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2_ssz(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. 
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn consensus_invalid() { diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index dc8ca49d20..7c3872925a 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1247,6 +1247,22 @@ impl ApiTester { self } + pub async fn test_post_beacon_blocks_ssz_valid(mut self) -> Self { + let next_block = &self.next_block; + + self.client + .post_beacon_blocks_ssz(next_block) + .await + .unwrap(); + + assert!( + self.network_rx.network_recv.recv().await.is_some(), + "valid blocks should be sent to network" + ); + + self + } + pub async fn test_post_beacon_blocks_invalid(mut self) -> Self { let block = self .harness @@ -1270,6 +1286,29 @@ impl ApiTester { self } + pub async fn test_post_beacon_blocks_ssz_invalid(mut self) -> Self { + let block = self + .harness + .make_block_with_modifier( + self.harness.get_current_state(), + self.harness.get_current_slot(), + |b| { + *b.state_root_mut() = Hash256::zero(); + }, + ) + .await + .0; + + assert!(self.client.post_beacon_blocks_ssz(&block).await.is_err()); + + assert!( + self.network_rx.network_recv.recv().await.is_some(), + "gossip valid blocks should be sent to network" + ); + + self + } + pub async fn test_beacon_blocks(self) -> Self { for block_id in self.interesting_block_ids() { let expected = block_id @@ -4451,6 +4490,22 @@ async fn post_beacon_blocks_valid() { ApiTester::new().await.test_post_beacon_blocks_valid().await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_beacon_blocks_ssz_valid() { + ApiTester::new() + .await + .test_post_beacon_blocks_ssz_valid() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_post_beacon_blocks_ssz_invalid() { + ApiTester::new() + .await + .test_post_beacon_blocks_ssz_invalid() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn 
post_beacon_blocks_invalid() { ApiTester::new() diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 5fcddbc46d..661f9a09eb 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -21,10 +21,14 @@ use futures_util::StreamExt; use lighthouse_network::PeerId; use pretty_reqwest_error::PrettyReqwestError; pub use reqwest; -use reqwest::{IntoUrl, RequestBuilder, Response}; +use reqwest::{ + header::{HeaderMap, HeaderValue}, + Body, IntoUrl, RequestBuilder, Response, +}; pub use reqwest::{StatusCode, Url}; pub use sensitive_url::{SensitiveError, SensitiveUrl}; use serde::{de::DeserializeOwned, Serialize}; +use ssz::Encode; use std::convert::TryFrom; use std::fmt; use std::iter::Iterator; @@ -322,6 +326,25 @@ impl BeaconNodeHttpClient { ok_or_error(response).await } + /// Generic POST function supporting arbitrary responses and timeouts. + async fn post_generic_with_ssz_body, U: IntoUrl>( + &self, + url: U, + body: T, + timeout: Option, + ) -> Result { + let mut builder = self.client.post(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let response = builder + .header("Content-Type", "application/octet-stream") + .body(body) + .send() + .await?; + ok_or_error(response).await + } + /// Generic POST function supporting arbitrary responses and timeouts. async fn post_generic_with_consensus_version( &self, @@ -342,6 +365,31 @@ impl BeaconNodeHttpClient { ok_or_error(response).await } + /// Generic POST function supporting arbitrary responses and timeouts. 
+ async fn post_generic_with_consensus_version_and_ssz_body, U: IntoUrl>( + &self, + url: U, + body: T, + timeout: Option, + fork: ForkName, + ) -> Result { + let mut builder = self.client.post(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let mut headers = HeaderMap::new(); + headers.insert( + CONSENSUS_VERSION_HEADER, + HeaderValue::from_str(&fork.to_string()).expect("Failed to create header value"), + ); + headers.insert( + "Content-Type", + HeaderValue::from_static("application/octet-stream"), + ); + let response = builder.headers(headers).body(body).send().await?; + ok_or_error(response).await + } + /// `GET beacon/genesis` /// /// ## Errors @@ -654,6 +702,26 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST beacon/blocks` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn post_beacon_blocks_ssz>( + &self, + block: &SignedBeaconBlock, + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blocks"); + + self.post_generic_with_ssz_body(path, block.as_ssz_bytes(), Some(self.timeouts.proposal)) + .await?; + + Ok(()) + } + /// `POST beacon/blinded_blocks` /// /// Returns `Ok(None)` on a 404 error. 
@@ -727,6 +795,23 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST v2/beacon/blocks` + pub async fn post_beacon_blocks_v2_ssz>( + &self, + block: &SignedBeaconBlock, + validation_level: Option, + ) -> Result<(), Error> { + self.post_generic_with_consensus_version_and_ssz_body( + self.post_beacon_blocks_v2_path(validation_level)?, + block.as_ssz_bytes(), + Some(self.timeouts.proposal), + block.message().body().fork_name(), + ) + .await?; + + Ok(()) + } + /// `POST v2/beacon/blinded_blocks` pub async fn post_beacon_blinded_blocks_v2( &self, From cb275e746dce455d473bef4d653fe85748477718 Mon Sep 17 00:00:00 2001 From: chonghe Date: Mon, 31 Jul 2023 23:51:38 +0000 Subject: [PATCH 26/75] Update Lighthouse book FAQ (#4510) Some updates in the FAQ based on issues seen on Discord. Additionally, corrected the disk usage on the default SPRP as the previously provided value is not correct. Co-authored-by: chonghe <44791194+chong-he@users.noreply.github.com> --- book/src/advanced_database.md | 4 +-- book/src/advanced_networking.md | 2 +- book/src/builders.md | 2 ++ book/src/faq.md | 51 +++++++++++++++++++++++++++++++-- 4 files changed, 53 insertions(+), 6 deletions(-) diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index d951104054..20c5d7443b 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -28,8 +28,8 @@ some example values. | Research | 32 | 3.4 TB | 155 ms | | Block explorer/analysis | 128 | 851 GB | 620 ms | | Enthusiast (prev. default) | 2048 | 53.6 GB | 10.2 s | -| Hobbyist | 4096 | 26.8 GB | 20.5 s | -| Validator only (default) | 8192 | 8.1 GB | 41 s | +| Hobbyist | 4096 | 26.8 GB | 20.5 s | +| Validator only (default) | 8192 | 12.7 GB | 41 s | *Last update: May 2023. 
diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index 586503cb96..ba07a6f87f 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -172,7 +172,7 @@ In order to do so, lighthouse provides the following CLI options/parameters. advertises some address, must be reachable both over UDP and TCP. -In the general case, an user will not require to set these explicitly. Update +In the general case, a user will not require to set these explicitly. Update these options only if you can guarantee your node is reachable with these values. diff --git a/book/src/builders.md b/book/src/builders.md index 6db360d70e..2be4841ddf 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -201,6 +201,8 @@ else: use local payload ``` +If you would like to always use the builder payload, you can add the flag `--always-prefer-builder-payload` to the beacon node. + ## Checking your builder config You can check that your builder is configured correctly by looking for these log messages. diff --git a/book/src/faq.md b/book/src/faq.md index d3e25438a7..15c5757064 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -10,6 +10,8 @@ - [My beacon node logs `WARN BlockProcessingFailure outcome: MissingBeaconBlock`, what should I do?](#bn-missing-beacon) - [After checkpoint sync, the progress of `downloading historical blocks` is slow. Why?](#bn-download-slow) - [My beacon node logs `WARN Error processing HTTP API request`, what should I do?](#bn-http) +- [My beacon node logs `WARN Error signalling fork choice waiter`, what should I do?](#bn-fork-choice) +- [My beacon node logs `ERRO Aggregate attestation queue full`, what should I do?](#bn-queue-full) ## [Validator](#validator-1) - [Why does it take so long for a validator to be activated?](#vc-activation) @@ -30,7 +32,7 @@ - [My beacon node and validator client are on different servers. 
How can I point the validator client to the beacon node?](#net-bn-vc) - [Should I do anything to the beacon node or validator client settings if I have a relocation of the node / change of IP address?](#net-ip) - [How to change the TCP/UDP port 9000 that Lighthouse listens on?](#net-port) - +- [Lighthouse `v4.3.0` introduces a change where a node will subscribe to only 2 subnets in total. I am worried that this will impact my validators return.](#net-subnet) ## [Miscellaneous](#miscellaneous-1) - [What should I do if I lose my slashing protection database?](#misc-slashing) @@ -74,7 +76,7 @@ The `WARN Execution engine called failed` log is shown when the beacon node cann `error: Reqwest(reqwest::Error { kind: Request, url: Url { scheme: "http", cannot_be_a_base: false, username: "", password: None, host: Some(Ipv4(127.0.0.1)), port: Some(8551), path: "/", query: None, fragment: None }, source: TimedOut }), service: exec` -which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. One option is to add the flag `--execution-timeout-multiplier 3` to the beacon node. However, if the error persists, it is worth digging further to find out the cause. There are a few reasons why this can occur: +which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. One option is to add the flags `--execution-timeout-multiplier 3` and `--disable-lock-timeouts` to the beacon node. However, if the error persists, it is worth digging further to find out the cause. There are a few reasons why this can occur: 1. The execution engine is not synced. Check the log of the execution engine to make sure that it is synced. If it is syncing, wait until it is synced and the error will disappear. You will see the beacon node logs `INFO Execution engine online` when it is synced. 1. The computer is overloaded. 
Check the CPU and RAM usage to see if it has overloaded. You can use `htop` to check for CPU and RAM usage. 1. Your SSD is slow. Check if your SSD is in "The Bad" list [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). If your SSD is in "The Bad" list, it means it cannot keep in sync to the network and you may want to consider upgrading to a better SSD. @@ -170,6 +172,27 @@ ERRO Failed to download attester duties err: FailedToDownloadAttesters("Som This means that the validator client is sending requests to the beacon node. However, as the beacon node is still syncing, it is therefore unable to fulfil the request. The error will disappear once the beacon node is synced. +### My beacon node logs `WARN Error signalling fork choice waiter`, what should I do? + +An example of the full log is shown below: + +``` +WARN Error signalling fork choice waiter slot: 6763073, error: ForkChoiceSignalOutOfOrder { current: Slot(6763074), latest: Slot(6763073) }, service: state_advance +``` + +This suggests that the computer resources are being overwhelmed. It could be due to high CPU usage or high disk I/O usage. This can happen, e.g., when the beacon node is downloading historical blocks, or when the execution client is syncing. The error will disappear when the resources used return to normal or when the node is synced. + + +### My beacon node logs `ERRO Aggregate attestation queue full`, what should I do? + +An example of the full log is shown below: +``` +ERRO Aggregate attestation queue full, queue_len: 4096, msg: the system has insufficient resources for load, module: network::beacon_processor:1542 +``` + +This suggests that the computer resources are being overwhelmed. It could be due to high CPU usage or high disk I/O usage. This can happen, e.g., when the beacon node is downloading historical blocks, or when the execution client is syncing. The error will disappear when the resources used return to normal or when the node is synced. 
+ + ## Validator ### Why does it take so long for a validator to be activated? @@ -279,12 +302,26 @@ The first thing is to ensure both consensus and execution clients are synced wit - the internet is working well - you have sufficient peers -You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). Once the above points are good, missing attestation should be a rare occurrence. +You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). + +Another cause for missing attestations is delays during block processing. When this happens, the debug logs will show (debug logs can be found under `$datadir/beacon/logs`): + +``` +DEBG Delayed head block set_as_head_delay: Some(93.579425ms), imported_delay: Some(1.460405278s), observed_delay: Some(2.540811921s), block_delay: 4.094796624s, slot: 6837344, proposer_index: 211108, block_root: 0x2c52231c0a5a117401f5231585de8aa5dd963bc7cbc00c544e681342eedd1700, service: beacon +``` + +The fields to look for are `imported_delay > 1s` and `observed_delay < 3s`. The `imported_delay` is how long the node took to process the block. The `imported_delay` of larger than 1 second suggests that there is slowness in processing the block. It could be due to high CPU usage, high I/O disk usage or the clients are doing some background maintenance processes. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). The `observed_delay` of less than 3 seconds means that the block is not arriving late from the block proposer. Combining the above, this implies that the validator should have been able to attest to the block, but failed due to slowness in the node processing the block. + ### Sometimes I miss the attestation head vote, resulting in penalty. Is this normal? 
In general, it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch and if the proposer of that slot releases the block late, then you will get penalised for missing the target and head votes. Your attestation performance does not only depend on your own setup, but also on everyone elses performance. +You could also check for the sync aggregate participation percentage on block explorers such as [beaconcha.in](https://beaconcha.in/). A low sync aggregate participation percentage (e.g., 60-70%) indicates that the block that you are assigned to attest to may be published late. As a result, your validator fails to correctly attest to the block. + +Another possible reason for missing the head vote is due to a chain "reorg". A reorg can happen if the proposer publishes block `n` late, and the proposer of block `n+1` builds upon block `n-1` instead of `n`. This is called a "reorg". Due to the reorg, block `n` was never included in the chain. If you are assigned to attest at slot `n`, it is possible you may still attest to block `n` despite most of the network recognizing the block as being late. In this case you will miss the head reward. + + ### Can I submit a voluntary exit message without running a beacon node? Yes. Beaconcha.in provides the tool to broadcast the message. You can create the voluntary exit message file with [ethdo](https://github.com/wealdtech/ethdo/releases/tag/v1.30.0) and submit the message via the [beaconcha.in](https://beaconcha.in/tools/broadcast) website. A guide on how to use `ethdo` to perform voluntary exit can be found [here](https://github.com/eth-educators/ethstaker-guides/blob/main/voluntary-exit.md). @@ -425,6 +462,14 @@ No. Lighthouse will auto-detect the change and update your Ethereum Node Record ### How to change the TCP/UDP port 9000 that Lighthouse listens on? Use the flag ```--port ``` in the beacon node. 
This flag can be useful when you are running two beacon nodes at the same time. You can leave one beacon node as the default port 9000, and configure the second beacon node to listen on, e.g., ```--port 9001```. +### Lighthouse `v4.3.0` introduces a change where a node will subscribe to only 2 subnets in total. I am worried that this will impact my validators' return. + +Previously, having more validators means subscribing to more subnets. Since the change, a node will now only subscribe to 2 subnets in total. This will bring about significant reductions in bandwidth for nodes with multiple validators. + +While subscribing to more subnets can ensure you have peers on a wider range of subnets, these subscriptions consume resources and bandwidth. This does not significantly increase the performance of the node, however it does benefit other nodes on the network. + +If you would still like to subscribe to all subnets, you can use the flag `--subscribe-all-subnets`. This may improve the block rewards by 1-5%, though it comes at the cost of a much higher bandwidth requirement. + ## Miscellaneous ### What should I do if I lose my slashing protection database? From 73764d0dd2c0a457b7f438f003b4dbc6b94a65e3 Mon Sep 17 00:00:00 2001 From: Gua00va Date: Mon, 31 Jul 2023 23:51:39 +0000 Subject: [PATCH 27/75] Deprecate `exchangeTransitionConfiguration` functionality (#4517) ## Issue Addressed Solves #4442 ## Proposed Changes EL clients log errors if we don't query this endpoint, but they are making releases that remove this error logging. After those are out we can stop calling it, after which point EL teams will remove the endpoint entirely.
Refer https://hackmd.io/@n0ble/deprecate-exchgTC --- .../beacon_chain/src/merge_readiness.rs | 17 ----- beacon_node/client/src/builder.rs | 3 - beacon_node/client/src/notifier.rs | 8 --- beacon_node/execution_layer/src/engine_api.rs | 11 +-- .../execution_layer/src/engine_api/http.rs | 26 ------- beacon_node/execution_layer/src/lib.rs | 67 ------------------- .../src/test_utils/handle_rpc.rs | 9 --- .../execution_layer/src/test_utils/mod.rs | 1 - .../src/test_rig.rs | 10 --- 9 files changed, 3 insertions(+), 149 deletions(-) diff --git a/beacon_node/beacon_chain/src/merge_readiness.rs b/beacon_node/beacon_chain/src/merge_readiness.rs index c66df39eed..bfc2b36fdb 100644 --- a/beacon_node/beacon_chain/src/merge_readiness.rs +++ b/beacon_node/beacon_chain/src/merge_readiness.rs @@ -86,9 +86,6 @@ pub enum MergeReadiness { #[serde(serialize_with = "serialize_uint256")] current_difficulty: Option, }, - /// The transition configuration with the EL failed, there might be a problem with - /// connectivity, authentication or a difference in configuration. - ExchangeTransitionConfigurationFailed { error: String }, /// The EL can be reached and has the correct configuration, however it's not yet synced. NotSynced, /// The user has not configured this node to use an execution endpoint. @@ -109,12 +106,6 @@ impl fmt::Display for MergeReadiness { params, current_difficulty ) } - MergeReadiness::ExchangeTransitionConfigurationFailed { error } => write!( - f, - "Could not confirm the transition configuration with the \ - execution endpoint: {:?}", - error - ), MergeReadiness::NotSynced => write!( f, "The execution endpoint is connected and configured, \ @@ -155,14 +146,6 @@ impl BeaconChain { /// Attempts to connect to the EL and confirm that it is ready for the merge. 
pub async fn check_merge_readiness(&self) -> MergeReadiness { if let Some(el) = self.execution_layer.as_ref() { - if let Err(e) = el.exchange_transition_configuration(&self.spec).await { - // The EL was either unreachable, responded with an error or has a different - // configuration. - return MergeReadiness::ExchangeTransitionConfigurationFailed { - error: format!("{:?}", e), - }; - } - if !el.is_synced_for_notifier().await { // The EL is not synced. return MergeReadiness::NotSynced; diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index b1a507eaa5..14edbb9730 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -850,9 +850,6 @@ where execution_layer.spawn_clean_proposer_caches_routine::( beacon_chain.slot_clock.clone(), ); - - // Spawns a routine that polls the `exchange_transition_configuration` endpoint. - execution_layer.spawn_transition_configuration_poll(beacon_chain.spec.clone()); } // Spawn a service to publish BLS to execution changes at the Capella fork. 
diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 1ff469fe30..7d81594ee6 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -404,14 +404,6 @@ async fn merge_readiness_logging( "config" => ?other ), }, - readiness @ MergeReadiness::ExchangeTransitionConfigurationFailed { error: _ } => { - error!( - log, - "Not ready for merge"; - "info" => %readiness, - "hint" => "try updating Lighthouse and/or the execution layer", - ) - } readiness @ MergeReadiness::NotSynced => warn!( log, "Not ready for merge"; diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 826294d5ff..359dcb5223 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,9 +1,8 @@ use crate::engines::ForkchoiceState; use crate::http::{ - ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, ENGINE_FORKCHOICE_UPDATED_V1, - ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, - ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, - ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, + ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, + ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, + ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, }; use eth2::types::{SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2}; pub use ethers_core::types::Transaction; @@ -450,7 +449,6 @@ pub struct EngineCapabilities { pub get_payload_bodies_by_range_v1: bool, pub get_payload_v1: bool, pub get_payload_v2: bool, - pub exchange_transition_configuration_v1: bool, } impl EngineCapabilities { @@ -480,9 +478,6 @@ impl EngineCapabilities { if self.get_payload_v2 { response.push(ENGINE_GET_PAYLOAD_V2); } - if self.exchange_transition_configuration_v1 { - response.push(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1); 
- } response } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 029866d95b..0ce03e6029 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -46,10 +46,6 @@ pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesB pub const ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1: &str = "engine_getPayloadBodiesByRangeV1"; pub const ENGINE_GET_PAYLOAD_BODIES_TIMEOUT: Duration = Duration::from_secs(10); -pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str = - "engine_exchangeTransitionConfigurationV1"; -pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Duration::from_secs(1); - pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities"; pub const ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT: Duration = Duration::from_secs(1); @@ -68,7 +64,6 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, - ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, ]; /// This is necessary because a user might run a capella-enabled version of @@ -83,7 +78,6 @@ pub static PRE_CAPELLA_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilit get_payload_bodies_by_range_v1: false, get_payload_v1: true, get_payload_v2: false, - exchange_transition_configuration_v1: true, }; /// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object. 
@@ -934,24 +928,6 @@ impl HttpJsonRpc { .collect()) } - pub async fn exchange_transition_configuration_v1( - &self, - transition_configuration: TransitionConfigurationV1, - ) -> Result { - let params = json!([transition_configuration]); - - let response = self - .rpc_request( - ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, - params, - ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT - * self.execution_timeout_multiplier, - ) - .await?; - - Ok(response) - } - pub async fn exchange_capabilities(&self) -> Result { let params = json!([LIGHTHOUSE_CAPABILITIES]); @@ -982,8 +958,6 @@ impl HttpJsonRpc { .contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1), get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), - exchange_transition_configuration_v1: capabilities - .contains(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1), }), } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index d72686baf5..579bebdacb 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -74,8 +74,6 @@ const EXECUTION_BLOCKS_LRU_CACHE_SIZE: usize = 128; const DEFAULT_SUGGESTED_FEE_RECIPIENT: [u8; 20] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; -const CONFIG_POLL_INTERVAL: Duration = Duration::from_secs(60); - /// A payload alongside some information about where it came from. pub enum ProvenancedPayload

{ /// A good ol' fashioned farm-to-table payload from your local EE. @@ -502,24 +500,6 @@ impl ExecutionLayer { self.spawn(preparation_cleaner, "exec_preparation_cleanup"); } - /// Spawns a routine that polls the `exchange_transition_configuration` endpoint. - pub fn spawn_transition_configuration_poll(&self, spec: ChainSpec) { - let routine = |el: ExecutionLayer| async move { - loop { - if let Err(e) = el.exchange_transition_configuration(&spec).await { - error!( - el.log(), - "Failed to check transition config"; - "error" => ?e - ); - } - sleep(CONFIG_POLL_INTERVAL).await; - } - }; - - self.spawn(routine, "exec_config_poll"); - } - /// Returns `true` if the execution engine is synced and reachable. pub async fn is_synced(&self) -> bool { self.engine().is_synced().await @@ -1318,53 +1298,6 @@ impl ExecutionLayer { .map_err(Error::EngineError) } - pub async fn exchange_transition_configuration(&self, spec: &ChainSpec) -> Result<(), Error> { - let local = TransitionConfigurationV1 { - terminal_total_difficulty: spec.terminal_total_difficulty, - terminal_block_hash: spec.terminal_block_hash, - terminal_block_number: 0, - }; - - let result = self - .engine() - .request(|engine| engine.api.exchange_transition_configuration_v1(local)) - .await; - - match result { - Ok(remote) => { - if local.terminal_total_difficulty != remote.terminal_total_difficulty - || local.terminal_block_hash != remote.terminal_block_hash - { - error!( - self.log(), - "Execution client config mismatch"; - "msg" => "ensure lighthouse and the execution client are up-to-date and \ - configured consistently", - "remote" => ?remote, - "local" => ?local, - ); - Err(Error::EngineError(Box::new(EngineError::Api { - error: ApiError::TransitionConfigurationMismatch, - }))) - } else { - debug!( - self.log(), - "Execution client config is OK"; - ); - Ok(()) - } - } - Err(e) => { - error!( - self.log(), - "Unable to get transition config"; - "error" => ?e, - ); - Err(Error::EngineError(Box::new(e))) - } - } - 
} - /// Returns the execution engine capabilities resulting from a call to /// engine_exchangeCapabilities. If the capabilities cache is not populated, /// or if it is populated with a cached result of age >= `age_limit`, this diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 79468b2116..62cab5ad28 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -357,15 +357,6 @@ pub async fn handle_rpc( Ok(serde_json::to_value(response).unwrap()) } - ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1 => { - let block_generator = ctx.execution_block_generator.read(); - let transition_config: TransitionConfigurationV1 = TransitionConfigurationV1 { - terminal_total_difficulty: block_generator.terminal_total_difficulty, - terminal_block_hash: block_generator.terminal_block_hash, - terminal_block_number: block_generator.terminal_block_number, - }; - Ok(serde_json::to_value(transition_config).unwrap()) - } ENGINE_EXCHANGE_CAPABILITIES => { let engine_capabilities = ctx.engine_capabilities.read(); Ok(serde_json::to_value(engine_capabilities.to_response()).unwrap()) diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index a8e7bab270..99d264aa7b 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -43,7 +43,6 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { get_payload_bodies_by_range_v1: true, get_payload_v1: true, get_payload_v2: true, - exchange_transition_configuration_v1: true, }; mod execution_block_generator; diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 726019a848..654b8628b8 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ 
b/testing/execution_engine_integration/src/test_rig.rs @@ -204,16 +204,6 @@ impl TestRig { let account1 = ethers_core::types::Address::from_slice(&hex::decode(ACCOUNT1).unwrap()); let account2 = ethers_core::types::Address::from_slice(&hex::decode(ACCOUNT2).unwrap()); - /* - * Check the transition config endpoint. - */ - for ee in [&self.ee_a, &self.ee_b] { - ee.execution_layer - .exchange_transition_configuration(&self.spec) - .await - .unwrap(); - } - /* * Read the terminal block hash from both pairs, check it's equal. */ From ff9b09d9646b712b2fd9fe26feeed5758daa0aa6 Mon Sep 17 00:00:00 2001 From: Divma Date: Wed, 2 Aug 2023 00:59:34 +0000 Subject: [PATCH 28/75] upgrade to libp2p 0.52 (#4431) ## Issue Addressed Upgrade libp2p to v0.52 ## Proposed Changes - **Workflows**: remove installation of `protoc` - **Book**: remove installation of `protoc` - **`Dockerfile`s and `cross`**: remove custom base `Dockerfile` for cross since it's no longer needed. Remove `protoc` from remaining `Dockerfiles`s - **Upgrade `discv5` to `v0.3.1`:** we have some cool stuff in there: no longer needs `protoc` and faster ip updates on cold start - **Upgrade `prometheus` to `0.21.0`**, now it no longer needs encoding checks - **things that look like refactors:** bunch of api types were renamed and need to be accessed in a different (clearer) way - **Lighthouse network** - connection limits is now a behaviour - banned peers no longer exist on the swarm level, but at the behaviour level - `connection_event_buffer_size` now is handled per connection with a buffer size of 4 - `mplex` is deprecated and was removed - rpc handler now logs the peer to which it belongs ## Additional Info Tried to keep as much behaviour unchanged as possible. 
However, there is a great deal of improvements we can do _after_ this upgrade: - Smart connection limits: Connection limits have been checked only based on numbers, we can now use information about the incoming peer to decide if we want it - More powerful peer management: Dial attempts from other behaviours can be rejected early - Incoming connections can be rejected early - Banning can be returned exclusively to the peer management: We should not get connections to banned peers anymore making use of this - TCP Nat updates: We might be able to take advantage of confirmed external addresses to check out tcp ports/ips Co-authored-by: Age Manning Co-authored-by: Akihito Nakano --- .github/workflows/local-testnet.yml | 4 - .github/workflows/release.yml | 9 - .github/workflows/test-suite.yml | 68 +- Cargo.lock | 2654 ++++++----------- Cross.toml | 4 +- Dockerfile | 4 +- beacon_node/http_api/src/test_utils.rs | 11 +- beacon_node/http_metrics/src/metrics.rs | 8 +- beacon_node/lighthouse_network/Cargo.toml | 8 +- beacon_node/lighthouse_network/src/config.rs | 25 +- .../lighthouse_network/src/discovery/enr.rs | 4 +- .../src/discovery/enr_ext.rs | 100 +- .../lighthouse_network/src/discovery/mod.rs | 88 +- .../src/peer_manager/mod.rs | 3 +- .../src/peer_manager/network_behaviour.rs | 74 +- .../lighthouse_network/src/rpc/handler.rs | 417 +-- beacon_node/lighthouse_network/src/rpc/mod.rs | 139 +- .../lighthouse_network/src/rpc/protocol.rs | 14 +- .../src/rpc/self_limiter.rs | 4 +- .../src/service/api_types.rs | 2 +- .../src/service/behaviour.rs | 24 +- .../service/gossipsub_scoring_parameters.rs | 3 +- .../lighthouse_network/src/service/mod.rs | 170 +- .../lighthouse_network/src/service/utils.rs | 102 +- .../lighthouse_network/src/types/pubsub.rs | 10 +- .../lighthouse_network/tests/common.rs | 4 +- beacon_node/network/src/service.rs | 6 +- book/src/installation-source.md | 13 +- book/src/pi.md | 2 +- book/src/setup.md | 2 - boot_node/src/config.rs | 2 +- 
common/eth2_network_config/Cargo.toml | 2 +- lcli/Dockerfile | 4 +- scripts/cross/Dockerfile | 14 - testing/antithesis/Dockerfile.libvoidstar | 4 +- 35 files changed, 1594 insertions(+), 2408 deletions(-) delete mode 100644 scripts/cross/Dockerfile diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 9223c40e15..ea4c1e2488 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -21,10 +21,6 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install geth (ubuntu) if: matrix.os == 'ubuntu-22.04' run: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8142184415..30e4211b88 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -79,15 +79,6 @@ jobs: if: startsWith(matrix.arch, 'x86_64-windows') run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV - # ============================== - # Windows & Mac dependencies - # ============================== - - name: Install Protoc - if: contains(matrix.arch, 'darwin') || contains(matrix.arch, 'windows') - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - # ============================== # Builds # ============================== diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index ff7a9cf2f1..ab31b3a92b 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -60,10 +60,6 @@ jobs: - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == false run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry 
(anvil) uses: foundry-rs/foundry-toolchain@v1 - name: Run tests in release @@ -83,7 +79,7 @@ jobs: node-version: '14' - name: Install windows build tools run: | - choco install python protoc visualstudio2019-workload-vctools -y + choco install python visualstudio2019-workload-vctools -y npm config set msvs_version 2019 - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 @@ -108,10 +104,6 @@ jobs: - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == false run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run beacon_chain tests for all known forks run: make test-beacon-chain op-pool-tests: @@ -122,10 +114,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run operation_pool tests for all known forks run: make test-op-pool slasher-tests: @@ -148,10 +136,6 @@ jobs: - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == false run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 - name: Run tests in debug @@ -164,10 +148,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run state_transition_vectors in release. 
run: make run-state-transition-tests ef-tests-ubuntu: @@ -180,10 +160,6 @@ jobs: - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == false run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run consensus-spec-tests with blst, milagro and fake_crypto run: make test-ef dockerfile-ubuntu: @@ -206,10 +182,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 - name: Run the beacon chain sim that starts from an eth1 contract @@ -222,10 +194,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 - name: Run the beacon chain sim and go through the merge transition @@ -238,10 +206,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run the beacon chain sim without an eth1 connection run: cargo run --release --bin simulator no-eth1-sim syncing-simulator-ubuntu: @@ -252,10 +216,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 - 
name: Run the syncing simulator @@ -268,10 +228,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install geth run: | sudo add-apt-repository -y ppa:ethereum/ethereum @@ -303,10 +259,6 @@ jobs: dotnet-version: '6.0.201' - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run exec engine integration tests in release run: make test-exec-engine check-benchmarks: @@ -317,10 +269,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Typecheck benchmark code without running it run: make check-benches clippy: @@ -331,10 +279,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Lint code for quality and style with Clippy run: make lint - name: Certify Cargo.lock freshness @@ -347,10 +291,6 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }}) run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }} - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run cargo check run: cargo check --workspace arbitrary-check: @@ -389,10 +329,6 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust (${{ env.PINNED_NIGHTLY }}) run: rustup toolchain 
install $PINNED_NIGHTLY - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install cargo-udeps run: cargo install cargo-udeps --locked --force - name: Create Cargo config dir @@ -410,7 +346,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install dependencies - run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler + run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang - name: Use Rust beta run: rustup override set beta - name: Run make diff --git a/Cargo.lock b/Cargo.lock index a7e8311231..b2e8188eec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -60,9 +60,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" +checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" dependencies = [ "gimli", ] @@ -79,15 +79,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" -[[package]] -name = "aead" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" -dependencies = [ - "generic-array", -] - [[package]] name = "aead" version = "0.4.3" @@ -95,28 +86,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" dependencies = [ "generic-array", - "rand_core 0.6.4", -] - -[[package]] -name = "aead" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" -dependencies = [ - 
"crypto-common", - "generic-array", -] - -[[package]] -name = "aes" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" -dependencies = [ - "aes-soft", - "aesni", - "cipher 0.2.5", ] [[package]] @@ -126,78 +95,33 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" dependencies = [ "cfg-if", - "cipher 0.3.0", + "cipher", "cpufeatures", - "ctr 0.8.0", + "ctr", "opaque-debug", ] -[[package]] -name = "aes" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241" -dependencies = [ - "cfg-if", - "cipher 0.4.4", - "cpufeatures", -] - [[package]] name = "aes-gcm" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6" dependencies = [ - "aead 0.4.3", - "aes 0.7.5", - "cipher 0.3.0", - "ctr 0.8.0", - "ghash 0.4.4", + "aead", + "aes", + "cipher", + "ctr", + "ghash", "subtle", ] -[[package]] -name = "aes-gcm" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "209b47e8954a928e1d72e86eca7000ebb6655fe1436d33eefc2201cad027e237" -dependencies = [ - "aead 0.5.2", - "aes 0.8.2", - "cipher 0.4.4", - "ctr 0.9.2", - "ghash 0.5.0", - "subtle", -] - -[[package]] -name = "aes-soft" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" -dependencies = [ - "cipher 0.2.5", - "opaque-debug", -] - -[[package]] -name = "aesni" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" -dependencies = [ - "cipher 0.2.5", - 
"opaque-debug", -] - [[package]] name = "ahash" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "once_cell", "version_check", ] @@ -215,18 +139,30 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + [[package]] name = "amcl" version = "0.3.0" source = "git+https://github.com/sigp/milagro_bls?tag=v1.4.2#16655aa033175a90c10ef02aa144e2835de23aec" +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + [[package]] name = "android_system_properties" version = "0.1.5" @@ -256,9 +192,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.71" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" +checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" [[package]] name = "arbitrary" @@ -275,6 +211,15 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +[[package]] +name = "array-init" +version = "0.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"23589ecb866b460d3a0f1278834750268c607e8e28a1b982c907219f3178cd72" +dependencies = [ + "nodrop", +] + [[package]] name = "arrayref" version = "0.3.7" @@ -283,76 +228,9 @@ checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" - -[[package]] -name = "asn1-rs" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ff05a702273012438132f449575dbc804e27b2f3cbe3069aa237d26c98fa33" -dependencies = [ - "asn1-rs-derive 0.1.0", - "asn1-rs-impl", - "displaydoc", - "nom 7.1.3", - "num-traits", - "rusticata-macros", - "thiserror", - "time 0.3.21", -] - -[[package]] -name = "asn1-rs" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" -dependencies = [ - "asn1-rs-derive 0.4.0", - "asn1-rs-impl", - "displaydoc", - "nom 7.1.3", - "num-traits", - "rusticata-macros", - "thiserror", - "time 0.3.21", -] - -[[package]] -name = "asn1-rs-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db8b7511298d5b7784b40b092d9e9dcd3a627a5707e4b5e507931ab0d44eeebf" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure", -] - -[[package]] -name = "asn1-rs-derive" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure", -] - -[[package]] -name = "asn1-rs-impl" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" -dependencies = [ - "proc-macro2", 
- "quote", - "syn 1.0.109", -] +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "asn1_der" @@ -374,7 +252,7 @@ dependencies = [ "log", "parking", "polling", - "rustix", + "rustix 0.37.23", "slab", "socket2 0.4.9", "waker-fn", @@ -397,7 +275,7 @@ checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", ] [[package]] @@ -408,18 +286,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] name = "async-trait" -version = "0.1.68" +version = "0.1.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" +checksum = "cc6dde6e4ed435a4c1ee4e73592f5ba9da2151af10076cc04858746af9352d09" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] @@ -435,23 +313,17 @@ dependencies = [ [[package]] name = "asynchronous-codec" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06a0daa378f5fd10634e44b0a29b2a87b890657658e072a30d6f26e57ddee182" +checksum = "4057f2c32adbb2fc158e22fb38433c8e9bbf76b75a4732c7c0cbaf695fb65568" dependencies = [ "bytes", "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", ] -[[package]] -name = "atomic-waker" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" - [[package]] name = "attohttpc" version = "0.16.3" @@ -504,13 +376,13 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.18" +version = "0.6.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" +checksum = "a6a1de45611fdb535bfde7b7de4fd54f4fd2b17b1737c0a59b69bf9b92074b8c" dependencies = [ "async-trait", "axum-core", - "bitflags", + "bitflags 1.3.2", "bytes", "futures-util", "http", @@ -521,7 +393,7 @@ dependencies = [ "memchr", "mime", "percent-encoding", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "rustversion", "serde", "serde_json", @@ -553,15 +425,15 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.67" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" +checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" dependencies = [ "addr2line", "cc", "cfg-if", "libc", - "miniz_oxide 0.6.2", + "miniz_oxide", "object", "rustc-demangle", ] @@ -592,9 +464,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.1" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f1e31e207a6b8fb791a38ea3105e6cb541f55e4d029902d3039a4ad07cc4105" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64ct" @@ -663,7 +535,7 @@ dependencies = [ "slog", "sloggers", "slot_clock", - "smallvec", + "smallvec 1.11.0", "ssz_types", "state_processing", "store", @@ -752,7 +624,7 @@ version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cexpr", "clang-sys", "lazy_static", @@ -771,6 +643,12 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = 
"bitflags" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" + [[package]] name = "bitvec" version = "0.20.4" @@ -823,16 +701,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "block-modes" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a0e8073e8baa88212fb5823574c02ebccb395136ba9a164ab89379ec6072f0" -dependencies = [ - "block-padding", - "cipher 0.2.5", -] - [[package]] name = "block-padding" version = "0.2.1" @@ -914,6 +782,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" +[[package]] +name = "bs58" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" +dependencies = [ + "tinyvec", +] + [[package]] name = "buf_redux" version = "0.8.4" @@ -938,9 +815,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.2" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c6ed94e98ecff0c12dd1b04c15ec0d7d9458ca8fe806cea6f12954efe74c63b" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "byte-slice-cast" @@ -994,25 +871,25 @@ dependencies = [ "ethereum_ssz_derive", "quickcheck", "quickcheck_macros", - "smallvec", + "smallvec 1.11.0", "ssz_types", "tree_hash", ] [[package]] name = "camino" -version = "1.1.4" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c530edf18f37068ac2d977409ed5cd50d53d73bc653c7647b48eb78976ac9ae2" +checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" dependencies = [ "serde", ] [[package]] name = "cargo-platform" -version = "0.1.2" +version = "0.1.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27" +checksum = "2cfa25e60aea747ec7e1124f238816749faa93759c6ff5b31f1ccdda137f4479" dependencies = [ "serde", ] @@ -1025,7 +902,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.17", + "semver 1.0.18", "serde", "serde_json", "thiserror", @@ -1043,17 +920,6 @@ version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" -[[package]] -name = "ccm" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca1a8fbc20b50ac9673ff014abfb2b5f4085ee1a850d408f14a159c5853ac7" -dependencies = [ - "aead 0.3.2", - "cipher 0.2.5", - "subtle", -] - [[package]] name = "cexpr" version = "0.6.0" @@ -1076,7 +942,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" dependencies = [ "cfg-if", - "cipher 0.3.0", + "cipher", "cpufeatures", "zeroize", ] @@ -1087,22 +953,22 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" dependencies = [ - "aead 0.4.3", + "aead", "chacha20", - "cipher 0.3.0", + "cipher", "poly1305", "zeroize", ] [[package]] name = "chrono" -version = "0.4.24" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" +checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ + "android-tzdata", "iana-time-zone", "js-sys", - "num-integer", "num-traits", "serde", "time 0.1.45", @@ -1110,15 +976,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "cipher" -version = 
"0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" -dependencies = [ - "generic-array", -] - [[package]] name = "cipher" version = "0.3.0" @@ -1128,16 +985,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "cipher" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" -dependencies = [ - "crypto-common", - "inout", -] - [[package]] name = "clang-sys" version = "1.6.1" @@ -1157,7 +1004,7 @@ checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "ansi_term", "atty", - "bitflags", + "bitflags 1.3.2", "strsim 0.8.0", "textwrap", "unicode-width", @@ -1217,7 +1064,7 @@ dependencies = [ "state_processing", "store", "task_executor", - "time 0.3.21", + "time 0.3.24", "timer", "tokio", "types", @@ -1258,9 +1105,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.2" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" +checksum = "795bc6e66a8e340f075fcf6227e417a2dc976b92b91f3cdc778bb858778b6747" [[package]] name = "convert_case" @@ -1295,28 +1142,13 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.7" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] -[[package]] -name = "crc" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" -dependencies = [ - "crc-catalog", -] - -[[package]] -name = "crc-catalog" -version = "2.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" - [[package]] name = "crc32fast" version = "1.3.2" @@ -1385,22 +1217,22 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.14" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg 1.1.0", "cfg-if", "crossbeam-utils", - "memoffset 0.8.0", + "memoffset 0.9.0", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if", ] @@ -1442,7 +1274,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - "rand_core 0.6.4", "typenum", ] @@ -1468,9 +1299,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b015497079b9a9d69c02ad25de6c0a6edef051ea6360a327d0bd05802ef64ad" +checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086" dependencies = [ "csv-core", "itoa", @@ -1493,26 +1324,17 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea" dependencies = [ - "cipher 0.3.0", -] - -[[package]] -name = "ctr" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" 
-dependencies = [ - "cipher 0.4.4", + "cipher", ] [[package]] name = "ctrlc" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04d778600249295e82b6ab12e291ed9029407efee0cfb7baf67157edc65964df" +checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e" dependencies = [ "nix 0.26.2", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] @@ -1530,9 +1352,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0-rc.2" +version = "4.0.0-rc.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d928d978dbec61a1167414f5ec534f24bea0d7a0d24dd9b6233d3d8223e585" +checksum = "8d4ba9852b42210c7538b75484f9daa0655e9a3ac04f693747bb0f02cf3cfe16" dependencies = [ "cfg-if", "digest 0.10.7", @@ -1549,18 +1371,8 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ - "darling_core 0.13.4", - "darling_macro 0.13.4", -] - -[[package]] -name = "darling" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" -dependencies = [ - "darling_core 0.14.4", - "darling_macro 0.14.4", + "darling_core", + "darling_macro", ] [[package]] @@ -1577,38 +1389,13 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "darling_core" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.10.0", - "syn 1.0.109", -] - [[package]] name = "darling_macro" version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ - "darling_core 0.13.4", 
- "quote", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" -dependencies = [ - "darling_core 0.14.4", + "darling_core", "quote", "syn 1.0.109", ] @@ -1702,7 +1489,7 @@ dependencies = [ "hex", "reqwest", "serde_json", - "sha2 0.10.6", + "sha2 0.10.7", "tree_hash", "types", ] @@ -1714,47 +1501,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ "const-oid", - "pem-rfc7468", "zeroize", ] [[package]] name = "der" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56acb310e15652100da43d130af8d97b509e95af61aab1c5a7939ef24337ee17" +checksum = "0c7ed52955ce76b1554f509074bb357d3fb8ac9b51288a65a3fd480d1dfba946" dependencies = [ "const-oid", + "pem-rfc7468", "zeroize", ] [[package]] -name = "der-parser" -version = "7.0.0" +name = "deranged" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe398ac75057914d7d07307bf67dc7f3f574a26783b4fc7805a20ffa9f506e82" -dependencies = [ - "asn1-rs 0.3.1", - "displaydoc", - "nom 7.1.3", - "num-bigint", - "num-traits", - "rusticata-macros", -] - -[[package]] -name = "der-parser" -version = "8.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" -dependencies = [ - "asn1-rs 0.5.2", - "displaydoc", - "nom 7.1.3", - "num-bigint", - "num-traits", - "rusticata-macros", -] +checksum = "8810e7e2cf385b1e9b50d68264908ec367ba642c96d02edfe61c39e88e2a3c01" [[package]] name = "derivative" @@ -1775,38 +1540,7 @@ checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", -] - -[[package]] 
-name = "derive_builder" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07adf7be193b71cc36b193d0f5fe60b918a3a9db4dad0449f57bcfd519704a3" -dependencies = [ - "derive_builder_macro", -] - -[[package]] -name = "derive_builder_core" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" -dependencies = [ - "darling 0.14.4", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "derive_builder_macro" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68" -dependencies = [ - "derive_builder_core", - "syn 1.0.109", + "syn 2.0.28", ] [[package]] @@ -1824,11 +1558,11 @@ dependencies = [ [[package]] name = "diesel" -version = "2.0.4" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72eb77396836a4505da85bae0712fa324b74acfe1876d7c2f7e694ef3d0ee373" +checksum = "f7a532c1f99a0f596f6960a60d1e119e91582b24b39e2d83a190e61262c3ef0c" dependencies = [ - "bitflags", + "bitflags 2.3.3", "byteorder", "diesel_derives", "itoa", @@ -1838,27 +1572,36 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad74fdcf086be3d4fdd142f67937678fe60ed431c3b2f08599e7687269410c4" +checksum = "74398b79d81e52e130d991afeed9c86034bb1b7735f46d2f5bf7deb261d80303" dependencies = [ - "proc-macro-error", + "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.28", ] [[package]] name = "diesel_migrations" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9ae22beef5e9d6fab9225ddb073c1c6c1a7a6ded5019d5da11d1e5c5adc34e2" +checksum = 
"6036b3f0120c5961381b570ee20a02432d7e2d27ea60de9578799cf9156914ac" dependencies = [ "diesel", "migrations_internals", "migrations_macros", ] +[[package]] +name = "diesel_table_macro_syntax" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" +dependencies = [ + "syn 2.0.28", +] + [[package]] name = "digest" version = "0.9.0" @@ -1932,28 +1675,29 @@ dependencies = [ [[package]] name = "discv5" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77f32d27968ba86689e3f0eccba0383414348a6fc5918b0a639c98dd81e20ed6" +checksum = "98c05fa26996c6141f78ac4fafbe297a7fa69690565ba4e0d1f2e60bde5ce501" dependencies = [ - "aes 0.7.5", - "aes-gcm 0.9.4", + "aes", + "aes-gcm", "arrayvec", "delay_map", - "enr 0.8.1", + "enr 0.9.0", "fnv", "futures", "hashlink 0.7.0", "hex", "hkdf", "lazy_static", - "libp2p-core 0.36.0", + "libp2p-core", + "libp2p-identity", "lru 0.7.8", "more-asserts", "parking_lot 0.11.2", "rand 0.8.5", "rlp", - "smallvec", + "smallvec 1.11.0", "socket2 0.4.9", "tokio", "tracing", @@ -1962,22 +1706,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "displaydoc" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.16", -] - [[package]] name = "dtoa" -version = "1.0.6" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65d09067bfacaa79114679b279d7f5885b53295b1e2cfb4e79c8e4bd3d633169" +checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" [[package]] name = "dunce" @@ -1999,11 +1732,11 @@ dependencies = [ [[package]] name = "ecdsa" -version = "0.16.7" +version = "0.16.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0997c976637b606099b9985693efa3581e84e41f5c11ba5255f88711058ad428" +checksum = "a4b1e0c257a9e9f25f90ff76d7a68360ed497ee519c8e428d1825ef0000799d4" dependencies = [ - "der 0.7.6", + "der 0.7.7", "digest 0.10.7", "elliptic-curve 0.13.5", "rfc6979 0.4.0", @@ -2046,15 +1779,15 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.0.0-rc.2" +version = "2.0.0-pre.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "798f704d128510932661a3489b08e3f4c934a01d61c5def59ae7b8e48f19665a" +checksum = "7bd577ba9d4bcab443cac60003d8fd32c638e7024a3ec92c200d7af5d2c397ed" dependencies = [ - "curve25519-dalek 4.0.0-rc.2", + "curve25519-dalek 4.0.0-rc.1", "ed25519 2.2.1", "rand_core 0.6.4", "serde", - "sha2 0.10.6", + "sha2 0.10.7", "zeroize", ] @@ -2091,9 +1824,9 @@ dependencies = [ [[package]] name = "either" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "elliptic-curve" @@ -2108,8 +1841,6 @@ dependencies = [ "ff 0.12.1", "generic-array", "group 0.12.1", - "hkdf", - "pem-rfc7468", "pkcs8 0.9.0", "rand_core 0.6.4", "sec1 0.3.0", @@ -2129,9 +1860,10 @@ dependencies = [ "ff 0.13.0", "generic-array", "group 0.13.0", + "pem-rfc7468", "pkcs8 0.10.2", "rand_core 0.6.4", - "sec1 0.7.2", + "sec1 0.7.3", "subtle", "zeroize", ] @@ -2152,7 +1884,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26fa0a0be8915790626d5759eb51fe47435a8eac92c2f212bd2da9aa7f30ea56" dependencies = [ "base64 0.13.1", - "bs58", + "bs58 0.4.0", "bytes", "hex", "k256 0.11.6", @@ -2166,19 +1898,20 @@ dependencies = [ [[package]] name = "enr" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf56acd72bb22d2824e66ae8e9e5ada4d0de17a69c7fd35569dde2ada8ec9116" 
+checksum = "0be7b2ac146c1f99fe245c02d16af0696450d8e06c135db75e10eeb9e642c20d" dependencies = [ - "base64 0.13.1", + "base64 0.21.2", "bytes", - "ed25519-dalek 2.0.0-rc.2", + "ed25519-dalek 2.0.0-pre.0", "hex", "k256 0.13.1", "log", "rand 0.8.5", "rlp", "serde", + "serde-hex", "sha3 0.10.8", "zeroize", ] @@ -2241,14 +1974,20 @@ dependencies = [ ] [[package]] -name = "errno" -version = "0.3.1" +name = "equivalent" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" dependencies = [ "errno-dragonfly", "libc", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] @@ -2379,7 +2118,7 @@ dependencies = [ "hex", "num-bigint-dig", "ring", - "sha2 0.10.6", + "sha2 0.10.7", "zeroize", ] @@ -2387,7 +2126,7 @@ dependencies = [ name = "eth2_keystore" version = "0.1.0" dependencies = [ - "aes 0.7.5", + "aes", "bls", "eth2_key_derivation", "hex", @@ -2401,7 +2140,7 @@ dependencies = [ "sha2 0.9.9", "tempfile", "unicode-normalization", - "uuid 0.8.2", + "uuid", "zeroize", ] @@ -2431,7 +2170,7 @@ dependencies = [ "serde_repr", "tempfile", "tiny-bip39", - "uuid 0.8.2", + "uuid", ] [[package]] @@ -2510,7 +2249,7 @@ source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=e380108#e380 dependencies = [ "async-stream", "blst", - "bs58", + "bs58 0.4.0", "enr 0.6.2", "hex", "integer-sqrt", @@ -2566,7 +2305,7 @@ dependencies = [ "cpufeatures", "lazy_static", "ring", - "sha2 0.10.6", + "sha2 0.10.7", ] [[package]] @@ -2584,22 +2323,22 @@ dependencies = [ [[package]] name = "ethereum_ssz" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"32749e96305376af40d7a7ee8ea4c4c64c68d09ff94a81ab78c8d9bc7153c221" +checksum = "e61ffea29f26e8249d35128a82ec8d3bd4fbc80179ea5f5e5e3daafef6a80fcb" dependencies = [ "ethereum-types 0.14.1", "itertools", - "smallvec", + "smallvec 1.11.0", ] [[package]] name = "ethereum_ssz_derive" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9cac7ef2107926cea34c0064056f9bb134d2085eef882388d151d2e59174cf0" +checksum = "6085d7fd3cf84bd2b8fec150d54c8467fb491d8db9c460607c5534f653a0ee38" dependencies = [ - "darling 0.13.4", + "darling", "proc-macro2", "quote", "syn 1.0.109", @@ -2635,7 +2374,7 @@ dependencies = [ "dunce", "ethers-core", "eyre", - "getrandom 0.2.9", + "getrandom 0.2.10", "hex", "proc-macro2", "quote", @@ -2644,7 +2383,7 @@ dependencies = [ "serde", "serde_json", "syn 1.0.109", - "toml", + "toml 0.5.11", "url", "walkdir", ] @@ -2707,7 +2446,7 @@ dependencies = [ "futures-core", "futures-timer", "futures-util", - "getrandom 0.2.9", + "getrandom 0.2.10", "hashers", "hex", "http", @@ -2851,6 +2590,12 @@ dependencies = [ "instant", ] +[[package]] +name = "fastrand" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" + [[package]] name = "ff" version = "0.12.1" @@ -2885,11 +2630,11 @@ checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" [[package]] name = "field-offset" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3cf3a800ff6e860c863ca6d4b16fd999db8b752819c1606884047b73e468535" +checksum = "38e2275cc4e4fc009b0669731a1e5ab7ebf11f469eaede2bab9309a5b4d6057f" dependencies = [ - "memoffset 0.8.0", + "memoffset 0.9.0", "rustc_version 0.4.0", ] @@ -2926,12 +2671,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "fixedbitset" -version = "0.4.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" - [[package]] name = "flate2" version = "1.0.26" @@ -2940,7 +2679,7 @@ checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", "libz-sys", - "miniz_oxide 0.7.1", + "miniz_oxide", ] [[package]] @@ -2981,9 +2720,9 @@ dependencies = [ [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] @@ -3065,12 +2804,12 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ - "fastrand", + "fastrand 1.9.0", "futures-core", "futures-io", "memchr", "parking", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "waker-fn", ] @@ -3082,7 +2821,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] @@ -3108,6 +2847,17 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +[[package]] +name = "futures-ticker" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9763058047f713632a52e916cc7f6a4b3fc6e9fc1ff8c5b1dc49e5a89041682e" +dependencies = [ + "futures", + "futures-timer", + "instant", +] + [[package]] name = "futures-timer" version = "3.0.2" @@ -3127,7 +2877,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "pin-utils", "slab", ] @@ -3188,9 +2938,9 @@ 
dependencies = [ [[package]] name = "getrandom" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "js-sys", @@ -3206,24 +2956,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" dependencies = [ "opaque-debug", - "polyval 0.5.3", -] - -[[package]] -name = "ghash" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" -dependencies = [ - "opaque-debug", - "polyval 0.6.0", + "polyval", ] [[package]] name = "gimli" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" [[package]] name = "git-version" @@ -3277,9 +3017,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" +checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" dependencies = [ "bytes", "fnv", @@ -3287,7 +3027,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap", + "indexmap 1.9.3", "slab", "tokio", "tokio-util 0.7.8", @@ -3342,6 +3082,16 @@ dependencies = [ "ahash 0.8.3", ] +[[package]] +name = "hashbrown" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +dependencies = [ + "ahash 0.8.3", + "allocator-api2", +] + [[package]] name = 
"hashers" version = "1.0.1" @@ -3362,11 +3112,11 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0761a1b9491c4f2e3d66aa0f62d0fba0af9a0e2852e4d48ea506632a4b56e6aa" +checksum = "312f66718a2d7789ffef4f4b7b213138ed9f1eb3aa1d0d82fc99f88fb3ffd26f" dependencies = [ - "hashbrown 0.13.2", + "hashbrown 0.14.0", ] [[package]] @@ -3376,7 +3126,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ "base64 0.13.1", - "bitflags", + "bitflags 1.3.2", "bytes", "headers-core", "http", @@ -3411,18 +3161,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.2.6" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" -dependencies = [ - "libc", -] - -[[package]] -name = "hermit-abi" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" +checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" [[package]] name = "hex" @@ -3515,7 +3256,7 @@ checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", ] [[package]] @@ -3523,7 +3264,7 @@ name = "http_api" version = "0.1.0" dependencies = [ "beacon_chain", - "bs58", + "bs58 0.4.0", "bytes", "directory", "environment", @@ -3606,9 +3347,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.26" +version = "0.14.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" +checksum = 
"ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" dependencies = [ "bytes", "futures-channel", @@ -3620,7 +3361,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "socket2 0.4.9", "tokio", "tower-service", @@ -3630,15 +3371,16 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" +checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" dependencies = [ + "futures-util", "http", "hyper", - "rustls 0.21.1", + "rustls 0.21.5", "tokio", - "tokio-rustls 0.24.0", + "tokio-rustls 0.24.1", ] [[package]] @@ -3656,9 +3398,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -3696,9 +3438,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -3782,7 +3524,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.5.0", + "parity-scale-codec 3.6.4", ] [[package]] @@ -3840,12 +3582,13 @@ dependencies = [ ] [[package]] -name = "inout" -version = "0.1.3" +name = "indexmap" +version = "2.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" dependencies = [ - "generic-array", + "equivalent", + "hashbrown 0.14.0", ] [[package]] @@ -3878,53 +3621,34 @@ dependencies = [ "num-traits", ] -[[package]] -name = "interceptor" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e8a11ae2da61704edada656798b61c94b35ecac2c58eb955156987d5e6be90b" -dependencies = [ - "async-trait", - "bytes", - "log", - "rand 0.8.5", - "rtcp", - "rtp", - "thiserror", - "tokio", - "waitgroup", - "webrtc-srtp", - "webrtc-util", -] - [[package]] name = "io-lifetimes" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.1", + "hermit-abi 0.3.2", "libc", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] name = "ipconfig" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" +checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.4.9", - "widestring 0.5.1", - "winapi", - "winreg", + "socket2 0.5.3", + "widestring 1.0.2", + "windows-sys", + "winreg 0.50.0", ] [[package]] name = "ipnet" -version = "2.7.2" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" +checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" [[package]] name = "itertools" @@ -3937,15 +3661,15 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.6" 
+version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "jemalloc-ctl" -version = "0.5.0" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1891c671f3db85d8ea8525dd43ab147f9977041911d24a03e5a36187a7bfde9" +checksum = "7cffc705424a344c054e135d12ee591402f4539245e8bbd64e6c9eaa9458b63c" dependencies = [ "jemalloc-sys", "libc", @@ -3954,9 +3678,9 @@ dependencies = [ [[package]] name = "jemalloc-sys" -version = "0.5.3+5.3.0-patched" +version = "0.5.4+5.3.0-patched" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9bd5d616ea7ed58b571b2e209a65759664d7fb021a0819d7a790afc67e47ca1" +checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2" dependencies = [ "cc", "libc", @@ -3964,9 +3688,9 @@ dependencies = [ [[package]] name = "jemallocator" -version = "0.5.0" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16c2514137880c52b0b4822b563fadd38257c1f380858addb74a400889696ea6" +checksum = "a0de374a9f8e63150e6f5e8a60cc14c668226d7a347d8aee1a45766e3c4dd3bc" dependencies = [ "jemalloc-sys", "libc", @@ -3974,9 +3698,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.63" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -3987,7 +3711,7 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.1", + "base64 0.21.2", "pem", "ring", "serde", @@ -4004,7 +3728,7 @@ 
dependencies = [ "cfg-if", "ecdsa 0.14.8", "elliptic-curve 0.12.3", - "sha2 0.10.6", + "sha2 0.10.7", "sha3 0.10.8", ] @@ -4015,10 +3739,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc" dependencies = [ "cfg-if", - "ecdsa 0.16.7", + "ecdsa 0.16.8", "elliptic-curve 0.13.5", "once_cell", - "sha2 0.10.6", + "sha2 0.10.7", "signature 2.1.0", ] @@ -4119,9 +3843,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.144" +version = "0.2.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" +checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" [[package]] name = "libflate" @@ -4170,10 +3894,10 @@ name = "libmdbx" version = "0.1.4" source = "git+https://github.com/sigp/libmdbx-rs?tag=v0.1.4#096da80a83d14343f8df833006483f48075cd135" dependencies = [ - "bitflags", + "bitflags 1.3.2", "byteorder", "derive_more", - "indexmap", + "indexmap 1.9.3", "libc", "mdbx-sys", "parking_lot 0.12.1", @@ -4182,112 +3906,63 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.50.1" +version = "0.52.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c7b0104790be871edcf97db9bd2356604984e623a08d825c3f27852290266b8" +checksum = "38039ba2df4f3255842050845daef4a004cc1f26da03dbc645535088b51910ef" dependencies = [ "bytes", "futures", "futures-timer", - "getrandom 0.2.9", + "getrandom 0.2.10", "instant", - "libp2p-core 0.38.0", + "libp2p-allow-block-list", + "libp2p-connection-limits", + "libp2p-core", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", + "libp2p-identity", "libp2p-mdns", "libp2p-metrics", - "libp2p-mplex", "libp2p-noise", "libp2p-plaintext", - "libp2p-quic", "libp2p-swarm", "libp2p-tcp", - "libp2p-webrtc", "libp2p-websocket", "libp2p-yamux", - "multiaddr 0.16.0", - "parking_lot 0.12.1", + "multiaddr 0.18.0", 
"pin-project", - "smallvec", ] [[package]] -name = "libp2p-core" -version = "0.36.0" +name = "libp2p-allow-block-list" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1fff5bd889c82a0aec668f2045edd066f559d4e5c40354e5a4c77ac00caac38" +checksum = "55b46558c5c0bf99d3e2a1a38fd54ff5476ca66dd1737b12466a1824dd219311" dependencies = [ - "asn1_der", - "bs58", - "ed25519-dalek 1.0.1", - "either", - "fnv", - "futures", - "futures-timer", - "instant", - "lazy_static", - "libsecp256k1", - "log", - "multiaddr 0.14.0", - "multihash 0.16.3", - "multistream-select 0.11.0", - "p256", - "parking_lot 0.12.1", - "pin-project", - "prost", - "prost-build", - "rand 0.8.5", - "rw-stream-sink", - "sha2 0.10.6", - "smallvec", - "thiserror", - "unsigned-varint 0.7.1", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", "void", - "zeroize", ] [[package]] -name = "libp2p-core" -version = "0.38.0" +name = "libp2p-connection-limits" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6a8fcd392ff67af6cc3f03b1426c41f7f26b6b9aff2dc632c1c56dd649e571f" +checksum = "2f5107ad45cb20b2f6c3628c7b6014b996fcb13a88053f4569c872c6e30abf58" dependencies = [ - "asn1_der", - "bs58", - "ed25519-dalek 1.0.1", - "either", - "fnv", - "futures", - "futures-timer", - "instant", - "libsecp256k1", - "log", - "multiaddr 0.16.0", - "multihash 0.16.3", - "multistream-select 0.12.1", - "once_cell", - "p256", - "parking_lot 0.12.1", - "pin-project", - "prost", - "prost-build", - "rand 0.8.5", - "rw-stream-sink", - "sec1 0.3.0", - "sha2 0.10.6", - "smallvec", - "thiserror", - "unsigned-varint 0.7.1", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", "void", - "zeroize", ] [[package]] name = "libp2p-core" -version = "0.39.2" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c1df63c0b582aa434fb09b2d86897fa2b419ffeccf934b36f87fcedc8e835c2" +checksum = 
"ef7dd7b09e71aac9271c60031d0e558966cdb3253ba0308ab369bb2de80630d0" dependencies = [ "either", "fnv", @@ -4296,16 +3971,16 @@ dependencies = [ "instant", "libp2p-identity", "log", - "multiaddr 0.17.1", - "multihash 0.17.0", - "multistream-select 0.12.1", + "multiaddr 0.18.0", + "multihash 0.19.0", + "multistream-select", "once_cell", "parking_lot 0.12.1", "pin-project", "quick-protobuf", "rand 0.8.5", "rw-stream-sink", - "smallvec", + "smallvec 1.11.0", "thiserror", "unsigned-varint 0.7.1", "void", @@ -4313,102 +3988,111 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.38.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e42a271c1b49f789b92f7fc87749fa79ce5c7bdc88cbdfacb818a4bca47fec5" +checksum = "fd4394c81c0c06d7b4a60f3face7e8e8a9b246840f98d2c80508d0721b032147" dependencies = [ "futures", - "libp2p-core 0.38.0", + "libp2p-core", + "libp2p-identity", "log", "parking_lot 0.12.1", - "smallvec", + "smallvec 1.11.0", "trust-dns-resolver", ] [[package]] name = "libp2p-gossipsub" -version = "0.43.0" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a173171c71c29bb156f98886c7c4824596de3903dadf01e2e79d2ccdcf38cd9f" +checksum = "8e378da62e8c9251f6e885ed173a561663f29b251e745586cf6ae6150b295c37" dependencies = [ "asynchronous-codec", - "base64 0.13.1", + "base64 0.21.2", "byteorder", "bytes", + "either", "fnv", "futures", + "futures-ticker", + "getrandom 0.2.10", "hex_fmt", "instant", - "libp2p-core 0.38.0", + "libp2p-core", + "libp2p-identity", "libp2p-swarm", "log", "prometheus-client", - "prost", - "prost-build", - "prost-codec", + "quick-protobuf", + "quick-protobuf-codec", "rand 0.8.5", "regex", - "sha2 0.10.6", - "smallvec", - "thiserror", + "sha2 0.10.7", + "smallvec 1.11.0", "unsigned-varint 0.7.1", - "wasm-timer", + "void", ] [[package]] name = "libp2p-identify" -version = "0.41.1" +version = "0.43.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c052d0026f4817b44869bfb6810f4e1112f43aec8553f2cb38881c524b563abf" +checksum = "6a29675a32dbcc87790db6cf599709e64308f1ae9d5ecea2d259155889982db8" dependencies = [ "asynchronous-codec", + "either", "futures", "futures-timer", - "libp2p-core 0.38.0", + "libp2p-core", + "libp2p-identity", "libp2p-swarm", "log", - "lru 0.8.1", - "prost", - "prost-build", - "prost-codec", - "smallvec", + "lru 0.10.1", + "quick-protobuf", + "quick-protobuf-codec", + "smallvec 1.11.0", "thiserror", "void", ] [[package]] name = "libp2p-identity" -version = "0.1.2" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e2d584751cecb2aabaa56106be6be91338a60a0f4e420cf2af639204f596fc1" +checksum = "a38d6012784fe4cc14e6d443eb415b11fc7c456dc15d9f0d90d9b70bc7ac3ec1" dependencies = [ - "bs58", + "asn1_der", + "bs58 0.5.0", "ed25519-dalek 1.0.1", + "libsecp256k1", "log", - "multiaddr 0.17.1", - "multihash 0.17.0", + "multihash 0.19.0", + "p256", "quick-protobuf", "rand 0.8.5", - "sha2 0.10.6", + "sec1 0.7.3", + "sha2 0.10.7", "thiserror", + "void", "zeroize", ] [[package]] name = "libp2p-mdns" -version = "0.42.0" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f378264aade9872d6ccd315c0accc18be3a35d15fc1b9c36e5b6f983b62b5b" +checksum = "42a2567c305232f5ef54185e9604579a894fd0674819402bb0ac0246da82f52a" dependencies = [ "data-encoding", "futures", "if-watch", - "libp2p-core 0.38.0", + "libp2p-core", + "libp2p-identity", "libp2p-swarm", "log", "rand 0.8.5", - "smallvec", - "socket2 0.4.9", + "smallvec 1.11.0", + "socket2 0.5.3", "tokio", "trust-dns-proto", "void", @@ -4416,224 +4100,143 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.11.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ad8a64f29da86005c86a4d2728b8a0719e9b192f4092b609fd8790acb9dec55" +checksum = 
"3787ea81798dcc5bf1d8b40a8e8245cf894b168d04dd70aa48cb3ff2fff141d2" dependencies = [ - "libp2p-core 0.38.0", + "instant", + "libp2p-core", "libp2p-gossipsub", "libp2p-identify", + "libp2p-identity", "libp2p-swarm", + "once_cell", "prometheus-client", ] -[[package]] -name = "libp2p-mplex" -version = "0.38.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03805b44107aa013e7cbbfa5627b31c36cbedfdfb00603c0311998882bc4bace" -dependencies = [ - "asynchronous-codec", - "bytes", - "futures", - "libp2p-core 0.38.0", - "log", - "nohash-hasher", - "parking_lot 0.12.1", - "rand 0.8.5", - "smallvec", - "unsigned-varint 0.7.1", -] - [[package]] name = "libp2p-noise" -version = "0.41.0" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a978cb57efe82e892ec6f348a536bfbd9fee677adbe5689d7a93ad3a9bffbf2e" +checksum = "87945db2b3f977af09b62b9aa0a5f3e4870995a577ecd845cdeba94cdf6bbca7" dependencies = [ "bytes", "curve25519-dalek 3.2.0", "futures", - "libp2p-core 0.38.0", + "libp2p-core", + "libp2p-identity", "log", + "multiaddr 0.18.0", + "multihash 0.19.0", "once_cell", - "prost", - "prost-build", + "quick-protobuf", "rand 0.8.5", - "sha2 0.10.6", + "sha2 0.10.7", "snow", "static_assertions", "thiserror", - "x25519-dalek 1.1.1", + "x25519-dalek", "zeroize", ] [[package]] name = "libp2p-plaintext" -version = "0.38.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c43ab37fb4102682ae9a248dc2e6a8e7b941ec75cf24aed103060a788e0fd15" +checksum = "37266c683a757df713f7dcda0cdcb5ad4681355ffa1b37b77c113c176a531195" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.38.0", + "libp2p-core", + "libp2p-identity", "log", - "prost", - "prost-build", + "quick-protobuf", "unsigned-varint 0.7.1", - "void", -] - -[[package]] -name = "libp2p-quic" -version = "0.7.0-alpha" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"01e7c867e95c8130667b24409d236d37598270e6da69b3baf54213ba31ffca59" -dependencies = [ - "bytes", - "futures", - "futures-timer", - "if-watch", - "libp2p-core 0.38.0", - "libp2p-tls", - "log", - "parking_lot 0.12.1", - "quinn-proto", - "rand 0.8.5", - "rustls 0.20.8", - "thiserror", - "tokio", ] [[package]] name = "libp2p-swarm" -version = "0.41.1" +version = "0.43.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2a35472fe3276b3855c00f1c032ea8413615e030256429ad5349cdf67c6e1a0" +checksum = "43106820057e0f65c77b01a3873593f66e676da4e40c70c3a809b239109f1d30" dependencies = [ "either", "fnv", "futures", "futures-timer", "instant", - "libp2p-core 0.38.0", + "libp2p-core", + "libp2p-identity", "libp2p-swarm-derive", "log", - "pin-project", + "multistream-select", + "once_cell", "rand 0.8.5", - "smallvec", - "thiserror", + "smallvec 1.11.0", "tokio", "void", ] [[package]] name = "libp2p-swarm-derive" -version = "0.31.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d527d5827582abd44a6d80c07ff8b50b4ee238a8979e05998474179e79dc400" +checksum = "c4d5ec2a3df00c7836d7696c136274c9c59705bac69133253696a6c932cd1d74" dependencies = [ "heck", + "proc-macro-warning", + "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.28", ] [[package]] name = "libp2p-tcp" -version = "0.38.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4b257baf6df8f2df39678b86c578961d48cc8b68642a12f0f763f56c8e5858d" +checksum = "09bfdfb6f945c5c014b87872a0bdb6e0aef90e92f380ef57cd9013f118f9289d" dependencies = [ "futures", "futures-timer", "if-watch", "libc", - "libp2p-core 0.38.0", - "log", - "socket2 0.4.9", - "tokio", -] - -[[package]] -name = "libp2p-tls" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff08d13d0dc66e5e9ba6279c1de417b84fa0d0adc3b03e5732928c180ec02781" -dependencies = [ - "futures", - "futures-rustls", - 
"libp2p-core 0.39.2", + "libp2p-core", "libp2p-identity", - "rcgen 0.10.0", - "ring", - "rustls 0.20.8", - "thiserror", - "webpki 0.22.0", - "x509-parser 0.14.0", - "yasna", -] - -[[package]] -name = "libp2p-webrtc" -version = "0.4.0-alpha" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb6cd86dd68cba72308ea05de1cebf3ba0ae6e187c40548167955d4e3970f6a" -dependencies = [ - "async-trait", - "asynchronous-codec", - "bytes", - "futures", - "futures-timer", - "hex", - "if-watch", - "libp2p-core 0.38.0", - "libp2p-noise", "log", - "multihash 0.16.3", - "prost", - "prost-build", - "prost-codec", - "rand 0.8.5", - "rcgen 0.9.3", - "serde", - "stun", - "thiserror", - "tinytemplate", + "socket2 0.5.3", "tokio", - "tokio-util 0.7.8", - "webrtc", ] [[package]] name = "libp2p-websocket" -version = "0.40.0" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d705506030d5c0aaf2882437c70dab437605f21c5f9811978f694e6917a3b54" +checksum = "956d981ebc84abc3377e5875483c06d94ff57bc6b25f725047f9fd52592f72d4" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.38.0", + "libp2p-core", + "libp2p-identity", "log", "parking_lot 0.12.1", "quicksink", "rw-stream-sink", "soketto", "url", - "webpki-roots", + "webpki-roots 0.23.1", ] [[package]] name = "libp2p-yamux" -version = "0.42.0" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f63594a0aa818642d9d4915c791945053877253f08a3626f13416b5cd928a29" +checksum = "c0a9b42ab6de15c6f076d8fb11dc5f48d899a10b55a2e16b12be9012a05287b0" dependencies = [ "futures", - "libp2p-core 0.38.0", + "libp2p-core", "log", - "parking_lot 0.12.1", "thiserror", "yamux", ] @@ -4699,9 +4302,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.9" +version = "1.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db" 
+checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" dependencies = [ "cc", "pkg-config", @@ -4785,11 +4388,11 @@ dependencies = [ "regex", "serde", "serde_derive", - "sha2 0.10.6", + "sha2 0.10.7", "slog", "slog-async", "slog-term", - "smallvec", + "smallvec 1.11.0", "snap", "ssz_types", "strum", @@ -4829,12 +4432,18 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" +[[package]] +name = "linux-raw-sys" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" + [[package]] name = "lmdb-rkv" version = "0.14.0" source = "git+https://github.com/sigp/lmdb-rs?rev=f33845c6469b94265319aac0ed5085597862c27e#f33845c6469b94265319aac0ed5085597862c27e" dependencies = [ - "bitflags", + "bitflags 1.3.2", "byteorder", "libc", "lmdb-rkv-sys", @@ -4852,9 +4461,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg 1.1.0", "scopeguard", @@ -4870,12 +4479,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "logging" @@ -4906,11 +4512,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.8.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b6e8aaa3f231bb4bd57b84b2d5dc3ae7f350265df8aa96492e0bc394a1571909" +checksum = "718e8fae447df0c7e1ba7f5189829e63fd536945c8988d61444c19039f16b670" dependencies = [ - "hashbrown 0.12.3", + "hashbrown 0.13.2", ] [[package]] @@ -4968,7 +4574,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ - "regex-automata", + "regex-automata 0.1.10", ] [[package]] @@ -4979,9 +4585,15 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" +checksum = "67827e6ea8ee8a7c4a72227ef4fc08957040acffdb5f122733b24fa12daff41b" + +[[package]] +name = "maybe-uninit" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "md-5" @@ -5005,9 +4617,9 @@ dependencies = [ [[package]] name = "mediatype" -version = "0.19.13" +version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea6e62614ab2fc0faa58bb15102a0382d368f896a9fa4776592589ab55c4de7" +checksum = "8c408dc227d302f1496c84d9dc68c00fec6f56f9228a18f3023f976f3ca7c945" [[package]] name = "memchr" @@ -5026,9 +4638,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg 1.1.0", ] @@ -5047,24 +4659,24 @@ dependencies = [ [[package]] name = "metastruct" -version = "0.1.0" +version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "734788dec2091fe9afa39530ca2ea7994f4a2c9aff3dbfebb63f2c1945c6f10b" +checksum = "ccfbb8826226b09b05bb62a0937cf6abb16f1f7d4b746eb95a83db14aec60f06" dependencies = [ "metastruct_macro", ] [[package]] name = "metastruct_macro" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ded15e7570c2a507a23e6c3a1c8d74507b779476e43afe93ddfc261d44173d" +checksum = "37cb4045d5677b7da537f8cb5d0730d5b6414e3cc81c61e4b50e1f0cbdc73909" dependencies = [ - "darling 0.13.4", + "darling", "itertools", "proc-macro2", "quote", - "smallvec", + "smallvec 1.11.0", "syn 1.0.109", ] @@ -5091,19 +4703,19 @@ dependencies = [ [[package]] name = "migrations_internals" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c493c09323068c01e54c685f7da41a9ccf9219735c3766fbfd6099806ea08fbc" +checksum = "0f23f71580015254b020e856feac3df5878c2c7a8812297edd6c0a485ac9dada" dependencies = [ "serde", - "toml", + "toml 0.7.6", ] [[package]] name = "migrations_macros" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a8ff27a350511de30cdabb77147501c36ef02e0451d957abea2f30caffb2b58" +checksum = "cce3325ac70e67bbab5bd837a31cae01f1a6db64e0e744a33cb03a543469ef08" dependencies = [ "migrations_internals", "proc-macro2", @@ -5144,15 +4756,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.7.1" @@ -5164,14 +4767,13 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.6" 
+version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", - "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.45.0", + "windows-sys", ] [[package]] @@ -5207,7 +4809,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c580bfdd8803cce319b047d239559a22f809094aaea4ac13902a1fdcfcd4261" dependencies = [ "arrayref", - "bs58", + "bs58 0.4.0", "byteorder", "data-encoding", "multihash 0.16.3", @@ -5220,34 +4822,16 @@ dependencies = [ [[package]] name = "multiaddr" -version = "0.16.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aebdb21e90f81d13ed01dc84123320838e53963c2ca94b60b305d3fa64f31e" +checksum = "92a651988b3ed3ad1bc8c87d016bb92f6f395b84ed1db9b926b32b1fc5a2c8b5" dependencies = [ "arrayref", "byteorder", "data-encoding", + "libp2p-identity", "multibase", - "multihash 0.16.3", - "percent-encoding", - "serde", - "static_assertions", - "unsigned-varint 0.7.1", - "url", -] - -[[package]] -name = "multiaddr" -version = "0.17.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b36f567c7099511fa8612bbbb52dda2419ce0bdbacf31714e3a5ffdb766d3bd" -dependencies = [ - "arrayref", - "byteorder", - "data-encoding", - "log", - "multibase", - "multihash 0.17.0", + "multihash 0.19.0", "percent-encoding", "serde", "static_assertions", @@ -5275,18 +4859,17 @@ dependencies = [ "core2", "digest 0.10.7", "multihash-derive", - "sha2 0.10.6", + "sha2 0.10.7", "unsigned-varint 0.7.1", ] [[package]] name = "multihash" -version = "0.17.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" +checksum = 
"2fd59dcc2bbe70baabeac52cd22ae52c55eefe6c38ff11a9439f16a350a939f2" dependencies = [ "core2", - "multihash-derive", "unsigned-varint 0.7.1", ] @@ -5304,12 +4887,6 @@ dependencies = [ "synstructure", ] -[[package]] -name = "multimap" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" - [[package]] name = "multipart" version = "0.18.0" @@ -5330,29 +4907,15 @@ dependencies = [ [[package]] name = "multistream-select" -version = "0.11.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "363a84be6453a70e63513660f4894ef815daf88e3356bffcda9ca27d810ce83b" +checksum = "ea0df8e5eec2298a62b326ee4f0d7fe1a6b90a09dfcf9df37b38f947a8c42f19" dependencies = [ "bytes", "futures", "log", "pin-project", - "smallvec", - "unsigned-varint 0.7.1", -] - -[[package]] -name = "multistream-select" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8552ab875c1313b97b8d20cb857b9fd63e2d1d6a0a1b53ce9821e575405f27a" -dependencies = [ - "bytes", - "futures", - "log", - "pin-project", - "smallvec", + "smallvec 1.11.0", "unsigned-varint 0.7.1", ] @@ -5393,7 +4956,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9ea4302b9759a7a88242299225ea3688e63c85ea136371bb6cf94fd674efaab" dependencies = [ "anyhow", - "bitflags", + "bitflags 1.3.2", "byteorder", "libc", "netlink-packet-core", @@ -5477,7 +5040,7 @@ dependencies = [ "slog-term", "sloggers", "slot_clock", - "smallvec", + "smallvec 1.11.0", "ssz_types", "store", "strum", @@ -5494,7 +5057,7 @@ version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f3790c00a0150112de0f4cd161e3d7fc4b2d8a5542ffc35f099a2562aecb35c" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cc", "cfg-if", "libc", @@ -5507,10 +5070,9 @@ version = "0.24.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "libc", - "memoffset 0.6.5", ] [[package]] @@ -5519,7 +5081,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "libc", "static_assertions", @@ -5541,6 +5103,12 @@ dependencies = [ "validator_dir", ] +[[package]] +name = "nodrop" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" + [[package]] name = "nohash-hasher" version = "0.2.0" @@ -5608,7 +5176,7 @@ dependencies = [ "num-traits", "rand 0.7.3", "serde", - "smallvec", + "smallvec 1.11.0", "zeroize", ] @@ -5635,20 +5203,20 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" dependencies = [ "autocfg 1.1.0", ] [[package]] name = "num_cpus" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.2.6", + "hermit-abi 0.3.2", "libc", ] @@ -5663,36 +5231,18 @@ dependencies = [ [[package]] name = "object" -version = "0.30.3" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" +checksum = 
"8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" dependencies = [ "memchr", ] -[[package]] -name = "oid-registry" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38e20717fa0541f39bd146692035c37bedfa532b3e5071b35761082407546b2a" -dependencies = [ - "asn1-rs 0.3.1", -] - -[[package]] -name = "oid-registry" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" -dependencies = [ - "asn1-rs 0.5.2", -] - [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "oneshot_broadcast" @@ -5744,7 +5294,7 @@ version = "0.10.55" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "foreign-types", "libc", @@ -5761,7 +5311,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] @@ -5772,9 +5322,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.25.3+1.1.1t" +version = "111.26.0+1.1.1u" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "924757a6a226bf60da5f7dd0311a34d2b52283dd82ddeb103208ddc66362f80c" +checksum = "efc62c9f12b22b8f5208c23a7200a442b2e5999f8bdf80233852122b5a4f6f37" dependencies = [ "cc", ] @@ -5824,24 +5374,14 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "p256" -version = "0.11.1" +version = "0.13.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" dependencies = [ - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - "sha2 0.10.6", -] - -[[package]] -name = "p384" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" -dependencies = [ - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - "sha2 0.10.6", + "ecdsa 0.16.8", + "elliptic-curve 0.13.5", + "primeorder", + "sha2 0.10.7", ] [[package]] @@ -5870,15 +5410,15 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.5.0" +version = "3.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ddb756ca205bd108aee3c62c6d3c994e1df84a59b9d6d4a5ea42ee1fd5a9a28" +checksum = "dd8e946cc0cc711189c0b0249fb8b599cbeeab9784d83c415719368bb8d4ac64" dependencies = [ "arrayvec", "bitvec 1.0.1", "byte-slice-cast", "impl-trait-for-tuples", - "parity-scale-codec-derive 3.1.4", + "parity-scale-codec-derive 3.6.4", "serde", ] @@ -5896,9 +5436,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.1.4" +version = "3.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" +checksum = "2a296c3079b5fefbc499e1de58dc26c09b1b9a5952d26694ee89f04a43ebbb3e" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5930,7 +5470,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.7", + "parking_lot_core 0.9.8", ] [[package]] @@ -5943,28 +5483,28 @@ dependencies = [ "instant", "libc", "redox_syscall 0.2.16", - "smallvec", + "smallvec 1.11.0", "winapi", ] [[package]] name = 
"parking_lot_core" -version = "0.9.7" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.2.16", - "smallvec", - "windows-sys 0.45.0", + "redox_syscall 0.3.5", + "smallvec 1.11.0", + "windows-targets", ] [[package]] name = "paste" -version = "1.0.12" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "pbkdf2" @@ -6001,28 +5541,18 @@ dependencies = [ [[package]] name = "pem-rfc7468" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d159833a9105500e0398934e205e0773f0b27529557134ecfc51c27646adac" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" dependencies = [ "base64ct", ] [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" - -[[package]] -name = "petgraph" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" -dependencies = [ - "fixedbitset", - "indexmap", -] +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pharos" @@ -6036,40 +5566,40 @@ dependencies = [ [[package]] name = "phf" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "928c6535de93548188ef63bb7c4036bd415cd8f36ad25af44b9789b2ee72a48c" +checksum = 
"ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" dependencies = [ "phf_shared", ] [[package]] name = "phf_shared" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1fb5f6f826b772a8d4c0394209441e7d37cbbb967ae9c7e0e8134365c9ee676" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" dependencies = [ "siphasher", ] [[package]] name = "pin-project" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" +checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" +checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] @@ -6080,9 +5610,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57" [[package]] name = "pin-utils" @@ -6106,7 +5636,7 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.6", + "der 0.7.7", "spki 0.7.2", ] @@ -6130,9 +5660,9 @@ checksum = "e3d7ddaed09e0eb771a79ab0fd64609ba0afb0a8366421957936ad14cbd13630" [[package]] name = "plotters" -version = "0.3.4" +version = "0.3.5" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97" +checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" dependencies = [ "num-traits", "plotters-backend", @@ -6143,15 +5673,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" +checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" [[package]] name = "plotters-svg" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f" +checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" dependencies = [ "plotters-backend", ] @@ -6163,13 +5693,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ "autocfg 1.1.0", - "bitflags", + "bitflags 1.3.2", "cfg-if", "concurrent-queue", "libc", "log", - "pin-project-lite 0.2.9", - "windows-sys 0.48.0", + "pin-project-lite 0.2.10", + "windows-sys", ] [[package]] @@ -6180,7 +5710,7 @@ checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" dependencies = [ "cpufeatures", "opaque-debug", - "universal-hash 0.4.1", + "universal-hash", ] [[package]] @@ -6192,19 +5722,7 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug", - "universal-hash 0.4.1", -] - -[[package]] -name = "polyval" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef234e08c11dfcb2e56f79fd70f6f2eb7f025c0ce2333e82f4f0518ecad30c6" -dependencies = [ - "cfg-if", - "cpufeatures", - "opaque-debug", - "universal-hash 0.5.1", + "universal-hash", ] [[package]] @@ -6213,7 +5731,7 
@@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b7fa9f396f51dffd61546fd8573ee20592287996568e6175ceb0f8699ad75d" dependencies = [ - "base64 0.21.1", + "base64 0.21.2", "byteorder", "bytes", "fallible-iterator", @@ -6221,7 +5739,7 @@ dependencies = [ "md-5", "memchr", "rand 0.8.5", - "sha2 0.10.6", + "sha2 0.10.7", "stringprep", ] @@ -6260,13 +5778,12 @@ dependencies = [ ] [[package]] -name = "prettyplease" -version = "0.1.25" +name = "primeorder" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" +checksum = "3c2fcef82c0ec6eefcc179b978446c399b3cdf73c392c35604e399eee6df1ee3" dependencies = [ - "proc-macro2", - "syn 1.0.109", + "elliptic-curve 0.13.5", ] [[package]] @@ -6303,7 +5820,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" dependencies = [ "thiserror", - "toml", + "toml 0.5.11", ] [[package]] @@ -6337,10 +5854,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] -name = "proc-macro2" -version = "1.0.63" +name = "proc-macro-warning" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb" +checksum = "70550716265d1ec349c41f70dd4f964b4fd88394efe4405f0c1da679c4799a07" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.28", +] + +[[package]] +name = "proc-macro2" +version = "1.0.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] @@ -6374,94 +5902,27 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.18.1" +version = "0.21.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "83cd1b99916654a69008fd66b4f9397fbe08e6e51dfe23d4417acf5d3b8cb87c" +checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2" dependencies = [ "dtoa", "itoa", "parking_lot 0.12.1", - "prometheus-client-derive-text-encode", + "prometheus-client-derive-encode", ] [[package]] -name = "prometheus-client-derive-text-encode" -version = "0.3.0" +name = "prometheus-client-derive-encode" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a455fbcb954c1a7decf3c586e860fd7889cddf4b8e164be736dbac95a953cd" +checksum = "72b6a5217beb0ad503ee7fa752d451c905113d70721b937126158f3106a48cc1" dependencies = [ "proc-macro2", "quote", "syn 1.0.109", ] -[[package]] -name = "prost" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" -dependencies = [ - "bytes", - "prost-derive", -] - -[[package]] -name = "prost-build" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" -dependencies = [ - "bytes", - "heck", - "itertools", - "lazy_static", - "log", - "multimap", - "petgraph", - "prettyplease", - "prost", - "prost-types", - "regex", - "syn 1.0.109", - "tempfile", - "which", -] - -[[package]] -name = "prost-codec" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dc34979ff898b6e141106178981ce2596c387ea6e62533facfc61a37fc879c0" -dependencies = [ - "asynchronous-codec", - "bytes", - "prost", - "thiserror", - "unsigned-varint 0.7.1", -] - -[[package]] -name = "prost-derive" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" -dependencies = [ - "anyhow", - "itertools", - 
"proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "prost-types" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" -dependencies = [ - "prost", -] - [[package]] name = "proto_array" version = "0.2.0" @@ -6516,6 +5977,19 @@ dependencies = [ "byteorder", ] +[[package]] +name = "quick-protobuf-codec" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ededb1cd78531627244d51dd0c7139fbe736c7d57af0092a76f0ffb2f56e98" +dependencies = [ + "asynchronous-codec", + "bytes", + "quick-protobuf", + "thiserror", + "unsigned-varint 0.7.1", +] + [[package]] name = "quickcheck" version = "0.9.2" @@ -6550,29 +6024,11 @@ dependencies = [ "pin-project-lite 0.1.12", ] -[[package]] -name = "quinn-proto" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c10f662eee9c94ddd7135043e544f3c82fa839a1e7b865911331961b53186c" -dependencies = [ - "bytes", - "rand 0.8.5", - "ring", - "rustc-hash", - "rustls 0.20.8", - "slab", - "thiserror", - "tinyvec", - "tracing", - "webpki 0.22.0", -] - [[package]] name = "quote" -version = "1.0.27" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" +checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" dependencies = [ "proc-macro2", ] @@ -6669,7 +6125,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", ] [[package]] @@ -6712,38 +6168,13 @@ dependencies = [ "num_cpus", ] -[[package]] -name = "rcgen" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" -dependencies = [ - "pem", - "ring", - "time 0.3.21", - "x509-parser 0.13.2", - "yasna", -] - -[[package]] -name = "rcgen" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" -dependencies = [ - "pem", - "ring", - "time 0.3.21", - "yasna", -] - [[package]] name = "redox_syscall" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -6752,7 +6183,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -6761,20 +6192,21 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.8.1" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" +checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.1", + "regex-automata 0.3.4", + "regex-syntax 0.7.4", ] [[package]] @@ -6786,6 +6218,17 @@ dependencies = [ "regex-syntax 0.6.29", ] +[[package]] +name = "regex-automata" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7b6d6190b7594385f61bd3911cd1be99dfddcfc365a4160cc2ab5bff4aed294" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.7.4", +] + [[package]] name 
= "regex-syntax" version = "0.6.29" @@ -6794,9 +6237,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.1" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" +checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" [[package]] name = "reqwest" @@ -6804,7 +6247,7 @@ version = "0.11.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ - "base64 0.21.1", + "base64 0.21.2", "bytes", "encoding_rs", "futures-core", @@ -6822,15 +6265,15 @@ dependencies = [ "native-tls", "once_cell", "percent-encoding", - "pin-project-lite 0.2.9", - "rustls 0.21.1", + "pin-project-lite 0.2.10", + "rustls 0.21.5", "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", - "tokio-rustls 0.24.0", + "tokio-rustls 0.24.1", "tokio-util 0.7.8", "tower-service", "url", @@ -6838,8 +6281,8 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots", - "winreg", + "webpki-roots 0.22.6", + "winreg 0.10.1", ] [[package]] @@ -6925,17 +6368,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "rtcp" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1919efd6d4a6a85d13388f9487549bb8e359f17198cc03ffd72f79b553873691" -dependencies = [ - "bytes", - "thiserror", - "webrtc-util", -] - [[package]] name = "rtnetlink" version = "0.10.1" @@ -6951,32 +6383,18 @@ dependencies = [ "tokio", ] -[[package]] -name = "rtp" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2a095411ff00eed7b12e4c6a118ba984d113e1079582570d56a5ee723f11f80" -dependencies = [ - "async-trait", - "bytes", - "rand 0.8.5", - "serde", - "thiserror", - "webrtc-util", -] - 
[[package]] name = "rusqlite" version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" dependencies = [ - "bitflags", + "bitflags 1.3.2", "fallible-iterator", "fallible-streaming-iterator", - "hashlink 0.8.2", + "hashlink 0.8.3", "libsqlite3-sys", - "smallvec", + "smallvec 1.11.0", ] [[package]] @@ -7012,30 +6430,34 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.17", -] - -[[package]] -name = "rusticata-macros" -version = "4.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" -dependencies = [ - "nom 7.1.3", + "semver 1.0.18", ] [[package]] name = "rustix" -version = "0.37.19" +version = "0.37.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d" +checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" dependencies = [ - "bitflags", + "bitflags 1.3.2", "errno", "io-lifetimes", "libc", - "linux-raw-sys", - "windows-sys 0.48.0", + "linux-raw-sys 0.3.8", + "windows-sys", +] + +[[package]] +name = "rustix" +version = "0.38.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" +dependencies = [ + "bitflags 2.3.3", + "errno", + "libc", + "linux-raw-sys 0.4.5", + "windows-sys", ] [[package]] @@ -7065,23 +6487,23 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.1" +version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c911ba11bc8433e811ce56fde130ccf32f5127cab0e0194e9c68c5a5b671791e" +checksum = "79ea77c539259495ce8ca47f53e66ae0330a8819f67e23ac96ca02f50e7b7d36" 
dependencies = [ "log", "ring", - "rustls-webpki", + "rustls-webpki 0.101.2", "sct 0.7.0", ] [[package]] name = "rustls-pemfile" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.1", + "base64 0.21.2", ] [[package]] @@ -7095,16 +6517,26 @@ dependencies = [ ] [[package]] -name = "rustversion" -version = "1.0.12" +name = "rustls-webpki" +version = "0.101.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" +checksum = "513722fd73ad80a71f72b61009ea1b584bcfa1483ca93949c8f290298837fa59" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "rw-stream-sink" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26338f5e09bb721b85b135ea05af7767c90b52f6de4f087d4f4a3a9d64e7dc04" +checksum = "d8c9026ff5d2f23da5e45bbc283f156383001bfb09c4e44256d02c1a685fe9a1" dependencies = [ "futures", "pin-project", @@ -7113,9 +6545,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.13" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "safe_arith" @@ -7133,7 +6565,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecbd2eb639fd7cab5804a0837fe373cc2172d15437e804c054a9fb885cb923b0" dependencies = [ - "cipher 0.3.0", + 
"cipher", ] [[package]] @@ -7147,21 +6579,21 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.7.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b569c32c806ec3abdf3b5869fb8bf1e0d275a7c1c9b0b05603d9464632649edf" +checksum = "35c0a159d0c45c12b20c5a844feb1fe4bea86e28f17b92a5f0c42193634d3782" dependencies = [ "cfg-if", "derive_more", - "parity-scale-codec 3.5.0", + "parity-scale-codec 3.6.4", "scale-info-derive", ] [[package]] name = "scale-info-derive" -version = "2.6.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53012eae69e5aa5c14671942a5dd47de59d4cdcff8532a6dd0e081faf1119482" +checksum = "912e55f6d20e0e80d63733872b40e1227c0bce1e1ab81ba67d696339bfd7fd29" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -7171,11 +6603,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys 0.42.0", + "windows-sys", ] [[package]] @@ -7195,9 +6627,9 @@ checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" [[package]] name = "scopeguard" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "scrypt" @@ -7231,18 +6663,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "sdp" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d22a5ef407871893fd72b4562ee15e4742269b173959db4b8df6f538c414e13" -dependencies = [ - "rand 0.8.5", - "substring", - "thiserror", - "url", -] - [[package]] 
name = "sec1" version = "0.3.0" @@ -7259,12 +6679,12 @@ dependencies = [ [[package]] name = "sec1" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0aec48e813d6b90b15f0b8948af3c63483992dee44c03e9930b3eebdabe046e" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ "base16ct 0.2.0", - "der 0.7.6", + "der 0.7.7", "generic-array", "pkcs8 0.10.2", "subtle", @@ -7273,11 +6693,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.1" +version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -7286,9 +6706,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" dependencies = [ "core-foundation-sys", "libc", @@ -7305,9 +6725,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" +checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" dependencies = [ "serde", ] @@ -7334,13 +6754,24 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.163" +version = "1.0.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" +checksum = "0ea67f183f058fe88a4e3ec6e2788e003840893b91bac4559cabedd00863b3ed" 
dependencies = [ "serde_derive", ] +[[package]] +name = "serde-hex" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca37e3e4d1b39afd7ff11ee4e947efae85adfddf4841787bfa47c470e96dc26d" +dependencies = [ + "array-init", + "serde", + "smallvec 0.6.14", +] + [[package]] name = "serde_array_query" version = "0.1.0" @@ -7363,20 +6794,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.163" +version = "1.0.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" +checksum = "24e744d7782b686ab3b73267ef05697159cc0e5abbed3f47f9933165e5219036" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] name = "serde_json" -version = "1.0.96" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" +checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" dependencies = [ "itoa", "ryu", @@ -7385,9 +6816,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b1b6471d7496b051e03f1958802a73f88b947866f5146f329e47e36554f4e55" +checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" dependencies = [ "itoa", "serde", @@ -7395,13 +6826,22 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.12" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcec881020c684085e55a25f7fd888954d56609ef363479dc5a1305eb0d40cab" +checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", +] + +[[package]] +name = "serde_spanned" +version = "0.6.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" +dependencies = [ + "serde", ] [[package]] @@ -7432,7 +6872,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ - "darling 0.13.4", + "darling", "proc-macro2", "quote", "syn 1.0.109", @@ -7444,7 +6884,7 @@ version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" dependencies = [ - "indexmap", + "indexmap 1.9.3", "ryu", "serde", "yaml-rust", @@ -7500,9 +6940,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ "cfg-if", "cpufeatures", @@ -7584,7 +7024,7 @@ dependencies = [ "num-bigint", "num-traits", "thiserror", - "time 0.3.21", + "time 0.3.24", ] [[package]] @@ -7716,7 +7156,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.21", + "time 0.3.24", ] [[package]] @@ -7761,7 +7201,7 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.21", + "time 0.3.24", ] [[package]] @@ -7800,9 +7240,18 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.10.0" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" +checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" +dependencies = [ + "maybe-uninit", +] + +[[package]] +name = "smallvec" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" [[package]] name = "snap" @@ -7812,18 +7261,18 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774d05a3edae07ce6d68ea6984f3c05e9bba8927e3dd591e3b479e5b03213d0d" +checksum = "5ccba027ba85743e09d15c03296797cad56395089b832b48b5a5217880f57733" dependencies = [ - "aes-gcm 0.9.4", + "aes-gcm", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-rc.2", + "curve25519-dalek 4.0.0-rc.1", "rand_core 0.6.4", "ring", "rustc_version 0.4.0", - "sha2 0.10.6", + "sha2 0.10.7", "subtle", ] @@ -7844,7 +7293,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] @@ -7855,7 +7304,6 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "flate2", "futures", "httparse", "log", @@ -7886,7 +7334,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" dependencies = [ "base64ct", - "der 0.7.6", + "der 0.7.7", ] [[package]] @@ -7916,9 +7364,9 @@ dependencies = [ [[package]] name = "ssz_types" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e43767964a80b2fdeda7a79a57a2b6cbca966688d5b81da8fe91140a94f552a1" +checksum = "382939886cb24ee8ac885d09116a60f6262d827c7a9e36012b4f6d3d0116d0b3" dependencies = [ "arbitrary", "derivative", @@ -7927,7 +7375,7 @@ dependencies = [ "itertools", "serde", "serde_derive", - "smallvec", + "smallvec 1.11.0", "tree_hash", "typenum", ] @@ -7952,7 +7400,7 @@ dependencies = [ "merkle_proof", "rayon", "safe_arith", - "smallvec", + 
"smallvec 1.11.0", "ssz_types", "tokio", "tree_hash", @@ -8004,9 +7452,9 @@ dependencies = [ [[package]] name = "stringprep" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1" +checksum = "db3737bde7edce97102e0e2b15365bf7a20bfdb5f60f4f9e8d7004258a51a8da" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -8046,34 +7494,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "stun" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7e94b1ec00bad60e6410e058b52f1c66de3dc5fe4d62d09b3e52bb7d3b73e25" -dependencies = [ - "base64 0.13.1", - "crc", - "lazy_static", - "md-5", - "rand 0.8.5", - "ring", - "subtle", - "thiserror", - "tokio", - "url", - "webrtc-util", -] - -[[package]] -name = "substring" -version = "1.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ee6433ecef213b2e72f587ef64a2f5943e7cd16fbd82dbe8bc07486c534c86" -dependencies = [ - "autocfg 1.1.0", -] - [[package]] name = "subtle" version = "2.4.1" @@ -8086,11 +7506,11 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a99807a055ff4ff5d249bb84c80d9eabb55ca3c452187daae43fd5b51ef695" dependencies = [ - "darling 0.13.4", + "darling", "itertools", "proc-macro2", "quote", - "smallvec", + "smallvec 1.11.0", "syn 1.0.109", ] @@ -8100,11 +7520,11 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75b9e5728aa1a87141cefd4e7509903fc01fa0dcb108022b1e841a67c5159fc5" dependencies = [ - "darling 0.13.4", + "darling", "itertools", "proc-macro2", "quote", - "smallvec", + "smallvec 1.11.0", "syn 1.0.109", ] @@ -8130,9 +7550,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.16" +version = "2.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a6f671d4b5ffdb8eadec19c0ae67fe2639df8684bd7bc4b83d986b8db549cf01" +checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" dependencies = [ "proc-macro2", "quote", @@ -8178,7 +7598,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "system-configuration-sys", ] @@ -8246,15 +7666,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.5.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" +checksum = "5486094ee78b2e5038a6382ed7645bc084dc2ec433426ca4c3cb61e2007b8998" dependencies = [ "cfg-if", - "fastrand", + "fastrand 2.0.0", "redox_syscall 0.3.5", - "rustix", - "windows-sys 0.45.0", + "rustix 0.38.4", + "windows-sys", ] [[package]] @@ -8307,7 +7727,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_json", - "sha2 0.10.6", + "sha2 0.10.7", ] [[package]] @@ -8321,22 +7741,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.40" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" +checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.40" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" +checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] @@ -8371,10 +7791,11 @@ dependencies = [ [[package]] name = "time" -version = "0.3.21" +version = "0.3.24" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" +checksum = "b79eabcd964882a646b3584543ccabeae7869e9ac32a46f6f22b7a5bd405308b" dependencies = [ + "deranged", "itoa", "libc", "num_threads", @@ -8391,9 +7812,9 @@ checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" +checksum = "eb71511c991639bb078fd5bf97757e03914361c48100d52878b8e52b46fb92cd" dependencies = [ "time-core", ] @@ -8464,21 +7885,22 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.28.1" +version = "1.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" +checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" dependencies = [ "autocfg 1.1.0", + "backtrace", "bytes", "libc", "mio", "num_cpus", "parking_lot 0.12.1", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "signal-hook-registry", "socket2 0.4.9", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] @@ -8487,7 +7909,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "tokio", ] @@ -8499,7 +7921,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] @@ -8528,7 +7950,7 @@ dependencies = [ "parking_lot 0.12.1", "percent-encoding", "phf", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "postgres-protocol", 
"postgres-types", "socket2 0.5.3", @@ -8560,11 +7982,11 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0d409377ff5b1e3ca6437aa86c1eb7d40c134bfec254e44c830defa92669db5" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.1", + "rustls 0.21.5", "tokio", ] @@ -8575,7 +7997,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "tokio", "tokio-util 0.7.8", ] @@ -8606,7 +8028,7 @@ dependencies = [ "tokio-rustls 0.23.4", "tungstenite 0.17.3", "webpki 0.22.0", - "webpki-roots", + "webpki-roots 0.22.6", ] [[package]] @@ -8620,7 +8042,7 @@ dependencies = [ "futures-io", "futures-sink", "log", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "slab", "tokio", ] @@ -8633,9 +8055,8 @@ checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes", "futures-core", - "futures-io", "futures-sink", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "slab", "tokio", "tracing", @@ -8650,6 +8071,40 @@ dependencies = [ "serde", ] +[[package]] +name = "toml" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.19.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" +dependencies = [ + "indexmap 2.0.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + [[package]] name = "tower" version = "0.4.13" @@ -8659,7 +8114,7 @@ dependencies = [ "futures-core", "futures-util", "pin-project", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "tokio", "tower-layer", "tower-service", @@ -8686,20 +8141,20 @@ checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" +checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] @@ -8744,7 +8199,7 @@ dependencies = [ "once_cell", "regex", "sharded-slab", - "smallvec", + "smallvec 1.11.0", "thread_local", "tracing", "tracing-core", @@ -8772,22 +8227,22 @@ dependencies = [ [[package]] name = "tree_hash" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8488e272d45adc36db8f6c99d09613f58a7cd06c7b347546c87d9a29ca11e8" +checksum = "5c998ac5fe2b07c025444bdd522e6258110b63861c6698eedc610c071980238d" dependencies = [ "ethereum-types 0.14.1", "ethereum_hashing", - "smallvec", + "smallvec 1.11.0", ] [[package]] name = "tree_hash_derive" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83baa26594d96889e5fef7638dfb0f41e16070301a5cf6da99b9a6a0804cec89" +checksum = "84303a9c7cda5f085a3ed9cd241d1e95e04d88aab1d679b02f212e653537ba86" dependencies = [ - "darling 0.13.4", + "darling", "quote", "syn 1.0.109", ] @@ -8819,7 
+8274,7 @@ dependencies = [ "ipnet", "lazy_static", "rand 0.8.5", - "smallvec", + "smallvec 1.11.0", "socket2 0.4.9", "thiserror", "tinyvec", @@ -8841,7 +8296,7 @@ dependencies = [ "lru-cache", "parking_lot 0.12.1", "resolv-conf", - "smallvec", + "smallvec 1.11.0", "thiserror", "tokio", "tracing", @@ -8894,25 +8349,6 @@ dependencies = [ "webpki 0.22.0", ] -[[package]] -name = "turn" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4712ee30d123ec7ae26d1e1b218395a16c87cdbaf4b3925d170d684af62ea5e8" -dependencies = [ - "async-trait", - "base64 0.13.1", - "futures", - "log", - "md-5", - "rand 0.8.5", - "ring", - "stun", - "thiserror", - "tokio", - "webrtc-util", -] - [[package]] name = "twoway" version = "0.1.8" @@ -8967,7 +8403,7 @@ dependencies = [ "serde_with", "serde_yaml", "slog", - "smallvec", + "smallvec 1.11.0", "ssz_types", "state_processing", "strum", @@ -9016,9 +8452,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.8" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" +checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" [[package]] name = "unicode-normalization" @@ -9051,16 +8487,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "universal-hash" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" -dependencies = [ - "crypto-common", - "subtle", -] - [[package]] name = "unsigned-varint" version = "0.6.0" @@ -9098,12 +8524,12 @@ dependencies = [ [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = 
"50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", - "idna 0.3.0", + "idna 0.4.0", "percent-encoding", ] @@ -9119,19 +8545,10 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "serde", ] -[[package]] -name = "uuid" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2" -dependencies = [ - "getrandom 0.2.9", -] - [[package]] name = "validator_client" version = "0.3.5" @@ -9235,15 +8652,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" -[[package]] -name = "waitgroup" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1f50000a783467e6c0200f9d10642f4bc424e39efc1b770203e88b488f79292" -dependencies = [ - "atomic-waker", -] - [[package]] name = "waker-fn" version = "1.1.0" @@ -9262,11 +8670,10 @@ dependencies = [ [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -9338,9 +8745,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ 
-9348,24 +8755,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.36" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if", "js-sys", @@ -9375,9 +8782,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9385,22 +8792,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" 
[[package]] name = "wasm-streams" @@ -9465,9 +8872,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.63" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -9530,211 +8937,12 @@ dependencies = [ ] [[package]] -name = "webrtc" -version = "0.6.0" +name = "webpki-roots" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3bc9049bdb2cea52f5fd4f6f728184225bdb867ed0dc2410eab6df5bdd67bb" +checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" dependencies = [ - "arc-swap", - "async-trait", - "bytes", - "hex", - "interceptor", - "lazy_static", - "log", - "rand 0.8.5", - "rcgen 0.9.3", - "regex", - "ring", - "rtcp", - "rtp", - "rustls 0.19.1", - "sdp", - "serde", - "serde_json", - "sha2 0.10.6", - "stun", - "thiserror", - "time 0.3.21", - "tokio", - "turn", - "url", - "waitgroup", - "webrtc-data", - "webrtc-dtls", - "webrtc-ice", - "webrtc-mdns", - "webrtc-media", - "webrtc-sctp", - "webrtc-srtp", - "webrtc-util", -] - -[[package]] -name = "webrtc-data" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef36a4d12baa6e842582fe9ec16a57184ba35e1a09308307b67d43ec8883100" -dependencies = [ - "bytes", - "derive_builder", - "log", - "thiserror", - "tokio", - "webrtc-sctp", - "webrtc-util", -] - -[[package]] -name = "webrtc-dtls" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a00f4242f2db33307347bd5be53263c52a0331c96c14292118c9a6bb48d267" -dependencies = [ - "aes 0.6.0", - "aes-gcm 0.10.2", - "async-trait", - "bincode", - "block-modes", - "byteorder", - "ccm", - "curve25519-dalek 3.2.0", - "der-parser 8.2.0", - "elliptic-curve 0.12.3", - "hkdf", - "hmac 0.12.1", 
- "log", - "p256", - "p384", - "rand 0.8.5", - "rand_core 0.6.4", - "rcgen 0.10.0", - "ring", - "rustls 0.19.1", - "sec1 0.3.0", - "serde", - "sha1", - "sha2 0.10.6", - "signature 1.6.4", - "subtle", - "thiserror", - "tokio", - "webpki 0.21.4", - "webrtc-util", - "x25519-dalek 2.0.0-rc.2", - "x509-parser 0.13.2", -] - -[[package]] -name = "webrtc-ice" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "465a03cc11e9a7d7b4f9f99870558fe37a102b65b93f8045392fef7c67b39e80" -dependencies = [ - "arc-swap", - "async-trait", - "crc", - "log", - "rand 0.8.5", - "serde", - "serde_json", - "stun", - "thiserror", - "tokio", - "turn", - "url", - "uuid 1.3.3", - "waitgroup", - "webrtc-mdns", - "webrtc-util", -] - -[[package]] -name = "webrtc-mdns" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f08dfd7a6e3987e255c4dbe710dde5d94d0f0574f8a21afa95d171376c143106" -dependencies = [ - "log", - "socket2 0.4.9", - "thiserror", - "tokio", - "webrtc-util", -] - -[[package]] -name = "webrtc-media" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f72e1650a8ae006017d1a5280efb49e2610c19ccc3c0905b03b648aee9554991" -dependencies = [ - "byteorder", - "bytes", - "rand 0.8.5", - "rtp", - "thiserror", -] - -[[package]] -name = "webrtc-sctp" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d47adcd9427eb3ede33d5a7f3424038f63c965491beafcc20bc650a2f6679c0" -dependencies = [ - "arc-swap", - "async-trait", - "bytes", - "crc", - "log", - "rand 0.8.5", - "thiserror", - "tokio", - "webrtc-util", -] - -[[package]] -name = "webrtc-srtp" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6183edc4c1c6c0175f8812eefdce84dfa0aea9c3ece71c2bf6ddd3c964de3da5" -dependencies = [ - "aead 0.4.3", - "aes 0.7.5", - "aes-gcm 0.9.4", - "async-trait", - "byteorder", - "bytes", - "ctr 
0.8.0", - "hmac 0.11.0", - "log", - "rtcp", - "rtp", - "sha-1 0.9.8", - "subtle", - "thiserror", - "tokio", - "webrtc-util", -] - -[[package]] -name = "webrtc-util" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f1db1727772c05cf7a2cfece52c3aca8045ca1e176cd517d323489aa3c6d87" -dependencies = [ - "async-trait", - "bitflags", - "bytes", - "cc", - "ipnet", - "lazy_static", - "libc", - "log", - "nix 0.24.3", - "rand 0.8.5", - "thiserror", - "tokio", - "winapi", + "rustls-webpki 0.100.1", ] [[package]] @@ -9756,9 +8964,9 @@ checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" [[package]] name = "widestring" -version = "0.5.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" +checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" [[package]] name = "wildmatch" @@ -9816,7 +9024,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets 0.48.0", + "windows-targets", ] [[package]] @@ -9831,75 +9039,30 @@ dependencies = [ "winapi", ] -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - -[[package]] -name = "windows-sys" -version = "0.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" -dependencies = [ - "windows-targets 0.42.2", -] - 
[[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.0", + "windows-targets", ] [[package]] name = "windows-targets" -version = "0.42.2" +version = "0.48.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - -[[package]] -name = "windows-targets" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" -dependencies = [ - "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_gnullvm", "windows_aarch64_msvc 0.48.0", "windows_i686_gnu 0.48.0", "windows_i686_msvc 0.48.0", "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_gnullvm", "windows_x86_64_msvc 0.48.0", ] -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" - [[package]] name = "windows_aarch64_gnullvm" version = "0.48.0" @@ -9912,12 +9075,6 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" - [[package]] name 
= "windows_aarch64_msvc" version = "0.48.0" @@ -9930,12 +9087,6 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" -[[package]] -name = "windows_i686_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" - [[package]] name = "windows_i686_gnu" version = "0.48.0" @@ -9948,12 +9099,6 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" -[[package]] -name = "windows_i686_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" - [[package]] name = "windows_i686_msvc" version = "0.48.0" @@ -9966,24 +9111,12 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" - [[package]] name = "windows_x86_64_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" - [[package]] name = "windows_x86_64_gnullvm" version = "0.48.0" @@ -9996,18 +9129,21 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" - [[package]] name = "windows_x86_64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +[[package]] +name = "winnow" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46aab759304e4d7b2075a9aecba26228bb073ee8c50db796b2c72c676b5d807" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.10.1" @@ -10017,6 +9153,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys", +] + [[package]] name = "ws_stream_wasm" version = "0.7.4" @@ -10062,60 +9208,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "x25519-dalek" -version = "2.0.0-rc.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fabd6e16dd08033932fc3265ad4510cc2eab24656058a6dcb107ffe274abcc95" -dependencies = [ - "curve25519-dalek 4.0.0-rc.2", - "rand_core 0.6.4", - "serde", - "zeroize", -] - -[[package]] -name = "x509-parser" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb9bace5b5589ffead1afb76e43e34cff39cd0f3ce7e170ae0c29e53b88eb1c" -dependencies = [ - "asn1-rs 0.3.1", - "base64 0.13.1", - "data-encoding", - "der-parser 7.0.0", - "lazy_static", - "nom 7.1.3", - "oid-registry 0.4.0", - "ring", - "rusticata-macros", - "thiserror", - "time 0.3.21", -] - -[[package]] -name = "x509-parser" -version = "0.14.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" -dependencies = [ - "asn1-rs 0.5.2", - "base64 0.13.1", - "data-encoding", - "der-parser 8.2.0", - "lazy_static", - "nom 7.1.3", - "oid-registry 0.6.1", - "rusticata-macros", - "thiserror", - "time 0.3.21", -] - [[package]] name = "xml-rs" -version = "0.8.11" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1690519550bfa95525229b9ca2350c63043a4857b3b0013811b2ccf4a2420b01" +checksum = "47430998a7b5d499ccee752b41567bc3afc57e1327dc855b1a2aa44ce29b5fa1" [[package]] name = "xmltree" @@ -10149,15 +9246,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "yasna" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" -dependencies = [ - "time 0.3.21", -] - [[package]] name = "zeroize" version = "1.6.0" @@ -10175,7 +9263,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] diff --git a/Cross.toml b/Cross.toml index 9c3e441cba..d5f7a5d506 100644 --- a/Cross.toml +++ b/Cross.toml @@ -1,5 +1,5 @@ [target.x86_64-unknown-linux-gnu] -dockerfile = './scripts/cross/Dockerfile' +pre-build = ["apt-get install -y cmake clang-3.9"] [target.aarch64-unknown-linux-gnu] -dockerfile = './scripts/cross/Dockerfile' +pre-build = ["apt-get install -y cmake clang-3.9"] diff --git a/Dockerfile b/Dockerfile index be01ad7c57..f07c42dd85 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ FROM rust:1.68.2-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . 
lighthouse ARG FEATURES ARG PROFILE=release @@ -13,4 +13,4 @@ RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-reco ca-certificates \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse +COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse \ No newline at end of file diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index 9880a8ca61..694402a3d7 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -9,12 +9,9 @@ use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, - libp2p::{ - core::connection::ConnectionId, - swarm::{ - behaviour::{ConnectionEstablished, FromSwarm}, - NetworkBehaviour, - }, + libp2p::swarm::{ + behaviour::{ConnectionEstablished, FromSwarm}, + ConnectionId, NetworkBehaviour, }, rpc::methods::{MetaData, MetaDataV2}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, @@ -170,7 +167,7 @@ pub async fn create_api_server_on_port( local_addr: EXTERNAL_ADDR.parse().unwrap(), send_back_addr: EXTERNAL_ADDR.parse().unwrap(), }; - let connection_id = ConnectionId::new(1); + let connection_id = ConnectionId::new_unchecked(1); pm.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, connection_id, diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index 9b15694211..785206b757 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -1,6 +1,6 @@ use crate::Context; use beacon_chain::BeaconChainTypes; -use lighthouse_metrics::{Encoder, TextEncoder}; +use lighthouse_metrics::TextEncoder; use lighthouse_network::prometheus_client::encoding::text::encode; use malloc_utils::scrape_allocator_metrics; @@ -9,7 +9,7 @@ pub use 
lighthouse_metrics::*; pub fn gather_prometheus_metrics( ctx: &Context, ) -> std::result::Result { - let mut buffer = vec![]; + let mut buffer = String::new(); let encoder = TextEncoder::new(); // There are two categories of metrics: @@ -50,7 +50,7 @@ pub fn gather_prometheus_metrics( } encoder - .encode(&lighthouse_metrics::gather(), &mut buffer) + .encode_utf8(&lighthouse_metrics::gather(), &mut buffer) .unwrap(); // encode gossipsub metrics also if they exist if let Some(registry) = ctx.gossipsub_registry.as_ref() { @@ -59,5 +59,5 @@ pub fn gather_prometheus_metrics( } } - String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e)) + Ok(buffer) } diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 6d056d8350..f71845fed2 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2021" [dependencies] -discv5 = { version = "0.3.0", features = ["libp2p"]} +discv5 = { version = "0.3.1", features = ["libp2p"] } unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } ssz_types = "0.5.3" @@ -40,15 +40,15 @@ directory = { path = "../../common/directory" } regex = "1.5.5" strum = { version = "0.24.0", features = ["derive"] } superstruct = "0.5.0" -prometheus-client = "0.18.0" +prometheus-client = "0.21.0" unused_port = { path = "../../common/unused_port" } delay_map = "0.3.0" void = "1" [dependencies.libp2p] -version = "0.50.0" +version = "0.52" default-features = false -features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa"] +features = ["websocket", "identify", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa"] [dev-dependencies] slog-term = "2.6.0" diff --git a/beacon_node/lighthouse_network/src/config.rs 
b/beacon_node/lighthouse_network/src/config.rs index 9467526458..6c8f20a24b 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -6,10 +6,7 @@ use directory::{ DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR, }; use discv5::{Discv5Config, Discv5ConfigBuilder}; -use libp2p::gossipsub::{ - FastMessageId, GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, MessageId, - RawGossipsubMessage, ValidationMode, -}; +use libp2p::gossipsub; use libp2p::Multiaddr; use serde_derive::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; @@ -83,7 +80,7 @@ pub struct Config { /// Gossipsub configuration parameters. #[serde(skip)] - pub gs_config: GossipsubConfig, + pub gs_config: gossipsub::Config, /// Discv5 configuration parameters. #[serde(skip)] @@ -265,7 +262,7 @@ impl Default for Config { // Note: Using the default config here. Use `gossipsub_config` function for getting // Lighthouse specific configuration for gossipsub. - let gs_config = GossipsubConfigBuilder::default() + let gs_config = gossipsub::ConfigBuilder::default() .build() .expect("valid gossipsub configuration"); @@ -416,16 +413,16 @@ impl From for NetworkLoad { } /// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork. 
-pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> GossipsubConfig { +pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> gossipsub::Config { // The function used to generate a gossipsub message id // We use the first 8 bytes of SHA256(topic, data) for content addressing - let fast_gossip_message_id = |message: &RawGossipsubMessage| { + let fast_gossip_message_id = |message: &gossipsub::RawMessage| { let data = [message.topic.as_str().as_bytes(), &message.data].concat(); - FastMessageId::from(&Sha256::digest(data)[..8]) + gossipsub::FastMessageId::from(&Sha256::digest(data)[..8]) }; fn prefix( prefix: [u8; 4], - message: &GossipsubMessage, + message: &gossipsub::Message, fork_context: Arc, ) -> Vec { let topic_bytes = message.topic.as_str().as_bytes(); @@ -451,8 +448,8 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos } let is_merge_enabled = fork_context.fork_exists(ForkName::Merge); - let gossip_message_id = move |message: &GossipsubMessage| { - MessageId::from( + let gossip_message_id = move |message: &gossipsub::Message| { + gossipsub::MessageId::from( &Sha256::digest( prefix(MESSAGE_DOMAIN_VALID_SNAPPY, message, fork_context.clone()).as_slice(), )[..20], @@ -461,7 +458,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos let load = NetworkLoad::from(network_load); - GossipsubConfigBuilder::default() + gossipsub::ConfigBuilder::default() .max_transmit_size(gossip_max_size(is_merge_enabled)) .heartbeat_interval(load.heartbeat_interval) .mesh_n(load.mesh_n) @@ -474,7 +471,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos .max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large .history_gossip(load.history_gossip) .validate_messages() // require validation before propagation - .validation_mode(ValidationMode::Anonymous) + .validation_mode(gossipsub::ValidationMode::Anonymous) .duplicate_cache_time(DUPLICATE_CACHE_TIME) .message_id_fn(gossip_message_id) 
.fast_message_id_fn(fast_gossip_message_id) diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index f85c4b3e5c..ef22f816a7 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -7,7 +7,7 @@ use super::ENR_FILENAME; use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use crate::NetworkConfig; use discv5::enr::EnrKey; -use libp2p::core::identity::Keypair; +use libp2p::identity::Keypair; use slog::{debug, warn}; use ssz::{Decode, Encode}; use ssz_types::BitVector; @@ -133,7 +133,7 @@ pub fn build_or_load_enr( // Build the local ENR. // Note: Discovery should update the ENR record's IP to the external IP as seen by the // majority of our peers, if the CLI doesn't expressly forbid it. - let enr_key = CombinedKey::from_libp2p(&local_key)?; + let enr_key = CombinedKey::from_libp2p(local_key)?; let mut local_enr = build_enr::(&enr_key, config, enr_fork_id)?; use_or_load_enr(&enr_key, &mut local_enr, config, log)?; diff --git a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs index 5ce0c55cac..753da6292c 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs @@ -1,10 +1,9 @@ //! ENR extension trait to support libp2p integration. + use crate::{Enr, Multiaddr, PeerId}; use discv5::enr::{CombinedKey, CombinedPublicKey}; -use libp2p::{ - core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol}, - identity::secp256k1, -}; +use libp2p::core::multiaddr::Protocol; +use libp2p::identity::{ed25519, secp256k1, KeyType, Keypair, PublicKey}; use tiny_keccak::{Hasher, Keccak}; /// Extend ENR for libp2p types. @@ -38,7 +37,8 @@ pub trait CombinedKeyPublicExt { /// Extend ENR CombinedKey for conversion to libp2p keys. 
pub trait CombinedKeyExt { /// Converts a libp2p key into an ENR combined key. - fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result; + fn from_libp2p(key: Keypair) -> Result; + /// Converts a [`secp256k1::Keypair`] into and Enr [`CombinedKey`]. fn from_secp256k1(key: &secp256k1::Keypair) -> CombinedKey; } @@ -93,14 +93,14 @@ impl EnrExt for Enr { if let Some(udp) = self.udp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Udp(udp)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } if let Some(tcp) = self.tcp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Tcp(tcp)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -108,14 +108,14 @@ impl EnrExt for Enr { if let Some(udp6) = self.udp6() { let mut multiaddr: Multiaddr = ip6.into(); multiaddr.push(Protocol::Udp(udp6)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } if let Some(tcp6) = self.tcp6() { let mut multiaddr: Multiaddr = ip6.into(); multiaddr.push(Protocol::Tcp(tcp6)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -133,7 +133,7 @@ impl EnrExt for Enr { if let Some(tcp) = self.tcp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Tcp(tcp)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -141,7 +141,7 @@ impl EnrExt for Enr { if let Some(tcp6) = self.tcp6() { let mut multiaddr: Multiaddr = ip6.into(); multiaddr.push(Protocol::Tcp(tcp6)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -159,7 +159,7 @@ impl EnrExt for Enr { if let Some(udp) = self.udp4() { let mut multiaddr: 
Multiaddr = ip.into(); multiaddr.push(Protocol::Udp(udp)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -167,7 +167,7 @@ impl EnrExt for Enr { if let Some(udp6) = self.udp6() { let mut multiaddr: Multiaddr = ip6.into(); multiaddr.push(Protocol::Udp(udp6)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -204,18 +204,16 @@ impl CombinedKeyPublicExt for CombinedPublicKey { match self { Self::Secp256k1(pk) => { let pk_bytes = pk.to_sec1_bytes(); - let libp2p_pk = libp2p::core::PublicKey::Secp256k1( - libp2p::core::identity::secp256k1::PublicKey::decode(&pk_bytes) - .expect("valid public key"), - ); + let libp2p_pk: PublicKey = secp256k1::PublicKey::try_from_bytes(&pk_bytes) + .expect("valid public key") + .into(); PeerId::from_public_key(&libp2p_pk) } Self::Ed25519(pk) => { let pk_bytes = pk.to_bytes(); - let libp2p_pk = libp2p::core::PublicKey::Ed25519( - libp2p::core::identity::ed25519::PublicKey::decode(&pk_bytes) - .expect("valid public key"), - ); + let libp2p_pk: PublicKey = ed25519::PublicKey::try_from_bytes(&pk_bytes) + .expect("valid public key") + .into(); PeerId::from_public_key(&libp2p_pk) } } @@ -223,18 +221,25 @@ impl CombinedKeyPublicExt for CombinedPublicKey { } impl CombinedKeyExt for CombinedKey { - fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result { - match key { - Keypair::Secp256k1(key) => Ok(CombinedKey::from_secp256k1(key)), - Keypair::Ed25519(key) => { + fn from_libp2p(key: Keypair) -> Result { + match key.key_type() { + KeyType::Secp256k1 => { + let key = key.try_into_secp256k1().expect("right key type"); + let secret = + discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes()) + .expect("libp2p key must be valid"); + Ok(CombinedKey::Secp256k1(secret)) + } + KeyType::Ed25519 => { + let key = key.try_into_ed25519().expect("right key type"); let ed_keypair = 
discv5::enr::ed25519_dalek::SigningKey::from_bytes( - &(key.encode()[..32]) + &(key.to_bytes()[..32]) .try_into() .expect("libp2p key must be valid"), ); Ok(CombinedKey::from(ed_keypair)) } - Keypair::Ecdsa(_) => Err("Ecdsa keypairs not supported"), + _ => Err("Unsupported keypair kind"), } } fn from_secp256k1(key: &secp256k1::Keypair) -> Self { @@ -251,37 +256,46 @@ pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result { - let uncompressed_key_bytes = &pk.encode_uncompressed()[1..]; + })?; + + match public_key.key_type() { + KeyType::Secp256k1 => { + let pk = public_key + .clone() + .try_into_secp256k1() + .expect("right key type"); + let uncompressed_key_bytes = &pk.to_bytes_uncompressed()[1..]; let mut output = [0_u8; 32]; let mut hasher = Keccak::v256(); hasher.update(uncompressed_key_bytes); hasher.finalize(&mut output); Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length")) } - PublicKey::Ed25519(pk) => { - let uncompressed_key_bytes = pk.encode(); + KeyType::Ed25519 => { + let pk = public_key + .clone() + .try_into_ed25519() + .expect("right key type"); + let uncompressed_key_bytes = pk.to_bytes(); let mut output = [0_u8; 32]; let mut hasher = Keccak::v256(); hasher.update(&uncompressed_key_bytes); hasher.finalize(&mut output); Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length")) } - PublicKey::Ecdsa(_) => Err(format!( - "Unsupported public key (Ecdsa) from peer {}", - peer_id - )), + + _ => Err(format!("Unsupported public key from peer {}", peer_id)), } } #[cfg(test)] mod tests { + use super::*; #[test] @@ -290,9 +304,9 @@ mod tests { let sk_bytes = hex::decode(sk_hex).unwrap(); let secret_key = discv5::enr::k256::ecdsa::SigningKey::from_slice(&sk_bytes).unwrap(); - let libp2p_sk = libp2p::identity::secp256k1::SecretKey::from_bytes(sk_bytes).unwrap(); - let secp256k1_kp: libp2p::identity::secp256k1::Keypair = libp2p_sk.into(); - let libp2p_kp = Keypair::Secp256k1(secp256k1_kp); + let libp2p_sk = 
secp256k1::SecretKey::try_from_bytes(sk_bytes).unwrap(); + let secp256k1_kp: secp256k1::Keypair = libp2p_sk.into(); + let libp2p_kp: Keypair = secp256k1_kp.into(); let peer_id = libp2p_kp.public().to_peer_id(); let enr = discv5::enr::EnrBuilder::new("v4") @@ -311,9 +325,9 @@ mod tests { &sk_bytes.clone().try_into().unwrap(), ); - let libp2p_sk = libp2p::identity::ed25519::SecretKey::from_bytes(sk_bytes).unwrap(); - let secp256k1_kp: libp2p::identity::ed25519::Keypair = libp2p_sk.into(); - let libp2p_kp = Keypair::Ed25519(secp256k1_kp); + let libp2p_sk = ed25519::SecretKey::try_from_bytes(sk_bytes).unwrap(); + let secp256k1_kp: ed25519::Keypair = libp2p_sk.into(); + let libp2p_kp: Keypair = secp256k1_kp.into(); let peer_id = libp2p_kp.public().to_peer_id(); let enr = discv5::enr::EnrBuilder::new("v4") diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index d4d0baef6b..0f8ddc53c1 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -16,19 +16,20 @@ pub use enr::{ Eth2Enr, }; pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt}; -pub use libp2p::core::identity::{Keypair, PublicKey}; +pub use libp2p::identity::{Keypair, PublicKey}; use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY}; use futures::prelude::*; use futures::stream::FuturesUnordered; use libp2p::multiaddr::Protocol; use libp2p::swarm::behaviour::{DialFailure, FromSwarm}; -use libp2p::swarm::AddressScore; +use libp2p::swarm::THandlerInEvent; pub use libp2p::{ - core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId}, + core::{ConnectedPoint, Multiaddr}, + identity::PeerId, swarm::{ - dummy::ConnectionHandler, DialError, NetworkBehaviour, NetworkBehaviourAction as NBAction, - NotifyHandler, PollParameters, SubstreamProtocol, + dummy::ConnectionHandler, ConnectionId, DialError, NetworkBehaviour, NotifyHandler, + 
PollParameters, SubstreamProtocol, ToSwarm, }, }; use lru::LruCache; @@ -191,7 +192,7 @@ pub struct Discovery { impl Discovery { /// NOTE: Creating discovery requires running within a tokio execution environment. pub async fn new( - local_key: &Keypair, + local_key: Keypair, config: &NetworkConfig, network_globals: Arc>, log: &slog::Logger, @@ -925,22 +926,51 @@ impl Discovery { impl NetworkBehaviour for Discovery { // Discovery is not a real NetworkBehaviour... type ConnectionHandler = ConnectionHandler; - type OutEvent = DiscoveredPeers; + type ToSwarm = DiscoveredPeers; - fn new_handler(&mut self) -> Self::ConnectionHandler { - ConnectionHandler + fn handle_established_inbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _local_addr: &Multiaddr, + _remote_addr: &Multiaddr, + ) -> Result, libp2p::swarm::ConnectionDenied> { + // TODO: we might want to check discovery's banned ips here in the future. + Ok(ConnectionHandler) } - // Handles the libp2p request to obtain multiaddrs for peer_id's in order to dial them. - fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - if let Some(enr) = self.enr_of_peer(peer_id) { + fn handle_established_outbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _addr: &Multiaddr, + _role_override: libp2p::core::Endpoint, + ) -> Result, libp2p::swarm::ConnectionDenied> { + Ok(ConnectionHandler) + } + + fn on_connection_handler_event( + &mut self, + _peer_id: PeerId, + _connection_id: ConnectionId, + _event: void::Void, + ) { + } + + fn handle_pending_outbound_connection( + &mut self, + _connection_id: ConnectionId, + maybe_peer: Option, + _addresses: &[Multiaddr], + _effective_role: libp2p::core::Endpoint, + ) -> Result, libp2p::swarm::ConnectionDenied> { + if let Some(enr) = maybe_peer.and_then(|peer_id| self.enr_of_peer(&peer_id)) { // ENR's may have multiple Multiaddrs. 
The multi-addr associated with the UDP // port is removed, which is assumed to be associated with the discv5 protocol (and // therefore irrelevant for other libp2p components). - enr.multiaddr_tcp() + Ok(enr.multiaddr_tcp()) } else { - // PeerId is not known - Vec::new() + Ok(vec![]) } } @@ -949,7 +979,7 @@ impl NetworkBehaviour for Discovery { &mut self, cx: &mut Context, _: &mut impl PollParameters, - ) -> Poll> { + ) -> Poll>> { if !self.started { return Poll::Pending; } @@ -960,7 +990,7 @@ impl NetworkBehaviour for Discovery { // Drive the queries and return any results from completed queries if let Some(peers) = self.poll_queries(cx) { // return the result to the peer manager - return Poll::Ready(NBAction::GenerateEvent(DiscoveredPeers { peers })); + return Poll::Ready(ToSwarm::GenerateEvent(DiscoveredPeers { peers })); } // Process the server event stream @@ -1034,10 +1064,7 @@ impl NetworkBehaviour for Discovery { if let Some(address) = addr { // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling // should handle this. - return Poll::Ready(NBAction::ReportObservedAddr { - address, - score: AddressScore::Finite(1), - }); + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(address)); } } Discv5Event::EnrAdded { .. } @@ -1065,8 +1092,9 @@ impl NetworkBehaviour for Discovery { | FromSwarm::ExpiredListenAddr(_) | FromSwarm::ListenerError(_) | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => { + | FromSwarm::NewExternalAddrCandidate(_) + | FromSwarm::ExternalAddrExpired(_) + | FromSwarm::ExternalAddrConfirmed(_) => { // Ignore events not relevant to discovery } } @@ -1077,10 +1105,8 @@ impl Discovery { fn on_dial_failure(&mut self, peer_id: Option, error: &DialError) { if let Some(peer_id) = peer_id { match error { - DialError::Banned - | DialError::LocalPeerId - | DialError::InvalidPeerId(_) - | DialError::ConnectionIo(_) + DialError::LocalPeerId { .. 
} + | DialError::Denied { .. } | DialError::NoAddresses | DialError::Transport(_) | DialError::WrongPeerId { .. } => { @@ -1088,9 +1114,7 @@ impl Discovery { debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id); self.disconnect_peer(&peer_id); } - DialError::ConnectionLimit(_) - | DialError::DialPeerConditionFalse(_) - | DialError::Aborted => {} + DialError::DialPeerConditionFalse(_) | DialError::Aborted => {} } } } @@ -1139,8 +1163,8 @@ mod tests { false, &log, ); - let keypair = Keypair::Secp256k1(keypair); - Discovery::new(&keypair, &config, Arc::new(globals), &log) + let keypair = keypair.into(); + Discovery::new(keypair, &config, Arc::new(globals), &log) .await .unwrap() } diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index c6c737caed..4f3454f403 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -21,7 +21,8 @@ use std::{ use strum::IntoEnumIterator; use types::{EthSpec, SyncSubnetId}; -pub use libp2p::core::{identity::Keypair, Multiaddr}; +pub use libp2p::core::Multiaddr; +pub use libp2p::identity::Keypair; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod peerdb; diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 24de83a61d..ce374bb9ab 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -1,12 +1,14 @@ +//! Implementation of [`NetworkBehaviour`] for the [`PeerManager`]. 
+ use std::task::{Context, Poll}; use futures::StreamExt; use libp2p::core::ConnectedPoint; +use libp2p::identity::PeerId; use libp2p::swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; use libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; use libp2p::swarm::dummy::ConnectionHandler; -use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use libp2p::PeerId; +use libp2p::swarm::{ConnectionId, NetworkBehaviour, PollParameters, ToSwarm}; use slog::{debug, error}; use types::EthSpec; @@ -19,20 +21,24 @@ use super::{ConnectingType, PeerManager, PeerManagerEvent, ReportSource}; impl NetworkBehaviour for PeerManager { type ConnectionHandler = ConnectionHandler; - - type OutEvent = PeerManagerEvent; + type ToSwarm = PeerManagerEvent; /* Required trait members */ - fn new_handler(&mut self) -> Self::ConnectionHandler { - ConnectionHandler + fn on_connection_handler_event( + &mut self, + _peer_id: PeerId, + _connection_id: ConnectionId, + _event: libp2p::swarm::THandlerOutEvent, + ) { + // no events from the dummy handler } fn poll( &mut self, cx: &mut Context<'_>, _params: &mut impl PollParameters, - ) -> Poll> { + ) -> Poll> { // perform the heartbeat when necessary while self.heartbeat.poll_tick(cx).is_ready() { self.heartbeat(); @@ -84,19 +90,17 @@ impl NetworkBehaviour for PeerManager { } if !self.events.is_empty() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); + return Poll::Ready(ToSwarm::GenerateEvent(self.events.remove(0))); } else { self.events.shrink_to_fit(); } if let Some((peer_id, maybe_enr)) = self.peers_to_dial.pop_first() { self.inject_peer_connection(&peer_id, ConnectingType::Dialing, maybe_enr); - let handler = self.new_handler(); - return Poll::Ready(NetworkBehaviourAction::Dial { + return Poll::Ready(ToSwarm::Dial { opts: DialOpts::peer_id(peer_id) .condition(PeerCondition::Disconnected) .build(), - handler, }); } @@ -110,13 +114,31 @@ impl 
NetworkBehaviour for PeerManager { endpoint, other_established, .. - }) => self.on_connection_established(peer_id, endpoint, other_established), + }) => { + // NOTE: We still need to handle the [`ConnectionEstablished`] because the + // [`NetworkBehaviour::handle_established_inbound_connection`] and + // [`NetworkBehaviour::handle_established_outbound_connection`] are fallible. This + // means another behaviour can kill the connection early, and we can't assume a + // peer as connected until this event is received. + self.on_connection_established(peer_id, endpoint, other_established) + } FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, remaining_established, .. }) => self.on_connection_closed(peer_id, remaining_established), - FromSwarm::DialFailure(DialFailure { peer_id, .. }) => self.on_dial_failure(peer_id), + FromSwarm::DialFailure(DialFailure { + peer_id, + error, + connection_id: _, + }) => { + debug!(self.log, "Failed to dial peer"; "peer_id"=> ?peer_id, "error" => %error); + self.on_dial_failure(peer_id); + } + FromSwarm::ExternalAddrConfirmed(_) => { + // TODO: we likely want to check this against our assumed external tcp + // address + } FromSwarm::AddressChange(_) | FromSwarm::ListenFailure(_) | FromSwarm::NewListener(_) @@ -124,13 +146,35 @@ impl NetworkBehaviour for PeerManager { | FromSwarm::ExpiredListenAddr(_) | FromSwarm::ListenerError(_) | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => { + | FromSwarm::NewExternalAddrCandidate(_) + | FromSwarm::ExternalAddrExpired(_) => { // The rest of the events we ignore since they are handled in their associated // `SwarmEvent` } } } + + fn handle_established_inbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _local_addr: &libp2p::Multiaddr, + _remote_addr: &libp2p::Multiaddr, + ) -> Result, libp2p::swarm::ConnectionDenied> { + // TODO: we might want to check if we accept this peer or not in the future. 
+ Ok(ConnectionHandler) + } + + fn handle_established_outbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _addr: &libp2p::Multiaddr, + _role_override: libp2p::core::Endpoint, + ) -> Result, libp2p::swarm::ConnectionDenied> { + // TODO: we might want to check if we accept this peer or not in the future. + Ok(ConnectionHandler) + } } impl PeerManager { diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 8199bee2a7..d42248ad5f 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -3,21 +3,21 @@ use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode, ResponseTermination}; use super::outbound::OutboundRequestContainer; -use super::protocol::{max_rpc_size, InboundRequest, Protocol, RPCError, RPCProtocol}; +use super::protocol::{ + max_rpc_size, InboundOutput, InboundRequest, Protocol, RPCError, RPCProtocol, +}; use super::{RPCReceived, RPCSend, ReqId}; use crate::rpc::outbound::{OutboundFramed, OutboundRequest}; use crate::rpc::protocol::InboundFramed; use fnv::FnvHashMap; use futures::prelude::*; use futures::{Sink, SinkExt}; -use libp2p::core::upgrade::{ - InboundUpgrade, NegotiationError, OutboundUpgrade, ProtocolError, UpgradeError, -}; use libp2p::swarm::handler::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, + FullyNegotiatedInbound, FullyNegotiatedOutbound, KeepAlive, StreamUpgradeError, SubstreamProtocol, }; -use libp2p::swarm::NegotiatedSubstream; +use libp2p::swarm::Stream; use slog::{crit, debug, trace, warn}; use smallvec::SmallVec; use std::{ @@ -47,7 +47,7 @@ const MAX_INBOUND_SUBSTREAMS: usize = 32; #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] pub struct SubstreamId(usize); -type InboundSubstream = InboundFramed; +type InboundSubstream = 
InboundFramed; /// Events the handler emits to the behaviour. pub type HandlerEvent = Result, HandlerErr>; @@ -195,12 +195,12 @@ pub enum OutboundSubstreamState { /// handler because GOODBYE requests can be handled and responses dropped instantly. RequestPendingResponse { /// The framed negotiated substream. - substream: Box>, + substream: Box>, /// Keeps track of the actual request sent. request: OutboundRequest, }, /// Closing an outbound substream> - Closing(Box>), + Closing(Box>), /// Temporary state during processing Poisoned, } @@ -212,7 +212,7 @@ where pub fn new( listen_protocol: SubstreamProtocol, ()>, fork_context: Arc, - log: &slog::Logger, + log: slog::Logger, ) -> Self { RPCHandler { listen_protocol, @@ -230,7 +230,7 @@ where outbound_io_error_retries: 0, fork_context, waker: None, - log: log.clone(), + log, } } @@ -315,8 +315,8 @@ where TSpec: EthSpec, Id: ReqId, { - type InEvent = RPCSend; - type OutEvent = HandlerEvent; + type FromBehaviour = RPCSend; + type ToBehaviour = HandlerEvent; type Error = RPCError; type InboundProtocol = RPCProtocol; type OutboundProtocol = OutboundRequestContainer; @@ -327,121 +327,7 @@ where self.listen_protocol.clone() } - fn inject_fully_negotiated_outbound( - &mut self, - out: >::Output, - request_info: Self::OutboundOpenInfo, - ) { - self.dial_negotiated -= 1; - let (id, request) = request_info; - let proto = request.versioned_protocol().protocol(); - - // accept outbound connections only if the handler is not deactivated - if matches!(self.state, HandlerState::Deactivated) { - self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::Disconnected, - proto, - id, - })); - } - - // add the stream to substreams if we expect a response, otherwise drop the stream. - let expected_responses = request.expected_responses(); - if expected_responses > 0 { - // new outbound request. Store the stream and tag the output. 
- let delay_key = self.outbound_substreams_delay.insert( - self.current_outbound_substream_id, - Duration::from_secs(RESPONSE_TIMEOUT), - ); - let awaiting_stream = OutboundSubstreamState::RequestPendingResponse { - substream: Box::new(out), - request, - }; - let expected_responses = if expected_responses > 1 { - // Currently enforced only for multiple responses - Some(expected_responses) - } else { - None - }; - if self - .outbound_substreams - .insert( - self.current_outbound_substream_id, - OutboundInfo { - state: awaiting_stream, - delay_key, - proto, - remaining_chunks: expected_responses, - req_id: id, - }, - ) - .is_some() - { - crit!(self.log, "Duplicate outbound substream id"; "id" => self.current_outbound_substream_id); - } - self.current_outbound_substream_id.0 += 1; - } - } - - fn inject_fully_negotiated_inbound( - &mut self, - substream: >::Output, - _info: Self::InboundOpenInfo, - ) { - // only accept new peer requests when active - if !matches!(self.state, HandlerState::Active) { - return; - } - - let (req, substream) = substream; - let expected_responses = req.expected_responses(); - - // store requests that expect responses - if expected_responses > 0 { - if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS { - // Store the stream and tag the output. 
- let delay_key = self.inbound_substreams_delay.insert( - self.current_inbound_substream_id, - Duration::from_secs(RESPONSE_TIMEOUT), - ); - let awaiting_stream = InboundState::Idle(substream); - self.inbound_substreams.insert( - self.current_inbound_substream_id, - InboundInfo { - state: awaiting_stream, - pending_items: VecDeque::with_capacity(std::cmp::min( - expected_responses, - 128, - ) as usize), - delay_key: Some(delay_key), - protocol: req.versioned_protocol().protocol(), - request_start_time: Instant::now(), - remaining_chunks: expected_responses, - }, - ); - } else { - self.events_out.push(Err(HandlerErr::Inbound { - id: self.current_inbound_substream_id, - proto: req.versioned_protocol().protocol(), - error: RPCError::HandlerRejected, - })); - return self.shutdown(None); - } - } - - // If we received a goodbye, shutdown the connection. - if let InboundRequest::Goodbye(_) = req { - self.shutdown(None); - } - - self.events_out.push(Ok(RPCReceived::Request( - self.current_inbound_substream_id, - req, - ))); - self.current_inbound_substream_id.0 += 1; - } - - fn inject_event(&mut self, rpc_event: Self::InEvent) { + fn on_behaviour_event(&mut self, rpc_event: Self::FromBehaviour) { match rpc_event { RPCSend::Request(id, req) => self.send_request(id, req), RPCSend::Response(inbound_id, response) => self.send_response(inbound_id, response), @@ -453,56 +339,6 @@ where } } - fn inject_dial_upgrade_error( - &mut self, - request_info: Self::OutboundOpenInfo, - error: ConnectionHandlerUpgrErr< - >::Error, - >, - ) { - let (id, req) = request_info; - if let ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(RPCError::IoError(_))) = error - { - self.outbound_io_error_retries += 1; - if self.outbound_io_error_retries < IO_ERROR_RETRIES { - self.send_request(id, req); - return; - } - } - - // This dialing is now considered failed - self.dial_negotiated -= 1; - - self.outbound_io_error_retries = 0; - // map the error - let error = match error { - 
ConnectionHandlerUpgrErr::Timer => RPCError::InternalError("Timer failed"), - ConnectionHandlerUpgrErr::Timeout => RPCError::NegotiationTimeout, - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(e)) => e, - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => { - RPCError::UnsupportedProtocol - } - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select( - NegotiationError::ProtocolError(e), - )) => match e { - ProtocolError::IoError(io_err) => RPCError::IoError(io_err.to_string()), - ProtocolError::InvalidProtocol => { - RPCError::InternalError("Protocol was deemed invalid") - } - ProtocolError::InvalidMessage | ProtocolError::TooManyProtocols => { - // Peer is sending invalid data during the negotiation phase, not - // participating in the protocol - RPCError::InvalidData("Invalid message during negotiation".to_string()) - } - }, - }; - self.events_out.push(Err(HandlerErr::Outbound { - error, - proto: req.versioned_protocol().protocol(), - id, - })); - } - fn connection_keep_alive(&self) -> KeepAlive { // Check that we don't have outbound items pending for dialing, nor dialing, nor // established. Also check that there are no established inbound substreams. 
@@ -535,7 +371,7 @@ where ConnectionHandlerEvent< Self::OutboundProtocol, Self::OutboundOpenInfo, - Self::OutEvent, + Self::ToBehaviour, Self::Error, >, > { @@ -548,7 +384,9 @@ where } // return any events that need to be reported if !self.events_out.is_empty() { - return Poll::Ready(ConnectionHandlerEvent::Custom(self.events_out.remove(0))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + self.events_out.remove(0), + )); } else { self.events_out.shrink_to_fit(); } @@ -612,7 +450,9 @@ where error: RPCError::StreamTimeout, }; // notify the user - return Poll::Ready(ConnectionHandlerEvent::Custom(Err(outbound_err))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err( + outbound_err, + ))); } else { crit!(self.log, "timed out substream not in the books"; "stream_id" => outbound_id.get_ref()); } @@ -872,7 +712,7 @@ where }), }; - return Poll::Ready(ConnectionHandlerEvent::Custom(received)); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(received)); } Poll::Ready(None) => { // stream closed @@ -887,7 +727,7 @@ where // notify the application error if request.expected_responses() > 1 { // return an end of stream result - return Poll::Ready(ConnectionHandlerEvent::Custom(Ok( + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Ok( RPCReceived::EndOfStream(request_id, request.stream_termination()), ))); } @@ -898,7 +738,9 @@ where proto: request.versioned_protocol().protocol(), error: RPCError::IncompleteStream, }; - return Poll::Ready(ConnectionHandlerEvent::Custom(Err(outbound_err))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err( + outbound_err, + ))); } Poll::Pending => { entry.get_mut().state = @@ -914,7 +756,9 @@ where error: e, }; entry.remove_entry(); - return Poll::Ready(ConnectionHandlerEvent::Custom(Err(outbound_err))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err( + outbound_err, + ))); } }, OutboundSubstreamState::Closing(mut substream) => { @@ -940,7 +784,7 @@ where }; 
if let Some(termination) = termination { - return Poll::Ready(ConnectionHandlerEvent::Custom(Ok( + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Ok( RPCReceived::EndOfStream(request_id, termination), ))); } @@ -989,6 +833,207 @@ where Poll::Pending } + + fn on_connection_event( + &mut self, + event: ConnectionEvent< + Self::InboundProtocol, + Self::OutboundProtocol, + Self::InboundOpenInfo, + Self::OutboundOpenInfo, + >, + ) { + match event { + ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { + protocol, + info: _, + }) => self.on_fully_negotiated_inbound(protocol), + ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { + protocol, + info, + }) => self.on_fully_negotiated_outbound(protocol, info), + ConnectionEvent::DialUpgradeError(DialUpgradeError { info, error }) => { + self.on_dial_upgrade_error(info, error) + } + ConnectionEvent::ListenUpgradeError(libp2p::swarm::handler::ListenUpgradeError { + info: _, + error: _, /* RPCError */ + }) => { + // This is going to be removed in the next libp2p release. I think its fine to do + // nothing. + } + ConnectionEvent::LocalProtocolsChange(_) => { + // This shouldn't effect this handler, we will still negotiate streams if we support + // the protocol as usual. + } + ConnectionEvent::RemoteProtocolsChange(_) => { + // This shouldn't effect this handler, we will still negotiate streams if we support + // the protocol as usual. + } + ConnectionEvent::AddressChange(_) => { + // We dont care about these changes as they have no bearing on our RPC internal + // logic. 
+ } + } + } +} + +impl RPCHandler +where + Id: ReqId, + TSpec: EthSpec, +{ + fn on_fully_negotiated_inbound(&mut self, substream: InboundOutput) { + // only accept new peer requests when active + if !matches!(self.state, HandlerState::Active) { + return; + } + + let (req, substream) = substream; + let expected_responses = req.expected_responses(); + + // store requests that expect responses + if expected_responses > 0 { + if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS { + // Store the stream and tag the output. + let delay_key = self.inbound_substreams_delay.insert( + self.current_inbound_substream_id, + Duration::from_secs(RESPONSE_TIMEOUT), + ); + let awaiting_stream = InboundState::Idle(substream); + self.inbound_substreams.insert( + self.current_inbound_substream_id, + InboundInfo { + state: awaiting_stream, + pending_items: VecDeque::with_capacity(std::cmp::min( + expected_responses, + 128, + ) as usize), + delay_key: Some(delay_key), + protocol: req.versioned_protocol().protocol(), + request_start_time: Instant::now(), + remaining_chunks: expected_responses, + }, + ); + } else { + self.events_out.push(Err(HandlerErr::Inbound { + id: self.current_inbound_substream_id, + proto: req.versioned_protocol().protocol(), + error: RPCError::HandlerRejected, + })); + return self.shutdown(None); + } + } + + // If we received a goodbye, shutdown the connection. + if let InboundRequest::Goodbye(_) = req { + self.shutdown(None); + } + + self.events_out.push(Ok(RPCReceived::Request( + self.current_inbound_substream_id, + req, + ))); + self.current_inbound_substream_id.0 += 1; + } + + fn on_fully_negotiated_outbound( + &mut self, + substream: OutboundFramed, + (id, request): (Id, OutboundRequest), + ) { + self.dial_negotiated -= 1; + // Reset any io-retries counter. 
+ self.outbound_io_error_retries = 0; + + let proto = request.versioned_protocol().protocol(); + + // accept outbound connections only if the handler is not deactivated + if matches!(self.state, HandlerState::Deactivated) { + self.events_out.push(Err(HandlerErr::Outbound { + error: RPCError::Disconnected, + proto, + id, + })); + } + + // add the stream to substreams if we expect a response, otherwise drop the stream. + let expected_responses = request.expected_responses(); + if expected_responses > 0 { + // new outbound request. Store the stream and tag the output. + let delay_key = self.outbound_substreams_delay.insert( + self.current_outbound_substream_id, + Duration::from_secs(RESPONSE_TIMEOUT), + ); + let awaiting_stream = OutboundSubstreamState::RequestPendingResponse { + substream: Box::new(substream), + request, + }; + let expected_responses = if expected_responses > 1 { + // Currently enforced only for multiple responses + Some(expected_responses) + } else { + None + }; + if self + .outbound_substreams + .insert( + self.current_outbound_substream_id, + OutboundInfo { + state: awaiting_stream, + delay_key, + proto, + remaining_chunks: expected_responses, + req_id: id, + }, + ) + .is_some() + { + crit!(self.log, "Duplicate outbound substream id"; "id" => self.current_outbound_substream_id); + } + self.current_outbound_substream_id.0 += 1; + } + } + fn on_dial_upgrade_error( + &mut self, + request_info: (Id, OutboundRequest), + error: StreamUpgradeError, + ) { + let (id, req) = request_info; + + // map the error + let error = match error { + StreamUpgradeError::Timeout => RPCError::NegotiationTimeout, + StreamUpgradeError::Apply(RPCError::IoError(e)) => { + self.outbound_io_error_retries += 1; + if self.outbound_io_error_retries < IO_ERROR_RETRIES { + self.send_request(id, req); + return; + } + RPCError::IoError(e) + } + StreamUpgradeError::NegotiationFailed => RPCError::UnsupportedProtocol, + StreamUpgradeError::Io(io_err) => { + 
self.outbound_io_error_retries += 1; + if self.outbound_io_error_retries < IO_ERROR_RETRIES { + self.send_request(id, req); + return; + } + RPCError::IoError(io_err.to_string()) + } + StreamUpgradeError::Apply(other) => other, + }; + + // This dialing is now considered failed + self.dial_negotiated -= 1; + + self.outbound_io_error_retries = 0; + self.events_out.push(Err(HandlerErr::Outbound { + error, + proto: req.versioned_protocol().protocol(), + id, + })); + } } impl slog::Value for SubstreamId { diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index ffdc193bbb..4fd9b516d4 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -6,11 +6,11 @@ use futures::future::FutureExt; use handler::{HandlerEvent, RPCHandler}; -use libp2p::core::connection::ConnectionId; use libp2p::swarm::{ - handler::ConnectionHandler, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, - PollParameters, SubstreamProtocol, + handler::ConnectionHandler, ConnectionId, NetworkBehaviour, NotifyHandler, PollParameters, + ToSwarm, }; +use libp2p::swarm::{FromSwarm, SubstreamProtocol, THandlerInEvent}; use libp2p::PeerId; use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}; use slog::{crit, debug, o}; @@ -21,7 +21,7 @@ use types::{EthSpec, ForkContext}; pub(crate) use handler::HandlerErr; pub(crate) use methods::{MetaData, MetaDataV1, MetaDataV2, Ping, RPCCodedResponse, RPCResponse}; -pub(crate) use protocol::{InboundRequest, RPCProtocol}; +pub(crate) use protocol::InboundRequest; pub use handler::SubstreamId; pub use methods::{ @@ -32,6 +32,7 @@ pub(crate) use outbound::OutboundRequest; pub use protocol::{max_rpc_size, Protocol, RPCError}; use self::config::{InboundRateLimiterConfig, OutboundRateLimiterConfig}; +use self::protocol::RPCProtocol; use self::self_limiter::SelfRateLimiter; pub(crate) mod codec; @@ -104,8 +105,7 @@ pub struct RPCMessage { pub event: 
HandlerEvent, } -type BehaviourAction = - NetworkBehaviourAction, RPCHandler>; +type BehaviourAction = ToSwarm, RPCSend>; /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level /// logic. @@ -161,7 +161,7 @@ impl RPC { id: (ConnectionId, SubstreamId), event: RPCCodedResponse, ) { - self.events.push(NetworkBehaviourAction::NotifyHandler { + self.events.push(ToSwarm::NotifyHandler { peer_id, handler: NotifyHandler::One(id.0), event: RPCSend::Response(id.1, event), @@ -181,7 +181,7 @@ impl RPC { } } } else { - NetworkBehaviourAction::NotifyHandler { + ToSwarm::NotifyHandler { peer_id, handler: NotifyHandler::Any, event: RPCSend::Request(request_id, req), @@ -194,7 +194,7 @@ impl RPC { /// Lighthouse wishes to disconnect from this peer by sending a Goodbye message. This /// gracefully terminates the RPC behaviour with a goodbye message. pub fn shutdown(&mut self, peer_id: PeerId, id: Id, reason: GoodbyeReason) { - self.events.push(NetworkBehaviourAction::NotifyHandler { + self.events.push(ToSwarm::NotifyHandler { peer_id, handler: NotifyHandler::Any, event: RPCSend::Shutdown(id, reason), @@ -208,29 +208,83 @@ where Id: ReqId, { type ConnectionHandler = RPCHandler; - type OutEvent = RPCMessage; + type ToSwarm = RPCMessage; - fn new_handler(&mut self) -> Self::ConnectionHandler { - RPCHandler::new( - SubstreamProtocol::new( - RPCProtocol { - fork_context: self.fork_context.clone(), - max_rpc_size: max_rpc_size(&self.fork_context), - enable_light_client_server: self.enable_light_client_server, - phantom: PhantomData, - }, - (), - ), - self.fork_context.clone(), - &self.log, - ) + fn handle_established_inbound_connection( + &mut self, + _connection_id: ConnectionId, + peer_id: PeerId, + _local_addr: &libp2p::Multiaddr, + _remote_addr: &libp2p::Multiaddr, + ) -> Result, libp2p::swarm::ConnectionDenied> { + let protocol = SubstreamProtocol::new( + RPCProtocol { + fork_context: self.fork_context.clone(), + max_rpc_size: 
max_rpc_size(&self.fork_context), + enable_light_client_server: self.enable_light_client_server, + phantom: PhantomData, + }, + (), + ); + // NOTE: this is needed because PeerIds have interior mutability. + let peer_repr = peer_id.to_string(); + let log = self.log.new(slog::o!("peer_id" => peer_repr)); + let handler = RPCHandler::new(protocol, self.fork_context.clone(), log); + + Ok(handler) } - fn inject_event( + fn handle_established_outbound_connection( + &mut self, + _connection_id: ConnectionId, + peer_id: PeerId, + _addr: &libp2p::Multiaddr, + _role_override: libp2p::core::Endpoint, + ) -> Result, libp2p::swarm::ConnectionDenied> { + let protocol = SubstreamProtocol::new( + RPCProtocol { + fork_context: self.fork_context.clone(), + max_rpc_size: max_rpc_size(&self.fork_context), + enable_light_client_server: self.enable_light_client_server, + phantom: PhantomData, + }, + (), + ); + + // NOTE: this is needed because PeerIds have interior mutability. + let peer_repr = peer_id.to_string(); + let log = self.log.new(slog::o!("peer_id" => peer_repr)); + let handler = RPCHandler::new(protocol, self.fork_context.clone(), log); + + Ok(handler) + } + + fn on_swarm_event(&mut self, event: FromSwarm) { + match event { + FromSwarm::ConnectionClosed(_) + | FromSwarm::ConnectionEstablished(_) + | FromSwarm::AddressChange(_) + | FromSwarm::DialFailure(_) + | FromSwarm::ListenFailure(_) + | FromSwarm::NewListener(_) + | FromSwarm::NewListenAddr(_) + | FromSwarm::ExpiredListenAddr(_) + | FromSwarm::ListenerError(_) + | FromSwarm::ListenerClosed(_) + | FromSwarm::NewExternalAddrCandidate(_) + | FromSwarm::ExternalAddrExpired(_) + | FromSwarm::ExternalAddrConfirmed(_) => { + // Rpc Behaviour does not act on these swarm events. We use a comprehensive match + // statement to ensure future events are dealt with appropriately. 
+ } + } + } + + fn on_connection_handler_event( &mut self, peer_id: PeerId, conn_id: ConnectionId, - event: ::OutEvent, + event: ::ToBehaviour, ) { if let Ok(RPCReceived::Request(ref id, ref req)) = event { if let Some(limiter) = self.limiter.as_mut() { @@ -238,12 +292,11 @@ where match limiter.allows(&peer_id, req) { Ok(()) => { // send the event to the user - self.events - .push(NetworkBehaviourAction::GenerateEvent(RPCMessage { - peer_id, - conn_id, - event, - })) + self.events.push(ToSwarm::GenerateEvent(RPCMessage { + peer_id, + conn_id, + event, + })) } Err(RateLimitedErr::TooLarge) => { // we set the batch sizes, so this is a coding/config err for most protocols @@ -281,20 +334,18 @@ where } } else { // No rate limiting, send the event to the user - self.events - .push(NetworkBehaviourAction::GenerateEvent(RPCMessage { - peer_id, - conn_id, - event, - })) - } - } else { - self.events - .push(NetworkBehaviourAction::GenerateEvent(RPCMessage { + self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, conn_id, event, - })); + })) + } + } else { + self.events.push(ToSwarm::GenerateEvent(RPCMessage { + peer_id, + conn_id, + event, + })); } } @@ -302,7 +353,7 @@ where &mut self, cx: &mut Context, _: &mut impl PollParameters, - ) -> Poll> { + ) -> Poll>> { // let the rate limiter prune. 
if let Some(limiter) = self.limiter.as_mut() { let _ = limiter.poll_unpin(cx); diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index ea39c1423a..22f9f19d68 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -7,7 +7,7 @@ use crate::rpc::{ use futures::future::BoxFuture; use futures::prelude::{AsyncRead, AsyncWrite}; use futures::{FutureExt, StreamExt}; -use libp2p::core::{InboundUpgrade, ProtocolName, UpgradeInfo}; +use libp2p::core::{InboundUpgrade, UpgradeInfo}; use ssz::Encode; use ssz_types::VariableList; use std::io; @@ -313,6 +313,12 @@ pub struct ProtocolId { protocol_id: String, } +impl AsRef for ProtocolId { + fn as_ref(&self) -> &str { + self.protocol_id.as_ref() + } +} + impl ProtocolId { /// Returns min and max size for messages of given protocol id requests. pub fn rpc_request_limits(&self) -> RpcLimits { @@ -407,12 +413,6 @@ impl ProtocolId { } } -impl ProtocolName for ProtocolId { - fn protocol_name(&self) -> &[u8] { - self.protocol_id.as_bytes() - } -} - /* Inbound upgrade */ // The inbound protocol reads the request, decodes it and returns the stream to the protocol diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index 626917d6a7..4348c1ec6d 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -64,7 +64,7 @@ impl SelfRateLimiter { } /// Checks if the rate limiter allows the request. If it's allowed, returns the - /// [`NetworkBehaviourAction`] that should be emitted. When not allowed, the request is delayed + /// [`ToSwarm`] that should be emitted. When not allowed, the request is delayed /// until it can be sent. pub fn allows( &mut self, @@ -95,7 +95,7 @@ impl SelfRateLimiter { } /// Auxiliary function to deal with self rate limiting outcomes. 
If the rate limiter allows the - /// request, the [`NetworkBehaviourAction`] that should be emitted is returned. If the request + /// request, the [`ToSwarm`] that should be emitted is returned. If the request /// should be delayed, it's returned with the duration to wait. fn try_send_request( limiter: &mut RateLimiter, diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 5ab89fee51..187c0ab1b1 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use libp2p::core::connection::ConnectionId; +use libp2p::swarm::ConnectionId; use types::light_client_bootstrap::LightClientBootstrap; use types::{EthSpec, SignedBeaconBlock}; diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs index 7d20b87ad1..6c52a07c14 100644 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -3,21 +3,27 @@ use crate::peer_manager::PeerManager; use crate::rpc::{ReqId, RPC}; use crate::types::SnappyTransform; -use libp2p::gossipsub::subscription_filter::{ - MaxCountSubscriptionFilter, WhitelistSubscriptionFilter, -}; -use libp2p::gossipsub::Gossipsub as BaseGossipsub; -use libp2p::identify::Behaviour as Identify; +use libp2p::gossipsub; +use libp2p::identify; use libp2p::swarm::NetworkBehaviour; use types::EthSpec; use super::api_types::RequestId; -pub type SubscriptionFilter = MaxCountSubscriptionFilter; -pub type Gossipsub = BaseGossipsub; +pub type SubscriptionFilter = + gossipsub::MaxCountSubscriptionFilter; +pub type Gossipsub = gossipsub::Behaviour; #[derive(NetworkBehaviour)] -pub(crate) struct Behaviour { +pub(crate) struct Behaviour +where + AppReqId: ReqId, + TSpec: EthSpec, +{ + /// Peers banned. 
+ pub banned_peers: libp2p::allow_block_list::Behaviour, + /// Keep track of active and pending connections to enforce hard limits. + pub connection_limits: libp2p::connection_limits::Behaviour, /// The routing pub-sub mechanism for eth2. pub gossipsub: Gossipsub, /// The Eth2 RPC specified in the wire-0 protocol. @@ -27,7 +33,7 @@ pub(crate) struct Behaviour { /// Keep regular connection to peers and disconnect if absent. // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. /// Provides IP addresses and peer information. - pub identify: Identify, + pub identify: identify::Behaviour, /// The peer manager that keeps track of peer's reputation and status. pub peer_manager: PeerManager, } diff --git a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs index 88becd686e..b058fc0ff1 100644 --- a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs +++ b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs @@ -1,7 +1,8 @@ use crate::types::{GossipEncoding, GossipKind, GossipTopic}; use crate::{error, TopicHash}; use libp2p::gossipsub::{ - GossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds, TopicScoreParams, + Config as GossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds, + TopicScoreParams, }; use std::cmp::max; use std::collections::HashMap; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 129a4da25b..1a25beee0a 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -24,15 +24,12 @@ use api_types::{PeerRequestId, Request, RequestId, Response}; use futures::stream::StreamExt; use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; use libp2p::bandwidth::BandwidthSinks; -use 
libp2p::gossipsub::error::PublishError; -use libp2p::gossipsub::metrics::Config as GossipsubMetricsConfig; -use libp2p::gossipsub::subscription_filter::MaxCountSubscriptionFilter; use libp2p::gossipsub::{ - GossipsubEvent, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, + self, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, }; -use libp2p::identify::{Behaviour as Identify, Config as IdentifyConfig, Event as IdentifyEvent}; +use libp2p::identify; use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; -use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p::swarm::{Swarm, SwarmBuilder, SwarmEvent}; use libp2p::PeerId; use slog::{crit, debug, info, o, trace, warn}; use std::path::PathBuf; @@ -66,10 +63,6 @@ pub enum NetworkEvent { PeerConnectedIncoming(PeerId), /// A peer has disconnected. PeerDisconnected(PeerId), - /// The peer needs to be banned. - PeerBanned(PeerId), - /// The peer has been unbanned. - PeerUnbanned(PeerId), /// An RPC Request that was sent failed. RPCFailed { /// The id of the failed request. 
@@ -229,7 +222,7 @@ impl Network { let update_gossipsub_scores = tokio::time::interval(params.decay_interval); let possible_fork_digests = ctx.fork_context.all_fork_digests(); - let filter = MaxCountSubscriptionFilter { + let filter = gossipsub::MaxCountSubscriptionFilter { filter: utils::create_whitelist_filter( possible_fork_digests, ctx.chain_spec.attestation_subnet_count, @@ -244,7 +237,7 @@ impl Network { // If metrics are enabled for gossipsub build the configuration let gossipsub_metrics = ctx .gossipsub_registry - .map(|registry| (registry, GossipsubMetricsConfig::default())); + .map(|registry| (registry, Default::default())); let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size()); let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( @@ -273,26 +266,32 @@ impl Network { let discovery = { // Build and start the discovery sub-behaviour - let mut discovery = - Discovery::new(&local_keypair, &config, network_globals.clone(), &log).await?; + let mut discovery = Discovery::new( + local_keypair.clone(), + &config, + network_globals.clone(), + &log, + ) + .await?; // start searching for peers discovery.discover_peers(FIND_NODE_QUERY_CLOSEST_PEERS); discovery }; let identify = { + let local_public_key = local_keypair.public(); let identify_config = if config.private { - IdentifyConfig::new( + identify::Config::new( "".into(), - local_keypair.public(), // Still send legitimate public key + local_public_key, // Still send legitimate public key ) .with_cache_size(0) } else { - IdentifyConfig::new("eth2/1.0.0".into(), local_keypair.public()) + identify::Config::new("eth2/1.0.0".into(), local_public_key) .with_agent_version(lighthouse_version::version_with_platform()) .with_cache_size(0) }; - Identify::new(identify_config) + identify::Behaviour::new(identify_config) }; let peer_manager = { @@ -305,13 +304,38 @@ impl Network { PeerManager::new(peer_manager_cfg, network_globals.clone(), &log)? 
}; + let connection_limits = { + let limits = libp2p::connection_limits::ConnectionLimits::default() + .with_max_pending_incoming(Some(5)) + .with_max_pending_outgoing(Some(16)) + .with_max_established_incoming(Some( + (config.target_peers as f32 + * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) + .ceil() as u32, + )) + .with_max_established_outgoing(Some( + (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32, + )) + .with_max_established(Some( + (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS)) + .ceil() as u32, + )) + .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); + + libp2p::connection_limits::Behaviour::new(limits) + }; + + let banned_peers = libp2p::allow_block_list::Behaviour::default(); + let behaviour = { Behaviour { + banned_peers, gossipsub, eth2_rpc, discovery, identify, peer_manager, + connection_limits, } }; @@ -329,22 +353,6 @@ impl Network { } // sets up the libp2p connection limits - let limits = ConnectionLimits::default() - .with_max_pending_incoming(Some(5)) - .with_max_pending_outgoing(Some(16)) - .with_max_established_incoming(Some( - (config.target_peers as f32 - * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) - .ceil() as u32, - )) - .with_max_established_outgoing(Some( - (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32, - )) - .with_max_established(Some( - (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS)) - .ceil() as u32, - )) - .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); ( SwarmBuilder::with_executor( @@ -354,8 +362,7 @@ impl Network { Executor(executor), ) .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero")) - .connection_event_buffer_size(64) - .connection_limits(limits) + .per_connection_event_buffer_size(4) .build(), bandwidth, ) @@ -396,7 +403,7 @@ impl Network { match self.swarm.listen_on(listen_multiaddr.clone()) { Ok(_) => { let mut log_address = 
listen_multiaddr; - log_address.push(MProtocol::P2p(enr.peer_id().into())); + log_address.push(MProtocol::P2p(enr.peer_id())); info!(self.log, "Listening established"; "address" => %log_address); } Err(err) => { @@ -493,7 +500,7 @@ impl Network { &mut self.swarm.behaviour_mut().discovery } /// Provides IP addresses and peer information. - pub fn identify_mut(&mut self) -> &mut Identify { + pub fn identify_mut(&mut self) -> &mut identify::Behaviour { &mut self.swarm.behaviour_mut().identify } /// The peer manager that keeps track of peer's reputation and status. @@ -514,7 +521,7 @@ impl Network { &self.swarm.behaviour().discovery } /// Provides IP addresses and peer information. - pub fn identify(&self) -> &Identify { + pub fn identify(&self) -> &identify::Behaviour { &self.swarm.behaviour().identify } /// The peer manager that keeps track of peer's reputation and status. @@ -1045,9 +1052,12 @@ impl Network { /* Sub-behaviour event handling functions */ /// Handle a gossipsub event. - fn inject_gs_event(&mut self, event: GossipsubEvent) -> Option> { + fn inject_gs_event( + &mut self, + event: gossipsub::Event, + ) -> Option> { match event { - GossipsubEvent::Message { + gossipsub::Event::Message { propagation_source, message_id: id, message: gs_msg, @@ -1077,7 +1087,7 @@ impl Network { } } } - GossipsubEvent::Subscribed { peer_id, topic } => { + gossipsub::Event::Subscribed { peer_id, topic } => { if let Ok(topic) = GossipTopic::decode(topic.as_str()) { if let Some(subnet_id) = topic.subnet_id() { self.network_globals @@ -1118,7 +1128,7 @@ impl Network { } } } - GossipsubEvent::Unsubscribed { peer_id, topic } => { + gossipsub::Event::Unsubscribed { peer_id, topic } => { if let Some(subnet_id) = subnet_from_topic_hash(&topic) { self.network_globals .peers @@ -1126,7 +1136,7 @@ impl Network { .remove_subscription(&peer_id, &subnet_id); } } - GossipsubEvent::GossipsubNotSupported { peer_id } => { + gossipsub::Event::GossipsubNotSupported { peer_id } => { 
debug!(self.log, "Peer does not support gossipsub"; "peer_id" => %peer_id); self.peer_manager_mut().report_peer( &peer_id, @@ -1340,10 +1350,10 @@ impl Network { /// Handle an identify event. fn inject_identify_event( &mut self, - event: IdentifyEvent, + event: identify::Event, ) -> Option> { match event { - IdentifyEvent::Received { peer_id, mut info } => { + identify::Event::Received { peer_id, mut info } => { if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { debug!( self.log, @@ -1354,9 +1364,9 @@ impl Network { // send peer info to the peer manager. self.peer_manager_mut().identify(&peer_id, &info); } - IdentifyEvent::Sent { .. } => {} - IdentifyEvent::Error { .. } => {} - IdentifyEvent::Pushed { .. } => {} + identify::Event::Sent { .. } => {} + identify::Event::Error { .. } => {} + identify::Event::Pushed { .. } => {} } None } @@ -1377,14 +1387,17 @@ impl Network { Some(NetworkEvent::PeerDisconnected(peer_id)) } PeerManagerEvent::Banned(peer_id, associated_ips) => { - self.swarm.ban_peer_id(peer_id); + self.swarm.behaviour_mut().banned_peers.block_peer(peer_id); self.discovery_mut().ban_peer(&peer_id, associated_ips); - Some(NetworkEvent::PeerBanned(peer_id)) + None } PeerManagerEvent::UnBanned(peer_id, associated_ips) => { - self.swarm.unban_peer_id(peer_id); + self.swarm + .behaviour_mut() + .banned_peers + .unblock_peer(peer_id); self.discovery_mut().unban_peer(&peer_id, associated_ips); - Some(NetworkEvent::PeerUnbanned(peer_id)) + None } PeerManagerEvent::Status(peer_id) => { // it's time to status. We don't keep a beacon chain reference here, so we inform @@ -1431,17 +1444,20 @@ impl Network { let maybe_event = match swarm_event { SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { // Handle sub-behaviour events. 
+ BehaviourEvent::BannedPeers(void) => void::unreachable(void), BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), BehaviourEvent::Discovery(de) => self.inject_discovery_event(de), BehaviourEvent::Identify(ie) => self.inject_identify_event(ie), BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe), + BehaviourEvent::ConnectionLimits(le) => void::unreachable(le), }, SwarmEvent::ConnectionEstablished { .. } => None, SwarmEvent::ConnectionClosed { .. } => None, SwarmEvent::IncomingConnection { local_addr, send_back_addr, + connection_id: _, } => { trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr); None @@ -1450,19 +1466,41 @@ impl Network { local_addr, send_back_addr, error, + connection_id: _, } => { - debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error); + let error_repr = match error { + libp2p::swarm::ListenError::Aborted => { + "Incoming connection aborted".to_string() + } + libp2p::swarm::ListenError::WrongPeerId { obtained, endpoint } => { + format!("Wrong peer id, obtained {obtained}, endpoint {endpoint:?}") + } + libp2p::swarm::ListenError::LocalPeerId { endpoint } => { + format!("Dialing local peer id {endpoint:?}") + } + libp2p::swarm::ListenError::Denied { cause } => { + format!("Connection was denied with cause {cause}") + } + libp2p::swarm::ListenError::Transport(t) => match t { + libp2p::TransportError::MultiaddrNotSupported(m) => { + format!("Transport error: Multiaddr not supported: {m}") + } + libp2p::TransportError::Other(e) => { + format!("Transport error: other: {e}") + } + }, + }; + debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => error_repr); None } - SwarmEvent::OutgoingConnectionError { peer_id, error } => { - debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error); - None 
- } - SwarmEvent::BannedPeer { - peer_id, - endpoint: _, + SwarmEvent::OutgoingConnectionError { + peer_id: _, + error: _, + connection_id: _, } => { - debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); + // The Behaviour event is more general than the swarm event here. It includes + // connection failures. So we use that log for now, in the peer manager + // behaviour implementation. None } SwarmEvent::NewListenAddr { address, .. } => { @@ -1491,7 +1529,13 @@ impl Network { None } } - SwarmEvent::Dialing(_) => None, + SwarmEvent::Dialing { + peer_id, + connection_id: _, + } => { + debug!(self.log, "Swarm Dialing"; "peer_id" => ?peer_id); + None + } }; if let Some(ev) = maybe_event { diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index ac0dc57d7b..21fd09b6b0 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -4,13 +4,11 @@ use crate::types::{ error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind, }; use crate::{GossipTopic, NetworkConfig}; -use libp2p::bandwidth::{BandwidthLogging, BandwidthSinks}; -use libp2p::core::{ - identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed, -}; -use libp2p::gossipsub::subscription_filter::WhitelistSubscriptionFilter; -use libp2p::gossipsub::IdentTopic as Topic; -use libp2p::{core, noise, PeerId, Transport}; +use libp2p::bandwidth::BandwidthSinks; +use libp2p::core::{multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed}; +use libp2p::gossipsub; +use libp2p::identity::{secp256k1, Keypair}; +use libp2p::{core, noise, yamux, PeerId, Transport, TransportExt}; use prometheus_client::registry::Registry; use slog::{debug, warn}; use ssz::Decode; @@ -52,30 +50,19 @@ pub fn build_transport( transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone)) }; - let (transport, bandwidth) = 
BandwidthLogging::new(transport); - - // mplex config - let mut mplex_config = libp2p::mplex::MplexConfig::new(); - mplex_config.set_max_buffer_size(256); - mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block); - // yamux config - let mut yamux_config = libp2p::yamux::YamuxConfig::default(); - yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read()); + let mut yamux_config = yamux::Config::default(); + yamux_config.set_window_update_mode(yamux::WindowUpdateMode::on_read()); + let (transport, bandwidth) = transport + .upgrade(core::upgrade::Version::V1) + .authenticate(generate_noise_config(&local_private_key)) + .multiplex(yamux_config) + .timeout(Duration::from_secs(10)) + .boxed() + .with_bandwidth_logging(); // Authentication - Ok(( - transport - .upgrade(core::upgrade::Version::V1) - .authenticate(generate_noise_config(&local_private_key)) - .multiplex(core::upgrade::SelectUpgrade::new( - yamux_config, - mplex_config, - )) - .timeout(Duration::from_secs(10)) - .boxed(), - bandwidth, - )) + Ok((transport, bandwidth)) } // Useful helper functions for debugging. Currently not used in the client. 
@@ -94,10 +81,10 @@ fn keypair_from_hex(hex_bytes: &str) -> error::Result { #[allow(dead_code)] fn keypair_from_bytes(mut bytes: Vec) -> error::Result { - libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes) + secp256k1::SecretKey::try_from_bytes(&mut bytes) .map(|secret| { - let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into(); - Keypair::Secp256k1(keypair) + let keypair: secp256k1::Keypair = secret.into(); + keypair.into() }) .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into()) } @@ -115,12 +102,10 @@ pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair { Err(_) => debug!(log, "Could not read network key file"), Ok(_) => { // only accept secp256k1 keys for now - if let Ok(secret_key) = - libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut key_bytes) - { - let kp: libp2p::core::identity::secp256k1::Keypair = secret_key.into(); + if let Ok(secret_key) = secp256k1::SecretKey::try_from_bytes(&mut key_bytes) { + let kp: secp256k1::Keypair = secret_key.into(); debug!(log, "Loaded network key from disk."); - return Keypair::Secp256k1(kp); + return kp.into(); } else { debug!(log, "Network key file is not a valid secp256k1 key"); } @@ -129,34 +114,27 @@ pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair { } // if a key could not be loaded from disk, generate a new one and save it - let local_private_key = Keypair::generate_secp256k1(); - if let Keypair::Secp256k1(key) = local_private_key.clone() { - let _ = std::fs::create_dir_all(&config.network_dir); - match File::create(network_key_f.clone()) - .and_then(|mut f| f.write_all(&key.secret().to_bytes())) - { - Ok(_) => { - debug!(log, "New network key generated and written to disk"); - } - Err(e) => { - warn!( - log, - "Could not write node key to file: {:?}. 
error: {}", network_key_f, e - ); - } + let local_private_key = secp256k1::Keypair::generate(); + let _ = std::fs::create_dir_all(&config.network_dir); + match File::create(network_key_f.clone()) + .and_then(|mut f| f.write_all(&local_private_key.secret().to_bytes())) + { + Ok(_) => { + debug!(log, "New network key generated and written to disk"); + } + Err(e) => { + warn!( + log, + "Could not write node key to file: {:?}. error: {}", network_key_f, e + ); } } - local_private_key + local_private_key.into() } /// Generate authenticated XX Noise config from identity keys -fn generate_noise_config( - identity_keypair: &Keypair, -) -> noise::NoiseAuthenticated { - let static_dh_keys = noise::Keypair::::new() - .into_authentic(identity_keypair) - .expect("signing can fail only once during starting a node"); - noise::NoiseConfig::xx(static_dh_keys).into_authenticated() +fn generate_noise_config(identity_keypair: &Keypair) -> noise::Config { + noise::Config::new(identity_keypair).expect("signing can fail only once during starting a node") } /// For a multiaddr that ends with a peer id, this strips this suffix. 
Rust-libp2p @@ -236,11 +214,11 @@ pub(crate) fn create_whitelist_filter( possible_fork_digests: Vec<[u8; 4]>, attestation_subnet_count: u64, sync_committee_subnet_count: u64, -) -> WhitelistSubscriptionFilter { +) -> gossipsub::WhitelistSubscriptionFilter { let mut possible_hashes = HashSet::new(); for fork_digest in possible_fork_digests { let mut add = |kind| { - let topic: Topic = + let topic: gossipsub::IdentTopic = GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into(); possible_hashes.insert(topic.hash()); }; @@ -262,7 +240,7 @@ pub(crate) fn create_whitelist_filter( add(SyncCommitteeMessage(SyncSubnetId::new(id))); } } - WhitelistSubscriptionFilter(possible_hashes) + gossipsub::WhitelistSubscriptionFilter(possible_hashes) } /// Persist metadata to disk diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index bb0397de1e..06732ac99f 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -2,7 +2,7 @@ use crate::types::{GossipEncoding, GossipKind, GossipTopic}; use crate::TopicHash; -use libp2p::gossipsub::{DataTransform, GossipsubMessage, RawGossipsubMessage}; +use libp2p::gossipsub; use snap::raw::{decompress_len, Decoder, Encoder}; use ssz::{Decode, Encode}; use std::boxed::Box; @@ -56,12 +56,12 @@ impl SnappyTransform { } } -impl DataTransform for SnappyTransform { +impl gossipsub::DataTransform for SnappyTransform { // Provides the snappy decompression from RawGossipsubMessages fn inbound_transform( &self, - raw_message: RawGossipsubMessage, - ) -> Result { + raw_message: gossipsub::RawMessage, + ) -> Result { // check the length of the raw bytes let len = decompress_len(&raw_message.data)?; if len > self.max_size_per_message { @@ -75,7 +75,7 @@ impl DataTransform for SnappyTransform { let decompressed_data = decoder.decompress_vec(&raw_message.data)?; // Build the GossipsubMessage struct - Ok(GossipsubMessage 
{ + Ok(gossipsub::Message { source: raw_message.source, data: decompressed_data, sequence_number: raw_message.sequence_number, diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 64714cbc0a..b48891335c 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -1,5 +1,5 @@ #![cfg(test)] -use libp2p::gossipsub::GossipsubConfigBuilder; +use libp2p::gossipsub; use lighthouse_network::service::Network as LibP2PService; use lighthouse_network::Enr; use lighthouse_network::EnrExt; @@ -81,7 +81,7 @@ pub fn build_config(port: u16, mut boot_nodes: Vec) -> NetworkConfig { config.boot_nodes_enr.append(&mut boot_nodes); config.network_dir = path.into_path(); // Reduce gossipsub heartbeat parameters - config.gs_config = GossipsubConfigBuilder::from(config.gs_config) + config.gs_config = gossipsub::ConfigBuilder::from(config.gs_config) .heartbeat_initial_delay(Duration::from_millis(500)) .heartbeat_interval(Duration::from_millis(500)) .build() diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index b517d57df3..c355c671e8 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -493,10 +493,8 @@ impl NetworkService { NetworkEvent::PeerConnectedOutgoing(peer_id) => { self.send_to_router(RouterMessage::StatusPeer(peer_id)); } - NetworkEvent::PeerConnectedIncoming(_) - | NetworkEvent::PeerBanned(_) - | NetworkEvent::PeerUnbanned(_) => { - // No action required for these events. + NetworkEvent::PeerConnectedIncoming(_) => { + // No action required for this event. } NetworkEvent::PeerDisconnected(peer_id) => { self.send_to_router(RouterMessage::PeerDisconnected(peer_id)); diff --git a/book/src/installation-source.md b/book/src/installation-source.md index 1504b7ff0f..58e6917eca 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -28,7 +28,7 @@ operating system. 
Install the following packages: ```bash -sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler +sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang ``` > Tips: @@ -51,10 +51,6 @@ After this, you are ready to [build Lighthouse](#build-lighthouse). brew install cmake ``` -1. Install protoc using Homebrew: -``` -brew install protobuf -``` [Homebrew]: https://brew.sh/ @@ -71,7 +67,7 @@ After this, you are ready to [build Lighthouse](#build-lighthouse). Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) ``` > - To verify that Chocolatey is ready, run `choco` and it should return the version. -1. Install Make, CMake, LLVM and protoc using Chocolatey: +1. Install Make, CMake and LLVM using Chocolatey: ``` choco install make @@ -85,10 +81,6 @@ choco install cmake --installargs 'ADD_CMAKE_TO_PATH=System' choco install llvm ``` -``` -choco install protoc -``` - These dependencies are for compiling Lighthouse natively on Windows. Lighthouse can also run successfully under the [Windows Subsystem for Linux (WSL)][WSL]. If using Ubuntu under WSL, you should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)](#ubuntu) section. @@ -217,4 +209,3 @@ look into [cross compilation](./cross-compiling.md), or use a [pre-built binary](https://github.com/sigp/lighthouse/releases). If compilation fails with `error: linking with cc failed: exit code: 1`, try running `cargo clean`. - diff --git a/book/src/pi.md b/book/src/pi.md index d8d154d765..550415240b 100644 --- a/book/src/pi.md +++ b/book/src/pi.md @@ -22,7 +22,7 @@ terminal and an Internet connection are necessary. 
Install the Ubuntu dependencies: ```bash -sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler +sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang ``` > Tips: diff --git a/book/src/setup.md b/book/src/setup.md index ea3c5664ac..533e1d463d 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -14,8 +14,6 @@ The additional requirements for developers are: don't have `anvil` available on your `PATH`. - [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by some dependencies. See [`Installation Guide`](./installation.md) for more info. -- [`protoc`](https://github.com/protocolbuffers/protobuf/releases) required for - the networking stack. - [`java 11 runtime`](https://openjdk.java.net/projects/jdk/). 11 is the minimum, used by web3signer_tests. - [`libpq-dev`](https://www.postgresql.org/docs/devel/libpq.html). Also know as diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index c4e36022a8..d006156bf9 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -80,7 +80,7 @@ impl BootNodeConfig { } let private_key = load_private_key(&network_config, &logger); - let local_key = CombinedKey::from_libp2p(&private_key)?; + let local_key = CombinedKey::from_libp2p(private_key)?; let local_enr = if let Some(dir) = matches.value_of("network-dir") { let network_dir: PathBuf = dir.into(); diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 296d43b1a2..338a2d243b 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -18,4 +18,4 @@ serde_yaml = "0.8.13" types = { path = "../../consensus/types"} ethereum_ssz = "0.5.0" eth2_config = { path = "../eth2_config"} -discv5 = "0.3.0" \ No newline at end of file +discv5 = "0.3.1" \ No newline at end of file diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 98f33f2153..a50aa17027 100644 --- a/lcli/Dockerfile +++ 
b/lcli/Dockerfile @@ -2,7 +2,7 @@ # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` FROM rust:1.68.2-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG PORTABLE ENV PORTABLE $PORTABLE @@ -10,4 +10,4 @@ RUN cd lighthouse && make install-lcli FROM ubuntu:22.04 RUN apt-get update && apt-get -y upgrade && apt-get clean && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/cargo/bin/lcli /usr/local/bin/lcli +COPY --from=builder /usr/local/cargo/bin/lcli /usr/local/bin/lcli \ No newline at end of file diff --git a/scripts/cross/Dockerfile b/scripts/cross/Dockerfile deleted file mode 100644 index 5472b980ba..0000000000 --- a/scripts/cross/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -ARG CROSS_BASE_IMAGE -FROM $CROSS_BASE_IMAGE - -RUN apt-get update -y && apt-get upgrade -y - -RUN apt-get install -y unzip && \ - PB_REL="https://github.com/protocolbuffers/protobuf/releases" && \ - curl -L $PB_REL/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip -o protoc.zip && \ - unzip protoc.zip -d /usr && \ - chmod +x /usr/bin/protoc - -RUN apt-get install -y cmake clang-3.9 - -ENV PROTOC=/usr/bin/protoc diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar index ddc49e13cd..c790e248df 100644 --- a/testing/antithesis/Dockerfile.libvoidstar +++ b/testing/antithesis/Dockerfile.libvoidstar @@ -1,5 +1,5 @@ FROM rust:1.68.2-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse # Build lighthouse directly with a cargo build command, bypassing the Makefile. 
@@ -22,4 +22,4 @@ COPY --from=builder /lighthouse/testing/antithesis/libvoidstar/libvoidstar.so /u ENV LD_LIBRARY_PATH=/usr/lib # move the lighthouse binary and lcli binary COPY --from=builder /lighthouse/target/x86_64-unknown-linux-gnu/release/lighthouse /usr/local/bin/lighthouse -COPY --from=builder /lighthouse/target/release/lcli /usr/local/bin/lcli +COPY --from=builder /lighthouse/target/release/lcli /usr/local/bin/lcli \ No newline at end of file From fcf51d691ed3a9bb0e0d6d6050d1a83b81805ef8 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Wed, 2 Aug 2023 23:50:41 +0000 Subject: [PATCH 29/75] fix typo (#4555) --- beacon_node/beacon_chain/src/timeout_rw_lock.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/timeout_rw_lock.rs b/beacon_node/beacon_chain/src/timeout_rw_lock.rs index 28e3f4c29c..b2eea76265 100644 --- a/beacon_node/beacon_chain/src/timeout_rw_lock.rs +++ b/beacon_node/beacon_chain/src/timeout_rw_lock.rs @@ -5,7 +5,7 @@ use std::time::Duration; /// A simple wrapper around `parking_lot::RwLock` that only permits read/write access with a /// time-out (i.e., no indefinitely-blocking operations). /// -/// Timeouts can be optionally be disabled at runtime for all instances of this type by calling +/// Timeouts can be optionally disabled at runtime for all instances of this type by calling /// `TimeoutRwLock::disable_timeouts()`. 
pub struct TimeoutRwLock(RwLock); From b5e25aeb2fcc4338730b864c8178e14fc2cce232 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Wed, 2 Aug 2023 23:50:42 +0000 Subject: [PATCH 30/75] CommitteeCache.initialized: fail early if possible (#4556) --- consensus/types/src/beacon_state/committee_cache.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 2db8fbe763..bbe81b9300 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -56,6 +56,11 @@ impl CommitteeCache { return Err(Error::ZeroSlotsPerEpoch); } + // The use of `NonZeroUsize` reduces the maximum number of possible validators by one. + if state.validators().len() == usize::max_value() { + return Err(Error::TooManyValidators); + } + let active_validator_indices = get_active_validator_indices(state.validators(), epoch); if active_validator_indices.is_empty() { @@ -75,11 +80,6 @@ impl CommitteeCache { ) .ok_or(Error::UnableToShuffle)?; - // The use of `NonZeroUsize` reduces the maximum number of possible validators by one. 
-        if state.validators().len() == usize::max_value() {
-            return Err(Error::TooManyValidators);
-        }
-
         let mut shuffling_positions = vec![<_>::default(); state.validators().len()];
         for (i, &v) in shuffling.iter().enumerate() {
             *shuffling_positions

From 7399a54ca301bfc6d0dc2c718a73c49d7a752e61 Mon Sep 17 00:00:00 2001
From: zhiqiangxu <652732310@qq.com>
Date: Wed, 2 Aug 2023 23:50:43 +0000
Subject: [PATCH 31/75] CommitteeCache.get_all_beacon_committees: set correct
 capacity to avoid realloc (#4557)

---
 consensus/types/src/beacon_state/committee_cache.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs
index bbe81b9300..64bf686f34 100644
--- a/consensus/types/src/beacon_state/committee_cache.rs
+++ b/consensus/types/src/beacon_state/committee_cache.rs
@@ -174,7 +174,7 @@ impl CommitteeCache {
             .ok_or(Error::CommitteeCacheUninitialized(None))?;
 
         initialized_epoch.slot_iter(self.slots_per_epoch).try_fold(
-            Vec::with_capacity(self.slots_per_epoch as usize),
+            Vec::with_capacity(self.epoch_committee_count()),
             |mut vec, slot| {
                 vec.append(&mut self.get_beacon_committees_at_slot(slot)?);
                 Ok(vec)

From 33976121601d9d31d26c1bb2e1f0676e9c4d24fc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Arma=C4=9Fan=20Y=C4=B1ld=C4=B1rak?=
 <41453366+armaganyildirak@users.noreply.github.com>
Date: Thu, 3 Aug 2023 01:51:47 +0000
Subject: [PATCH 32/75] Shift networking configuration (#4426)

## Issue Addressed
Addresses [#4401](https://github.com/sigp/lighthouse/issues/4401)

## Proposed Changes
Shift some constants into ```ChainSpec``` and remove the constant values from code space.

## Additional Info
I mostly used ```MainnetEthSpec::default_spec()``` for getting ```ChainSpec```. I wonder whether I made a mistake about that.
Co-authored-by: armaganyildirak Co-authored-by: Paul Hauner Co-authored-by: Age Manning Co-authored-by: Diva M --- Cargo.lock | 1 + .../src/attestation_verification.rs | 20 +- beacon_node/beacon_chain/src/beacon_chain.rs | 5 - .../beacon_chain/src/block_verification.rs | 4 +- beacon_node/beacon_chain/src/lib.rs | 2 +- ...ght_client_finality_update_verification.rs | 7 +- ...t_client_optimistic_update_verification.rs | 7 +- .../src/sync_committee_verification.rs | 17 +- beacon_node/beacon_processor/src/lib.rs | 8 +- .../src/work_reprocessing_queue.rs | 8 +- beacon_node/client/src/builder.rs | 3 +- beacon_node/http_api/src/attester_duties.rs | 6 +- beacon_node/http_api/src/proposer_duties.rs | 4 +- beacon_node/http_api/src/sync_committees.rs | 4 +- beacon_node/http_api/tests/tests.rs | 12 +- beacon_node/lighthouse_network/src/config.rs | 36 ++-- .../lighthouse_network/src/rpc/codec/base.rs | 15 +- .../src/rpc/codec/ssz_snappy.rs | 122 +++++++++--- .../lighthouse_network/src/rpc/handler.rs | 36 ++-- beacon_node/lighthouse_network/src/rpc/mod.rs | 31 ++- .../lighthouse_network/src/rpc/protocol.rs | 20 +- .../lighthouse_network/src/service/mod.rs | 18 +- .../lighthouse_network/tests/common.rs | 11 +- .../lighthouse_network/tests/rpc_tests.rs | 52 +++-- .../gossip_methods.rs | 3 + .../src/network_beacon_processor/mod.rs | 11 -- .../src/network_beacon_processor/tests.rs | 15 +- beacon_node/src/config.rs | 6 +- .../gnosis/config.yaml | 12 +- .../mainnet/config.yaml | 12 +- .../prater/config.yaml | 12 +- .../sepolia/config.yaml | 12 +- consensus/types/Cargo.toml | 1 + consensus/types/src/chain_spec.rs | 185 +++++++++++++++--- consensus/types/src/subnet_id.rs | 4 +- .../environment/tests/testnet_dir/config.yaml | 14 ++ 36 files changed, 523 insertions(+), 213 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b2e8188eec..13f2b7cd43 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8391,6 +8391,7 @@ dependencies = [ "merkle_proof", "metastruct", "parking_lot 0.12.1", + 
"paste", "rand 0.8.5", "rand_xorshift", "rayon", diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 6df0758b2e..5535fec37c 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -35,10 +35,8 @@ mod batch; use crate::{ - beacon_chain::{MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT}, - metrics, - observed_aggregates::ObserveOutcome, - observed_attesters::Error as ObservedAttestersError, + beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, metrics, + observed_aggregates::ObserveOutcome, observed_attesters::Error as ObservedAttestersError, BeaconChain, BeaconChainError, BeaconChainTypes, }; use bls::verify_signature_sets; @@ -57,8 +55,8 @@ use std::borrow::Cow; use strum::AsRefStr; use tree_hash::TreeHash; use types::{ - Attestation, BeaconCommittee, CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation, - SelectionProof, SignedAggregateAndProof, Slot, SubnetId, + Attestation, BeaconCommittee, ChainSpec, CommitteeIndex, Epoch, EthSpec, Hash256, + IndexedAttestation, SelectionProof, SignedAggregateAndProof, Slot, SubnetId, }; pub use batch::{batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations}; @@ -454,7 +452,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future attestations for later processing. - verify_propagation_slot_range(&chain.slot_clock, attestation)?; + verify_propagation_slot_range(&chain.slot_clock, attestation, &chain.spec)?; // Check the attestation's epoch matches its target. if attestation.data.slot.epoch(T::EthSpec::slots_per_epoch()) @@ -722,7 +720,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future attestations for later processing. 
- verify_propagation_slot_range(&chain.slot_clock, attestation)?; + verify_propagation_slot_range(&chain.slot_clock, attestation, &chain.spec)?; // Check to ensure that the attestation is "unaggregated". I.e., it has exactly one // aggregation bit set. @@ -1037,11 +1035,11 @@ fn verify_head_block_is_known( pub fn verify_propagation_slot_range( slot_clock: &S, attestation: &Attestation, + spec: &ChainSpec, ) -> Result<(), Error> { let attestation_slot = attestation.data.slot; - let latest_permissible_slot = slot_clock - .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .now_with_future_tolerance(spec.maximum_gossip_clock_disparity()) .ok_or(BeaconChainError::UnableToReadSlot)?; if attestation_slot > latest_permissible_slot { return Err(Error::FutureSlot { @@ -1052,7 +1050,7 @@ pub fn verify_propagation_slot_range( // Taking advantage of saturating subtraction on `Slot`. let earliest_permissible_slot = slot_clock - .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .now_with_past_tolerance(spec.maximum_gossip_clock_disparity()) .ok_or(BeaconChainError::UnableToReadSlot)? - E::slots_per_epoch(); if attestation_slot < earliest_permissible_slot { diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 78f2c3f03b..25964ed216 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -217,11 +217,6 @@ pub enum OverrideForkchoiceUpdate { AlreadyApplied, } -/// The accepted clock drift for nodes gossiping blocks and attestations. 
See: -/// -/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#configuration -pub const MAXIMUM_GOSSIP_CLOCK_DISPARITY: Duration = Duration::from_millis(500); - #[derive(Debug, PartialEq)] pub enum AttestationProcessingOutcome { Processed, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 492f492521..0a82eae371 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -59,7 +59,7 @@ use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ beacon_chain::{ BeaconForkChoice, ForkChoiceError, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, - MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, + VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, }, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; @@ -730,7 +730,7 @@ impl GossipVerifiedBlock { // Do not gossip or process blocks from future slots. let present_slot_with_tolerance = chain .slot_clock - .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) .ok_or(BeaconChainError::UnableToReadSlot)?; if block.slot() > present_slot_with_tolerance { return Err(BlockError::FutureSlot { diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 85ff0f20a0..4ea1eeee01 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -54,7 +54,7 @@ pub use self::beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, - INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; pub use self::beacon_snapshot::BeaconSnapshot; pub use 
self::chain_config::ChainConfig; diff --git a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs index 7c431ebccc..638d2b4012 100644 --- a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs @@ -1,6 +1,4 @@ -use crate::{ - beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes, -}; +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use derivative::Derivative; use slot_clock::SlotClock; use std::time::Duration; @@ -103,7 +101,8 @@ impl VerifiedLightClientFinalityUpdate { // verify that enough time has passed for the block to have been propagated match start_time { Some(time) => { - if seen_timestamp + MAXIMUM_GOSSIP_CLOCK_DISPARITY < time + one_third_slot_duration + if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() + < time + one_third_slot_duration { return Err(Error::TooEarly); } diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs index 20d7181808..2d1a5cf97c 100644 --- a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs @@ -1,6 +1,4 @@ -use crate::{ - beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes, -}; +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use derivative::Derivative; use eth2::types::Hash256; use slot_clock::SlotClock; @@ -103,7 +101,8 @@ impl VerifiedLightClientOptimisticUpdate { // verify that enough time has passed for the block to have been propagated match start_time { Some(time) => { - if seen_timestamp + MAXIMUM_GOSSIP_CLOCK_DISPARITY < time + one_third_slot_duration + if seen_timestamp + 
chain.spec.maximum_gossip_clock_disparity() + < time + one_third_slot_duration { return Err(Error::TooEarly); } diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index 246bb12cc0..5c6710bfd6 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -28,10 +28,8 @@ use crate::observed_attesters::SlotSubcommitteeIndex; use crate::{ - beacon_chain::{MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT}, - metrics, - observed_aggregates::ObserveOutcome, - BeaconChain, BeaconChainError, BeaconChainTypes, + beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, metrics, + observed_aggregates::ObserveOutcome, BeaconChain, BeaconChainError, BeaconChainTypes, }; use bls::{verify_signature_sets, PublicKeyBytes}; use derivative::Derivative; @@ -52,6 +50,7 @@ use tree_hash_derive::TreeHash; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::slot_data::SlotData; use types::sync_committee::Error as SyncCommitteeError; +use types::ChainSpec; use types::{ sync_committee_contribution::Error as ContributionError, AggregateSignature, BeaconStateError, EthSpec, Hash256, SignedContributionAndProof, Slot, SyncCommitteeContribution, @@ -297,7 +296,7 @@ impl VerifiedSyncContribution { let subcommittee_index = contribution.subcommittee_index as usize; // Ensure sync committee contribution is within the MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance. - verify_propagation_slot_range(&chain.slot_clock, contribution)?; + verify_propagation_slot_range(&chain.slot_clock, contribution, &chain.spec)?; // Validate subcommittee index. if contribution.subcommittee_index >= SYNC_COMMITTEE_SUBNET_COUNT { @@ -460,7 +459,7 @@ impl VerifiedSyncCommitteeMessage { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future sync committee messages for later processing. 
- verify_propagation_slot_range(&chain.slot_clock, &sync_message)?; + verify_propagation_slot_range(&chain.slot_clock, &sync_message, &chain.spec)?; // Ensure the `subnet_id` is valid for the given validator. let pubkey = chain @@ -576,11 +575,11 @@ impl VerifiedSyncCommitteeMessage { pub fn verify_propagation_slot_range( slot_clock: &S, sync_contribution: &U, + spec: &ChainSpec, ) -> Result<(), Error> { let message_slot = sync_contribution.get_slot(); - let latest_permissible_slot = slot_clock - .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .now_with_future_tolerance(spec.maximum_gossip_clock_disparity()) .ok_or(BeaconChainError::UnableToReadSlot)?; if message_slot > latest_permissible_slot { return Err(Error::FutureSlot { @@ -590,7 +589,7 @@ pub fn verify_propagation_slot_range( } let earliest_permissible_slot = slot_clock - .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .now_with_past_tolerance(spec.maximum_gossip_clock_disparity()) .ok_or(BeaconChainError::UnableToReadSlot)?; if message_slot < earliest_permissible_slot { diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 88066f2a30..297c4868db 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -661,7 +661,8 @@ impl BeaconProcessor { work_reprocessing_rx: mpsc::Receiver, work_journal_tx: Option>, slot_clock: S, - ) { + maximum_gossip_clock_disparity: Duration, + ) -> Result<(), String> { // Used by workers to communicate that they are finished a task. let (idle_tx, idle_rx) = mpsc::channel::<()>(MAX_IDLE_QUEUE_LEN); @@ -717,13 +718,15 @@ impl BeaconProcessor { // receive them back once they are ready (`ready_work_rx`). 
let (ready_work_tx, ready_work_rx) = mpsc::channel::(MAX_SCHEDULED_WORK_QUEUE_LEN); + spawn_reprocess_scheduler( ready_work_tx, work_reprocessing_rx, &self.executor, slot_clock, self.log.clone(), - ); + maximum_gossip_clock_disparity, + )?; let executor = self.executor.clone(); @@ -1203,6 +1206,7 @@ impl BeaconProcessor { // Spawn on the core executor. executor.spawn(manager_future, MANAGER_TASK_NAME); + Ok(()) } /// Spawns a blocking worker thread to process some `Work`. diff --git a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs index 608f634d53..9191509d39 100644 --- a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs +++ b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs @@ -361,7 +361,12 @@ pub fn spawn_reprocess_scheduler( executor: &TaskExecutor, slot_clock: S, log: Logger, -) { + maximum_gossip_clock_disparity: Duration, +) -> Result<(), String> { + // Sanity check + if ADDITIONAL_QUEUED_BLOCK_DELAY >= maximum_gossip_clock_disparity { + return Err("The block delay and gossip disparity don't match.".to_string()); + } let mut queue = ReprocessQueue { work_reprocessing_rx, ready_work_tx, @@ -400,6 +405,7 @@ pub fn spawn_reprocess_scheduler( }, TASK_NAME, ); + Ok(()) } impl ReprocessQueue { diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 14edbb9730..71a9b28fb0 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -795,7 +795,8 @@ where self.work_reprocessing_rx, None, beacon_chain.slot_clock.clone(), - ); + beacon_chain.spec.maximum_gossip_clock_disparity(), + )?; } let state_advance_context = runtime_context.service_context("state_advance".into()); diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 5c3e420839..aad405d56b 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -1,9 
+1,7 @@ //! Contains the handler for the `GET validator/duties/attester/{epoch}` endpoint. use crate::state_id::StateId; -use beacon_chain::{ - BeaconChain, BeaconChainError, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY, -}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::{self as api_types}; use slot_clock::SlotClock; use state_processing::state_advance::partial_state_advance; @@ -32,7 +30,7 @@ pub fn attester_duties( // will equal `current_epoch + 1` let tolerant_current_epoch = chain .slot_clock - .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) .ok_or_else(|| warp_utils::reject::custom_server_error("unable to read slot clock".into()))? .epoch(T::EthSpec::slots_per_epoch()); diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 7e946b89e7..708df39b4d 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -3,7 +3,7 @@ use crate::state_id::StateId; use beacon_chain::{ beacon_proposer_cache::{compute_proposer_duties_from_head, ensure_state_is_in_epoch}, - BeaconChain, BeaconChainError, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + BeaconChain, BeaconChainError, BeaconChainTypes, }; use eth2::types::{self as api_types}; use safe_arith::SafeArith; @@ -33,7 +33,7 @@ pub fn proposer_duties( // will equal `current_epoch + 1` let tolerant_current_epoch = chain .slot_clock - .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) .ok_or_else(|| warp_utils::reject::custom_server_error("unable to read slot clock".into()))? 
.epoch(T::EthSpec::slots_per_epoch()); diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index 07dfb5c988..dcf41429f6 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -6,7 +6,7 @@ use beacon_chain::sync_committee_verification::{ }; use beacon_chain::{ validator_monitor::timestamp_now, BeaconChain, BeaconChainError, BeaconChainTypes, - StateSkipConfig, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + StateSkipConfig, }; use eth2::types::{self as api_types}; use lighthouse_network::PubsubMessage; @@ -85,7 +85,7 @@ fn duties_from_state_load( let current_epoch = chain.epoch()?; let tolerant_current_epoch = chain .slot_clock - .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) .ok_or(BeaconChainError::UnableToReadSlot)? .epoch(T::EthSpec::slots_per_epoch()); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 7c3872925a..28eb106e8d 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1,7 +1,7 @@ use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, - BeaconChain, StateSkipConfig, WhenSlotSkipped, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + BeaconChain, StateSkipConfig, WhenSlotSkipped, }; use environment::null_logger; use eth2::{ @@ -2313,7 +2313,9 @@ impl ApiTester { .unwrap(); self.chain.slot_clock.set_current_time( - current_epoch_start - MAXIMUM_GOSSIP_CLOCK_DISPARITY - Duration::from_millis(1), + current_epoch_start + - self.chain.spec.maximum_gossip_clock_disparity() + - Duration::from_millis(1), ); let dependent_root = self @@ -2350,9 +2352,9 @@ impl ApiTester { "should not get attester duties outside of tolerance" ); - self.chain - .slot_clock - .set_current_time(current_epoch_start - 
MAXIMUM_GOSSIP_CLOCK_DISPARITY); + self.chain.slot_clock.set_current_time( + current_epoch_start - self.chain.spec.maximum_gossip_clock_disparity(), + ); self.client .get_validator_duties_proposer(current_epoch) diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 6c8f20a24b..0ab7c03e7f 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -16,11 +16,6 @@ use std::sync::Arc; use std::time::Duration; use types::{ForkContext, ForkName}; -/// The maximum transmit size of gossip messages in bytes pre-merge. -const GOSSIP_MAX_SIZE: usize = 1_048_576; // 1M -/// The maximum transmit size of gossip messages in bytes post-merge. -const GOSSIP_MAX_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M - /// The cache time is set to accommodate the circulation time of an attestation. /// /// The p2p spec declares that we accept attestations within the following range: @@ -35,20 +30,20 @@ const GOSSIP_MAX_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M /// another 500ms for "fudge factor". pub const DUPLICATE_CACHE_TIME: Duration = Duration::from_secs(33 * 12 + 1); -// We treat uncompressed messages as invalid and never use the INVALID_SNAPPY_DOMAIN as in the -// specification. We leave it here for posterity. -// const MESSAGE_DOMAIN_INVALID_SNAPPY: [u8; 4] = [0, 0, 0, 0]; -const MESSAGE_DOMAIN_VALID_SNAPPY: [u8; 4] = [1, 0, 0, 0]; - /// The maximum size of gossip messages. -pub fn gossip_max_size(is_merge_enabled: bool) -> usize { +pub fn gossip_max_size(is_merge_enabled: bool, gossip_max_size: usize) -> usize { if is_merge_enabled { - GOSSIP_MAX_SIZE_POST_MERGE + gossip_max_size } else { - GOSSIP_MAX_SIZE + gossip_max_size / 10 } } +pub struct GossipsubConfigParams { + pub message_domain_valid_snappy: [u8; 4], + pub gossip_max_size: usize, +} + #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] /// Network configuration for lighthouse. 
@@ -413,7 +408,11 @@ impl From for NetworkLoad { } /// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork. -pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> gossipsub::Config { +pub fn gossipsub_config( + network_load: u8, + fork_context: Arc, + gossipsub_config_params: GossipsubConfigParams, +) -> gossipsub::Config { // The function used to generate a gossipsub message id // We use the first 8 bytes of SHA256(topic, data) for content addressing let fast_gossip_message_id = |message: &gossipsub::RawMessage| { @@ -446,12 +445,12 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> gos } } } - + let message_domain_valid_snappy = gossipsub_config_params.message_domain_valid_snappy; let is_merge_enabled = fork_context.fork_exists(ForkName::Merge); let gossip_message_id = move |message: &gossipsub::Message| { gossipsub::MessageId::from( &Sha256::digest( - prefix(MESSAGE_DOMAIN_VALID_SNAPPY, message, fork_context.clone()).as_slice(), + prefix(message_domain_valid_snappy, message, fork_context.clone()).as_slice(), )[..20], ) }; @@ -459,7 +458,10 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> gos let load = NetworkLoad::from(network_load); gossipsub::ConfigBuilder::default() - .max_transmit_size(gossip_max_size(is_merge_enabled)) + .max_transmit_size(gossip_max_size( + is_merge_enabled, + gossipsub_config_params.gossip_max_size, + )) .heartbeat_interval(load.heartbeat_interval) .mesh_n(load.mesh_n) .mesh_n_low(load.mesh_n_low) diff --git a/beacon_node/lighthouse_network/src/rpc/codec/base.rs b/beacon_node/lighthouse_network/src/rpc/codec/base.rs index d568f27897..943d4a3bce 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/base.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/base.rs @@ -217,9 +217,12 @@ mod tests { let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy); let fork_context = 
Arc::new(fork_context(ForkName::Base)); + + let chain_spec = Spec::default_spec(); + let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( snappy_protocol_id, - max_rpc_size(&fork_context), + max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize), fork_context, ); @@ -251,9 +254,12 @@ mod tests { let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(ForkName::Base)); + + let chain_spec = Spec::default_spec(); + let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( snappy_protocol_id, - max_rpc_size(&fork_context), + max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize), fork_context, ); @@ -279,7 +285,10 @@ mod tests { // Response limits let fork_context = Arc::new(fork_context(ForkName::Base)); - let max_rpc_size = max_rpc_size(&fork_context); + + let chain_spec = Spec::default_spec(); + + let max_rpc_size = max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize); let limit = protocol_id.rpc_response_limits::(&fork_context); let mut max = encode_len(limit.max + 1); let mut codec = SSZSnappyOutboundCodec::::new( diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 39cf8b3eb2..f1d94da7ec 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -615,8 +615,8 @@ mod tests { }; use std::sync::Arc; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, Epoch, - ForkContext, FullPayload, Hash256, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, ChainSpec, EmptyBlock, + Epoch, ForkContext, FullPayload, Hash256, Signature, SignedBeaconBlock, Slot, }; use snap::write::FrameEncoder; @@ -658,7 +658,7 @@ mod tests { } /// Merge block with length < max_rpc_size. 
- fn merge_block_small(fork_context: &ForkContext) -> SignedBeaconBlock { + fn merge_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> SignedBeaconBlock { let mut block: BeaconBlockMerge<_, FullPayload> = BeaconBlockMerge::empty(&Spec::default_spec()); let tx = VariableList::from(vec![0; 1024]); @@ -667,14 +667,14 @@ mod tests { block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Merge(block); - assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context)); + assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context, spec.max_chunk_size as usize)); SignedBeaconBlock::from_block(block, Signature::empty()) } /// Merge block with length > MAX_RPC_SIZE. /// The max limit for a merge block is in the order of ~16GiB which wouldn't fit in memory. /// Hence, we generate a merge block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. - fn merge_block_large(fork_context: &ForkContext) -> SignedBeaconBlock { + fn merge_block_large(fork_context: &ForkContext, spec: &ChainSpec) -> SignedBeaconBlock { let mut block: BeaconBlockMerge<_, FullPayload> = BeaconBlockMerge::empty(&Spec::default_spec()); let tx = VariableList::from(vec![0; 1024]); @@ -683,7 +683,7 @@ mod tests { block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Merge(block); - assert!(block.ssz_bytes_len() > max_rpc_size(fork_context)); + assert!(block.ssz_bytes_len() > max_rpc_size(fork_context, spec.max_chunk_size as usize)); SignedBeaconBlock::from_block(block, Signature::empty()) } @@ -737,10 +737,11 @@ mod tests { protocol: SupportedProtocol, message: RPCCodedResponse, fork_name: ForkName, + spec: &ChainSpec, ) -> Result { let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(fork_name)); - let max_packet_size = max_rpc_size(&fork_context); + let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); let mut buf = 
BytesMut::new(); let mut snappy_inbound_codec = @@ -783,10 +784,11 @@ mod tests { protocol: SupportedProtocol, message: &mut BytesMut, fork_name: ForkName, + spec: &ChainSpec, ) -> Result>, RPCError> { let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(fork_name)); - let max_packet_size = max_rpc_size(&fork_context); + let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new(snappy_protocol_id, max_packet_size, fork_context); // decode message just as snappy message @@ -798,15 +800,20 @@ mod tests { protocol: SupportedProtocol, message: RPCCodedResponse, fork_name: ForkName, + spec: &ChainSpec, ) -> Result>, RPCError> { - let mut encoded = encode_response(protocol, message, fork_name)?; - decode_response(protocol, &mut encoded, fork_name) + let mut encoded = encode_response(protocol, message, fork_name, spec)?; + decode_response(protocol, &mut encoded, fork_name, spec) } /// Verifies that requests we send are encoded in a way that we would correctly decode too. 
- fn encode_then_decode_request(req: OutboundRequest, fork_name: ForkName) { + fn encode_then_decode_request( + req: OutboundRequest, + fork_name: ForkName, + spec: &ChainSpec, + ) { let fork_context = Arc::new(fork_context(fork_name)); - let max_packet_size = max_rpc_size(&fork_context); + let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); let protocol = ProtocolId::new(req.versioned_protocol(), Encoding::SSZSnappy); // Encode a request we send let mut buf = BytesMut::new(); @@ -851,11 +858,14 @@ mod tests { // Test RPCResponse encoding/decoding for V1 messages #[test] fn test_encode_then_decode_v1() { + let chain_spec = Spec::default_spec(); + assert_eq!( encode_then_decode_response( SupportedProtocol::StatusV1, RPCCodedResponse::Success(RPCResponse::Status(status_message())), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::Status(status_message()))) ); @@ -865,6 +875,7 @@ mod tests { SupportedProtocol::PingV1, RPCCodedResponse::Success(RPCResponse::Pong(ping_message())), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::Pong(ping_message()))) ); @@ -874,6 +885,7 @@ mod tests { SupportedProtocol::BlocksByRangeV1, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() @@ -886,6 +898,7 @@ mod tests { SupportedProtocol::BlocksByRangeV1, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), ForkName::Altair, + &chain_spec, ) .unwrap_err(), RPCError::SSZDecodeError(_) @@ -898,6 +911,7 @@ mod tests { SupportedProtocol::BlocksByRootV1, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRoot( Arc::new(empty_base_block()) @@ -910,6 +924,7 @@ mod tests { SupportedProtocol::BlocksByRootV1, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, 
+ &chain_spec, ) .unwrap_err(), RPCError::SSZDecodeError(_) @@ -922,6 +937,7 @@ mod tests { SupportedProtocol::MetaDataV1, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::MetaData(metadata()))), ); @@ -932,6 +948,7 @@ mod tests { SupportedProtocol::MetaDataV1, RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::MetaData(metadata()))), ); @@ -940,11 +957,14 @@ mod tests { // Test RPCResponse encoding/decoding for V1 messages #[test] fn test_encode_then_decode_v2() { + let chain_spec = Spec::default_spec(); + assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() @@ -959,6 +979,7 @@ mod tests { SupportedProtocol::BlocksByRangeV2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Altair, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() @@ -970,12 +991,13 @@ mod tests { SupportedProtocol::BlocksByRangeV2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), ForkName::Altair, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRange(Arc::new(altair_block())))) ); - let merge_block_small = merge_block_small(&fork_context(ForkName::Merge)); - let merge_block_large = merge_block_large(&fork_context(ForkName::Merge)); + let merge_block_small = merge_block_small(&fork_context(ForkName::Merge), &chain_spec); + let merge_block_large = merge_block_large(&fork_context(ForkName::Merge), &chain_spec); assert_eq!( encode_then_decode_response( @@ -984,6 +1006,7 @@ mod tests { merge_block_small.clone() ))), ForkName::Merge, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRange(Arc::new( merge_block_small.clone() @@ -1000,6 +1023,7 @@ mod tests { 
SupportedProtocol::BlocksByRangeV2, &mut encoded, ForkName::Merge, + &chain_spec, ) .unwrap_err(), RPCError::InvalidData(_) @@ -1012,6 +1036,7 @@ mod tests { SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRoot( Arc::new(empty_base_block()) @@ -1026,6 +1051,7 @@ mod tests { SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRoot( Arc::new(empty_base_block()) @@ -1037,6 +1063,7 @@ mod tests { SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRoot(Arc::new(altair_block())))) ); @@ -1048,6 +1075,7 @@ mod tests { merge_block_small.clone() ))), ForkName::Merge, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRoot(Arc::new(merge_block_small)))) ); @@ -1062,6 +1090,7 @@ mod tests { SupportedProtocol::BlocksByRootV2, &mut encoded, ForkName::Merge, + &chain_spec, ) .unwrap_err(), RPCError::InvalidData(_) @@ -1075,6 +1104,7 @@ mod tests { SupportedProtocol::MetaDataV2, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::MetaData(metadata_v2()))) ); @@ -1084,6 +1114,7 @@ mod tests { SupportedProtocol::MetaDataV2, RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), ForkName::Altair, + &chain_spec, ), Ok(Some(RPCResponse::MetaData(metadata_v2()))) ); @@ -1094,11 +1125,14 @@ mod tests { fn test_context_bytes_v2() { let fork_context = fork_context(ForkName::Altair); + let chain_spec = Spec::default_spec(); + // Removing context bytes for v2 messages should error let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRangeV2, 
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, + &chain_spec, ) .unwrap(); @@ -1108,7 +1142,8 @@ mod tests { decode_response( SupportedProtocol::BlocksByRangeV2, &mut encoded_bytes, - ForkName::Base + ForkName::Base, + &chain_spec, ) .unwrap_err(), RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), @@ -1118,6 +1153,7 @@ mod tests { SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, + &chain_spec, ) .unwrap(); @@ -1127,7 +1163,8 @@ mod tests { decode_response( SupportedProtocol::BlocksByRangeV2, &mut encoded_bytes, - ForkName::Base + ForkName::Base, + &chain_spec, ) .unwrap_err(), RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), @@ -1138,6 +1175,7 @@ mod tests { SupportedProtocol::BlocksByRangeV2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Altair, + &chain_spec, ) .unwrap(); @@ -1150,7 +1188,8 @@ mod tests { decode_response( SupportedProtocol::BlocksByRangeV2, &mut wrong_fork_bytes, - ForkName::Altair + ForkName::Altair, + &chain_spec, ) .unwrap_err(), RPCError::SSZDecodeError(_), @@ -1161,6 +1200,7 @@ mod tests { SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, + &chain_spec, ) .unwrap(); @@ -1172,7 +1212,8 @@ mod tests { decode_response( SupportedProtocol::BlocksByRangeV2, &mut wrong_fork_bytes, - ForkName::Altair + ForkName::Altair, + &chain_spec, ) .unwrap_err(), RPCError::SSZDecodeError(_), @@ -1186,6 +1227,7 @@ mod tests { SupportedProtocol::MetaDataV2, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), ForkName::Altair, + &chain_spec, ) .unwrap(), ); @@ -1193,7 +1235,8 @@ mod tests { assert!(decode_response( SupportedProtocol::MetaDataV2, &mut encoded_bytes, - ForkName::Altair + ForkName::Altair, + &chain_spec, ) .is_err()); @@ -1202,6 +1245,7 @@ 
mod tests { SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, + &chain_spec, ) .unwrap(); @@ -1213,7 +1257,8 @@ mod tests { decode_response( SupportedProtocol::BlocksByRangeV2, &mut wrong_fork_bytes, - ForkName::Altair + ForkName::Altair, + &chain_spec, ) .unwrap_err(), RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), @@ -1224,6 +1269,7 @@ mod tests { SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, + &chain_spec, ) .unwrap(); @@ -1233,7 +1279,8 @@ mod tests { decode_response( SupportedProtocol::BlocksByRangeV2, &mut part, - ForkName::Altair + ForkName::Altair, + &chain_spec, ), Ok(None) ) @@ -1252,9 +1299,12 @@ mod tests { OutboundRequest::MetaData(MetadataRequest::new_v1()), OutboundRequest::MetaData(MetadataRequest::new_v2()), ]; + + let chain_spec = Spec::default_spec(); + for req in requests.iter() { for fork_name in ForkName::list_all() { - encode_then_decode_request(req.clone(), fork_name); + encode_then_decode_request(req.clone(), fork_name, &chain_spec); } } } @@ -1308,9 +1358,16 @@ mod tests { assert_eq!(writer.get_ref().len(), 42); dst.extend_from_slice(writer.get_ref()); + let chain_spec = Spec::default_spec(); // 10 (for stream identifier) + 80 + 42 = 132 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. assert!(matches!( - decode_response(SupportedProtocol::StatusV1, &mut dst, ForkName::Base).unwrap_err(), + decode_response( + SupportedProtocol::StatusV1, + &mut dst, + ForkName::Base, + &chain_spec + ) + .unwrap_err(), RPCError::InvalidData(_) )); } @@ -1365,12 +1422,15 @@ mod tests { assert_eq!(writer.get_ref().len(), 8103); dst.extend_from_slice(writer.get_ref()); + let chain_spec = Spec::default_spec(); + // 10 (for stream identifier) + 176156 + 8103 = 184269 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. 
assert!(matches!( decode_response( SupportedProtocol::BlocksByRangeV2, &mut dst, - ForkName::Altair + ForkName::Altair, + &chain_spec, ) .unwrap_err(), RPCError::InvalidData(_) @@ -1398,8 +1458,12 @@ mod tests { let mut uvi_codec: Uvi = Uvi::default(); let mut dst = BytesMut::with_capacity(1024); + let chain_spec = Spec::default_spec(); + // Insert length-prefix - uvi_codec.encode(MAX_RPC_SIZE + 1, &mut dst).unwrap(); + uvi_codec + .encode(chain_spec.max_chunk_size as usize + 1, &mut dst) + .unwrap(); // Insert snappy stream identifier dst.extend_from_slice(stream_identifier); @@ -1411,7 +1475,13 @@ mod tests { dst.extend_from_slice(writer.get_ref()); assert!(matches!( - decode_response(SupportedProtocol::StatusV1, &mut dst, ForkName::Base).unwrap_err(), + decode_response( + SupportedProtocol::StatusV1, + &mut dst, + ForkName::Base, + &chain_spec + ) + .unwrap_err(), RPCError::InvalidData(_) )); } diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index d42248ad5f..36a5abc086 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -3,9 +3,7 @@ use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode, ResponseTermination}; use super::outbound::OutboundRequestContainer; -use super::protocol::{ - max_rpc_size, InboundOutput, InboundRequest, Protocol, RPCError, RPCProtocol, -}; +use super::protocol::{InboundOutput, InboundRequest, Protocol, RPCError, RPCProtocol}; use super::{RPCReceived, RPCSend, ReqId}; use crate::rpc::outbound::{OutboundFramed, OutboundRequest}; use crate::rpc::protocol::InboundFramed; @@ -31,9 +29,6 @@ use tokio::time::{sleep_until, Instant as TInstant, Sleep}; use tokio_util::time::{delay_queue, DelayQueue}; use types::{EthSpec, ForkContext}; -/// The time (in seconds) before a substream that is awaiting a response from the user times out. 
-pub const RESPONSE_TIMEOUT: u64 = 10; - /// The number of times to retry an outbound upgrade in the case of IO errors. const IO_ERROR_RETRIES: u8 = 3; @@ -131,6 +126,9 @@ where /// Logger for handling RPC streams log: slog::Logger, + + /// Timeout that will me used for inbound and outbound responses. + resp_timeout: Duration, } enum HandlerState { @@ -212,7 +210,8 @@ where pub fn new( listen_protocol: SubstreamProtocol, ()>, fork_context: Arc, - log: slog::Logger, + log: &slog::Logger, + resp_timeout: Duration, ) -> Self { RPCHandler { listen_protocol, @@ -230,7 +229,8 @@ where outbound_io_error_retries: 0, fork_context, waker: None, - log, + log: log.clone(), + resp_timeout, } } @@ -554,7 +554,7 @@ where // Each chunk is allowed RESPONSE_TIMEOUT to be sent. if let Some(ref delay_key) = info.delay_key { self.inbound_substreams_delay - .reset(delay_key, Duration::from_secs(RESPONSE_TIMEOUT)); + .reset(delay_key, self.resp_timeout); } // The stream may be currently idle. Attempt to process more @@ -688,7 +688,7 @@ where }; substream_entry.remaining_chunks = Some(remaining_chunks); self.outbound_substreams_delay - .reset(delay_key, Duration::from_secs(RESPONSE_TIMEOUT)); + .reset(delay_key, self.resp_timeout); } } else { // either this is a single response request or this response closes the @@ -811,7 +811,7 @@ where OutboundRequestContainer { req: req.clone(), fork_context: self.fork_context.clone(), - max_rpc_size: max_rpc_size(&self.fork_context), + max_rpc_size: self.listen_protocol().upgrade().max_rpc_size, }, (), ) @@ -896,10 +896,9 @@ where if expected_responses > 0 { if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS { // Store the stream and tag the output. 
- let delay_key = self.inbound_substreams_delay.insert( - self.current_inbound_substream_id, - Duration::from_secs(RESPONSE_TIMEOUT), - ); + let delay_key = self + .inbound_substreams_delay + .insert(self.current_inbound_substream_id, self.resp_timeout); let awaiting_stream = InboundState::Idle(substream); self.inbound_substreams.insert( self.current_inbound_substream_id, @@ -961,10 +960,9 @@ where let expected_responses = request.expected_responses(); if expected_responses > 0 { // new outbound request. Store the stream and tag the output. - let delay_key = self.outbound_substreams_delay.insert( - self.current_outbound_substream_id, - Duration::from_secs(RESPONSE_TIMEOUT), - ); + let delay_key = self + .outbound_substreams_delay + .insert(self.current_outbound_substream_id, self.resp_timeout); let awaiting_stream = OutboundSubstreamState::RequestPendingResponse { substream: Box::new(substream), request, diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 4fd9b516d4..14f77e4ba2 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -17,6 +17,7 @@ use slog::{crit, debug, o}; use std::marker::PhantomData; use std::sync::Arc; use std::task::{Context, Poll}; +use std::time::Duration; use types::{EthSpec, ForkContext}; pub(crate) use handler::HandlerErr; @@ -107,6 +108,12 @@ pub struct RPCMessage { type BehaviourAction = ToSwarm, RPCSend>; +pub struct NetworkParams { + pub max_chunk_size: usize, + pub ttfb_timeout: Duration, + pub resp_timeout: Duration, +} + /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level /// logic. pub struct RPC { @@ -120,6 +127,8 @@ pub struct RPC { enable_light_client_server: bool, /// Slog logger for RPC behaviour. 
log: slog::Logger, + /// Networking constant values + network_params: NetworkParams, } impl RPC { @@ -129,6 +138,7 @@ impl RPC { inbound_rate_limiter_config: Option, outbound_rate_limiter_config: Option, log: slog::Logger, + network_params: NetworkParams, ) -> Self { let log = log.new(o!("service" => "libp2p_rpc")); @@ -149,6 +159,7 @@ impl RPC { fork_context, enable_light_client_server, log, + network_params, } } @@ -220,16 +231,22 @@ where let protocol = SubstreamProtocol::new( RPCProtocol { fork_context: self.fork_context.clone(), - max_rpc_size: max_rpc_size(&self.fork_context), + max_rpc_size: max_rpc_size(&self.fork_context, self.network_params.max_chunk_size), enable_light_client_server: self.enable_light_client_server, phantom: PhantomData, + ttfb_timeout: self.network_params.ttfb_timeout, }, (), ); // NOTE: this is needed because PeerIds have interior mutability. let peer_repr = peer_id.to_string(); let log = self.log.new(slog::o!("peer_id" => peer_repr)); - let handler = RPCHandler::new(protocol, self.fork_context.clone(), log); + let handler = RPCHandler::new( + protocol, + self.fork_context.clone(), + &log, + self.network_params.resp_timeout, + ); Ok(handler) } @@ -244,9 +261,10 @@ where let protocol = SubstreamProtocol::new( RPCProtocol { fork_context: self.fork_context.clone(), - max_rpc_size: max_rpc_size(&self.fork_context), + max_rpc_size: max_rpc_size(&self.fork_context, self.network_params.max_chunk_size), enable_light_client_server: self.enable_light_client_server, phantom: PhantomData, + ttfb_timeout: self.network_params.ttfb_timeout, }, (), ); @@ -254,7 +272,12 @@ where // NOTE: this is needed because PeerIds have interior mutability. 
let peer_repr = peer_id.to_string(); let log = self.log.new(slog::o!("peer_id" => peer_repr)); - let handler = RPCHandler::new(protocol, self.fork_context.clone(), log); + let handler = RPCHandler::new( + protocol, + self.fork_context.clone(), + &log, + self.network_params.resp_timeout, + ); Ok(handler) } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 22f9f19d68..f2a39470b9 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -72,7 +72,7 @@ lazy_static! { /// The `BeaconBlockMerge` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. /// We calculate the value from its fields instead of constructing the block and checking the length. /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network - /// with `MAX_RPC_SIZE_POST_MERGE`. + /// with `max_chunk_size`. pub static ref SIGNED_BEACON_BLOCK_MERGE_MAX: usize = // Size of a full altair block *SIGNED_BEACON_BLOCK_ALTAIR_MAX @@ -109,25 +109,18 @@ lazy_static! { .len(); } -/// The maximum bytes that can be sent across the RPC pre-merge. -pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M -/// The maximum bytes that can be sent across the RPC post-merge. -pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M -pub(crate) const MAX_RPC_SIZE_POST_CAPELLA: usize = 10 * 1_048_576; // 10M /// The protocol prefix the RPC protocol id. const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; -/// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte). -const TTFB_TIMEOUT: u64 = 5; /// The number of seconds to wait for the first bytes of a request once a protocol has been /// established before the stream is terminated. const REQUEST_TIMEOUT: u64 = 15; /// Returns the maximum bytes that can be sent across the RPC. 
-pub fn max_rpc_size(fork_context: &ForkContext) -> usize { +pub fn max_rpc_size(fork_context: &ForkContext, max_chunk_size: usize) -> usize { match fork_context.current_fork() { - ForkName::Altair | ForkName::Base => MAX_RPC_SIZE, - ForkName::Merge => MAX_RPC_SIZE_POST_MERGE, - ForkName::Capella => MAX_RPC_SIZE_POST_CAPELLA, + ForkName::Altair | ForkName::Base => max_chunk_size / 10, + ForkName::Merge => max_chunk_size, + ForkName::Capella => max_chunk_size, } } @@ -262,6 +255,7 @@ pub struct RPCProtocol { pub max_rpc_size: usize, pub enable_light_client_server: bool, pub phantom: PhantomData, + pub ttfb_timeout: Duration, } impl UpgradeInfo for RPCProtocol { @@ -447,7 +441,7 @@ where } }; let mut timed_socket = TimeoutStream::new(socket); - timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT))); + timed_socket.set_read_timeout(Some(self.ttfb_timeout)); let socket = Framed::new(Box::pin(timed_socket), codec); diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 1a25beee0a..63e5bcbff6 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1,6 +1,6 @@ use self::behaviour::Behaviour; use self::gossip_cache::GossipCache; -use crate::config::{gossipsub_config, NetworkLoad}; +use crate::config::{gossipsub_config, GossipsubConfigParams, NetworkLoad}; use crate::discovery::{ subnet_predicate, DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS, }; @@ -232,7 +232,15 @@ impl Network { max_subscriptions_per_request: 150, // 148 in theory = (64 attestation + 4 sync committee + 6 core topics) * 2 }; - config.gs_config = gossipsub_config(config.network_load, ctx.fork_context.clone()); + let gossipsub_config_params = GossipsubConfigParams { + message_domain_valid_snappy: ctx.chain_spec.message_domain_valid_snappy, + gossip_max_size: ctx.chain_spec.gossip_max_size as usize, + }; + config.gs_config = gossipsub_config( + 
config.network_load, + ctx.fork_context.clone(), + gossipsub_config_params, + ); // If metrics are enabled for gossipsub build the configuration let gossipsub_metrics = ctx @@ -256,12 +264,18 @@ impl Network { (gossipsub, update_gossipsub_scores) }; + let network_params = NetworkParams { + max_chunk_size: ctx.chain_spec.max_chunk_size as usize, + ttfb_timeout: ctx.chain_spec.ttfb_timeout(), + resp_timeout: ctx.chain_spec.resp_timeout(), + }; let eth2_rpc = RPC::new( ctx.fork_context.clone(), config.enable_light_client_server, config.inbound_rate_limiter_config.clone(), config.outbound_rate_limiter_config.clone(), log.clone(), + network_params, ); let discovery = { diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index b48891335c..36a2e52385 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -94,6 +94,7 @@ pub async fn build_libp2p_instance( boot_nodes: Vec, log: slog::Logger, fork_name: ForkName, + spec: &ChainSpec, ) -> Libp2pInstance { let port = unused_tcp4_port().unwrap(); let config = build_config(port, boot_nodes); @@ -106,7 +107,7 @@ pub async fn build_libp2p_instance( config: &config, enr_fork_id: EnrForkId::default(), fork_context: Arc::new(fork_context(fork_name)), - chain_spec: &ChainSpec::minimal(), + chain_spec: spec, gossipsub_registry: None, }; Libp2pInstance( @@ -130,12 +131,13 @@ pub async fn build_node_pair( rt: Weak, log: &slog::Logger, fork_name: ForkName, + spec: &ChainSpec, ) -> (Libp2pInstance, Libp2pInstance) { let sender_log = log.new(o!("who" => "sender")); let receiver_log = log.new(o!("who" => "receiver")); - let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name).await; - let mut receiver = build_libp2p_instance(rt, vec![], receiver_log, fork_name).await; + let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name, spec).await; + let mut receiver = 
build_libp2p_instance(rt, vec![], receiver_log, fork_name, spec).await; let receiver_multiaddr = receiver.local_enr().multiaddr()[1].clone(); @@ -180,10 +182,11 @@ pub async fn build_linear( log: slog::Logger, n: usize, fork_name: ForkName, + spec: &ChainSpec, ) -> Vec { let mut nodes = Vec::with_capacity(n); for _ in 0..n { - nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone(), fork_name).await); + nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone(), fork_name, spec).await); } let multiaddrs: Vec = nodes diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 656df0c4a1..05fa5ab854 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -9,8 +9,9 @@ use std::time::Duration; use tokio::runtime::Runtime; use tokio::time::sleep; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, Epoch, EthSpec, - ForkContext, ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, ChainSpec, EmptyBlock, + Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, + Slot, }; mod common; @@ -18,30 +19,30 @@ mod common; type E = MinimalEthSpec; /// Merge block with length < max_rpc_size. 
-fn merge_block_small(fork_context: &ForkContext) -> BeaconBlock { - let mut block = BeaconBlockMerge::::empty(&E::default_spec()); +fn merge_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock { + let mut block = BeaconBlockMerge::::empty(spec); let tx = VariableList::from(vec![0; 1024]); let txs = VariableList::from(std::iter::repeat(tx).take(5000).collect::>()); block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Merge(block); - assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context)); + assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context, spec.max_chunk_size as usize)); block } /// Merge block with length > MAX_RPC_SIZE. /// The max limit for a merge block is in the order of ~16GiB which wouldn't fit in memory. /// Hence, we generate a merge block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. -fn merge_block_large(fork_context: &ForkContext) -> BeaconBlock { - let mut block = BeaconBlockMerge::::empty(&E::default_spec()); +fn merge_block_large(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock { + let mut block = BeaconBlockMerge::::empty(spec); let tx = VariableList::from(vec![0; 1024]); let txs = VariableList::from(std::iter::repeat(tx).take(100000).collect::>()); block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Merge(block); - assert!(block.ssz_bytes_len() > max_rpc_size(fork_context)); + assert!(block.ssz_bytes_len() > max_rpc_size(fork_context, spec.max_chunk_size as usize)); block } @@ -57,10 +58,12 @@ fn test_status_rpc() { let log = common::build_log(log_level, enable_logging); + let spec = E::default_spec(); + rt.block_on(async { // get sender/receiver let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await; // Dummy STATUS RPC message let rpc_request = 
Request::Status(StatusMessage { @@ -149,10 +152,12 @@ fn test_blocks_by_range_chunked_rpc() { let rt = Arc::new(Runtime::new().unwrap()); + let spec = E::default_spec(); + rt.block_on(async { // get sender/receiver let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await; + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge, &spec).await; // BlocksByRange Request let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); @@ -168,7 +173,7 @@ fn test_blocks_by_range_chunked_rpc() { let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_altair = Response::BlocksByRange(Some(Arc::new(signed_full_block))); - let full_block = merge_block_small(&common::fork_context(ForkName::Merge)); + let full_block = merge_block_small(&common::fork_context(ForkName::Merge), &spec); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_merge_small = Response::BlocksByRange(Some(Arc::new(signed_full_block))); @@ -273,16 +278,18 @@ fn test_blocks_by_range_over_limit() { let rt = Arc::new(Runtime::new().unwrap()); + let spec = E::default_spec(); + rt.block_on(async { // get sender/receiver let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await; + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge, &spec).await; // BlocksByRange Request let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); // BlocksByRange Response - let full_block = merge_block_large(&common::fork_context(ForkName::Merge)); + let full_block = merge_block_large(&common::fork_context(ForkName::Merge), &spec); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_merge_large = Response::BlocksByRange(Some(Arc::new(signed_full_block))); @@ -355,10 +362,12 @@ fn 
test_blocks_by_range_chunked_rpc_terminates_correctly() { let rt = Arc::new(Runtime::new().unwrap()); + let spec = E::default_spec(); + rt.block_on(async { // get sender/receiver let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await; // BlocksByRange Request let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); @@ -475,10 +484,12 @@ fn test_blocks_by_range_single_empty_rpc() { let log = common::build_log(log_level, enable_logging); let rt = Arc::new(Runtime::new().unwrap()); + let spec = E::default_spec(); + rt.block_on(async { // get sender/receiver let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await; // BlocksByRange Request let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, 10)); @@ -579,7 +590,7 @@ fn test_blocks_by_root_chunked_rpc() { // get sender/receiver rt.block_on(async { let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await; + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge, &spec).await; // BlocksByRoot Request let rpc_request = @@ -601,7 +612,7 @@ fn test_blocks_by_root_chunked_rpc() { let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_altair = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); - let full_block = merge_block_small(&common::fork_context(ForkName::Merge)); + let full_block = merge_block_small(&common::fork_context(ForkName::Merge), &spec); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_merge_small = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); @@ -706,7 +717,7 @@ fn 
test_blocks_by_root_chunked_rpc_terminates_correctly() { // get sender/receiver rt.block_on(async { let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await; // BlocksByRoot Request let rpc_request = @@ -833,10 +844,13 @@ fn test_goodbye_rpc() { let log = common::build_log(log_level, enable_logging); let rt = Arc::new(Runtime::new().unwrap()); + + let spec = E::default_spec(); + // get sender/receiver rt.block_on(async { let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await; // build the sender future let sender_future = async { diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index cde4da9ffc..cb4d6f9c2b 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1634,6 +1634,7 @@ impl NetworkBeaconProcessor { attestation_verification::verify_propagation_slot_range( seen_clock, failed_att.attestation(), + &self.chain.spec, ); // Only penalize the peer if it would have been invalid at the moment we received @@ -2182,6 +2183,7 @@ impl NetworkBeaconProcessor { sync_committee_verification::verify_propagation_slot_range( seen_clock, &sync_committee_message_slot, + &self.chain.spec, ); hindsight_verification.is_err() }; @@ -2494,6 +2496,7 @@ impl NetworkBeaconProcessor { let is_timely = attestation_verification::verify_propagation_slot_range( &self.chain.slot_clock, attestation, + &self.chain.spec, ) .is_ok(); diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 7f0ef1fb81..db83bfc164 100644 --- 
a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -577,14 +577,3 @@ impl NetworkBeaconProcessor> { (network_beacon_processor, beacon_processor_receive) } } - -#[cfg(test)] -mod test { - #[test] - fn queued_block_delay_is_sane() { - assert!( - beacon_processor::work_reprocessing_queue::ADDITIONAL_QUEUED_BLOCK_DELAY - < beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY - ); - } -} diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index b8d5db568e..dbe93de1ea 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -11,7 +11,7 @@ use crate::{ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; -use beacon_chain::{BeaconChain, ChainConfig, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; +use beacon_chain::{BeaconChain, ChainConfig}; use beacon_processor::{work_reprocessing_queue::*, *}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, @@ -215,7 +215,7 @@ impl TestRig { }; let network_beacon_processor = Arc::new(network_beacon_processor); - BeaconProcessor { + let beacon_processor = BeaconProcessor { network_globals, executor, max_workers: cmp::max(1, num_cpus::get()), @@ -229,8 +229,11 @@ impl TestRig { work_reprocessing_rx, Some(work_journal_tx), harness.chain.slot_clock.clone(), + chain.spec.maximum_gossip_clock_disparity(), ); + assert!(!beacon_processor.is_err()); + Self { chain, next_block: Arc::new(next_block), @@ -505,7 +508,7 @@ async fn import_gossip_block_acceptably_early() { rig.chain .slot_clock - .set_current_time(slot_start - MAXIMUM_GOSSIP_CLOCK_DISPARITY); + .set_current_time(slot_start - rig.chain.spec.maximum_gossip_clock_disparity()); assert_eq!( rig.chain.slot().unwrap(), @@ -552,9 +555,9 @@ async fn import_gossip_block_unacceptably_early() { 
.start_of(rig.next_block.slot()) .unwrap(); - rig.chain - .slot_clock - .set_current_time(slot_start - MAXIMUM_GOSSIP_CLOCK_DISPARITY - Duration::from_millis(1)); + rig.chain.slot_clock.set_current_time( + slot_start - rig.chain.spec.maximum_gossip_clock_disparity() - Duration::from_millis(1), + ); assert_eq!( rig.chain.slot().unwrap(), diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 4abf649bfb..c16b1675a9 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -581,8 +581,10 @@ pub fn get_config( }; } - client_config.chain.max_network_size = - lighthouse_network::gossip_max_size(spec.bellatrix_fork_epoch.is_some()); + client_config.chain.max_network_size = lighthouse_network::gossip_max_size( + spec.bellatrix_fork_epoch.is_some(), + spec.gossip_max_size as usize, + ); if cli_args.is_present("slasher") { let slasher_dir = if let Some(slasher_dir) = cli_args.value_of("slasher-dir") { diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 0fdc159ec2..8e7a9dd07a 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -89,4 +89,14 @@ DEPOSIT_CONTRACT_ADDRESS: 0x0B98057eA310F4d31F2a452B414647007d1645d9 # Network # --------------------------------------------------------------- -SUBNETS_PER_NODE: 4 \ No newline at end of file +SUBNETS_PER_NODE: 4 +GOSSIP_MAX_SIZE: 10485760 +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +MAX_CHUNK_SIZE: 10485760 +TTFB_TIMEOUT: 5 +RESP_TIMEOUT: 10 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +ATTESTATION_SUBNET_PREFIX_BITS: 6 diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 
7b26b30a6c..98984f3b7d 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -89,4 +89,14 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa # Network # --------------------------------------------------------------- -SUBNETS_PER_NODE: 2 \ No newline at end of file +SUBNETS_PER_NODE: 2 +GOSSIP_MAX_SIZE: 10485760 +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +MAX_CHUNK_SIZE: 10485760 +TTFB_TIMEOUT: 5 +RESP_TIMEOUT: 10 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +ATTESTATION_SUBNET_PREFIX_BITS: 6 diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml index 63b3d45db9..a0dd85fec0 100644 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml @@ -89,4 +89,14 @@ DEPOSIT_CONTRACT_ADDRESS: 0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b # Network # --------------------------------------------------------------- -SUBNETS_PER_NODE: 2 \ No newline at end of file +SUBNETS_PER_NODE: 2 +GOSSIP_MAX_SIZE: 10485760 +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +MAX_CHUNK_SIZE: 10485760 +TTFB_TIMEOUT: 5 +RESP_TIMEOUT: 10 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +ATTESTATION_SUBNET_PREFIX_BITS: 6 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 8489f085f4..e3674cf7df 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -77,4 +77,14 @@ 
DEPOSIT_CONTRACT_ADDRESS: 0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D # Network # --------------------------------------------------------------- -SUBNETS_PER_NODE: 2 \ No newline at end of file +SUBNETS_PER_NODE: 2 +GOSSIP_MAX_SIZE: 10485760 +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +MAX_CHUNK_SIZE: 10485760 +TTFB_TIMEOUT: 5 +RESP_TIMEOUT: 10 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +ATTESTATION_SUBNET_PREFIX_BITS: 6 diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index ba15f6d488..f030f2e97a 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -60,6 +60,7 @@ beacon_chain = { path = "../../beacon_node/beacon_chain" } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } state_processing = { path = "../state_processing" } tokio = "1.14.0" +paste = "1.0.14" [features] default = ["sqlite", "legacy-arith"] diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index fbb6a3d857..a13d3116d8 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -6,6 +6,7 @@ use serde_derive::Deserialize; use serde_utils::quoted_u64::MaybeQuoted; use std::fs::File; use std::path::Path; +use std::time::Duration; use tree_hash::TreeHash; /// Each of the BLS signature domains. 
@@ -170,7 +171,15 @@ pub struct ChainSpec { pub attestation_subnet_count: u64, pub subnets_per_node: u8, pub epochs_per_subnet_subscription: u64, + pub gossip_max_size: u64, + pub min_epochs_for_block_requests: u64, + pub max_chunk_size: u64, + pub ttfb_timeout: u64, + pub resp_timeout: u64, + pub message_domain_invalid_snappy: [u8; 4], + pub message_domain_valid_snappy: [u8; 4], pub attestation_subnet_extra_bits: u8, + pub attestation_subnet_prefix_bits: u8, /* * Application params @@ -451,10 +460,16 @@ impl ChainSpec { Hash256::from(domain) } - #[allow(clippy::arithmetic_side_effects)] - pub const fn attestation_subnet_prefix_bits(&self) -> u32 { - let attestation_subnet_count_bits = self.attestation_subnet_count.ilog2(); - self.attestation_subnet_extra_bits as u32 + attestation_subnet_count_bits + pub fn maximum_gossip_clock_disparity(&self) -> Duration { + Duration::from_millis(self.maximum_gossip_clock_disparity_millis) + } + + pub fn ttfb_timeout(&self) -> Duration { + Duration::from_secs(self.ttfb_timeout) + } + + pub fn resp_timeout(&self) -> Duration { + Duration::from_secs(self.resp_timeout) } /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. 
@@ -617,8 +632,15 @@ impl ChainSpec { maximum_gossip_clock_disparity_millis: 500, target_aggregators_per_committee: 16, epochs_per_subnet_subscription: 256, - attestation_subnet_extra_bits: 0, - + gossip_max_size: default_gossip_max_size(), + min_epochs_for_block_requests: default_min_epochs_for_block_requests(), + max_chunk_size: default_max_chunk_size(), + ttfb_timeout: default_ttfb_timeout(), + resp_timeout: default_resp_timeout(), + message_domain_invalid_snappy: default_message_domain_invalid_snappy(), + message_domain_valid_snappy: default_message_domain_valid_snappy(), + attestation_subnet_extra_bits: default_attestation_subnet_extra_bits(), + attestation_subnet_prefix_bits: default_attestation_subnet_prefix_bits(), /* * Application specific */ @@ -842,7 +864,15 @@ impl ChainSpec { maximum_gossip_clock_disparity_millis: 500, target_aggregators_per_committee: 16, epochs_per_subnet_subscription: 256, - attestation_subnet_extra_bits: 0, + gossip_max_size: default_gossip_max_size(), + min_epochs_for_block_requests: default_min_epochs_for_block_requests(), + max_chunk_size: default_max_chunk_size(), + ttfb_timeout: default_ttfb_timeout(), + resp_timeout: default_resp_timeout(), + message_domain_invalid_snappy: default_message_domain_invalid_snappy(), + message_domain_valid_snappy: default_message_domain_valid_snappy(), + attestation_subnet_extra_bits: default_attestation_subnet_extra_bits(), + attestation_subnet_prefix_bits: default_attestation_subnet_prefix_bits(), /* * Application specific @@ -953,6 +983,34 @@ pub struct Config { #[serde(with = "serde_utils::quoted_u64")] deposit_network_id: u64, deposit_contract_address: Address, + + #[serde(default = "default_gossip_max_size")] + #[serde(with = "serde_utils::quoted_u64")] + gossip_max_size: u64, + #[serde(default = "default_min_epochs_for_block_requests")] + #[serde(with = "serde_utils::quoted_u64")] + min_epochs_for_block_requests: u64, + #[serde(default = "default_max_chunk_size")] + #[serde(with = 
"serde_utils::quoted_u64")] + max_chunk_size: u64, + #[serde(default = "default_ttfb_timeout")] + #[serde(with = "serde_utils::quoted_u64")] + ttfb_timeout: u64, + #[serde(default = "default_resp_timeout")] + #[serde(with = "serde_utils::quoted_u64")] + resp_timeout: u64, + #[serde(default = "default_message_domain_invalid_snappy")] + #[serde(with = "serde_utils::bytes_4_hex")] + message_domain_invalid_snappy: [u8; 4], + #[serde(default = "default_message_domain_valid_snappy")] + #[serde(with = "serde_utils::bytes_4_hex")] + message_domain_valid_snappy: [u8; 4], + #[serde(default = "default_attestation_subnet_extra_bits")] + #[serde(with = "serde_utils::quoted_u8")] + attestation_subnet_extra_bits: u8, + #[serde(default = "default_attestation_subnet_prefix_bits")] + #[serde(with = "serde_utils::quoted_u8")] + attestation_subnet_prefix_bits: u8, } fn default_bellatrix_fork_version() -> [u8; 4] { @@ -993,6 +1051,42 @@ fn default_subnets_per_node() -> u8 { 2u8 } +const fn default_gossip_max_size() -> u64 { + 10485760 +} + +const fn default_min_epochs_for_block_requests() -> u64 { + 33024 +} + +const fn default_max_chunk_size() -> u64 { + 10485760 +} + +const fn default_ttfb_timeout() -> u64 { + 5 +} + +const fn default_resp_timeout() -> u64 { + 10 +} + +const fn default_message_domain_invalid_snappy() -> [u8; 4] { + [0, 0, 0, 0] +} + +const fn default_message_domain_valid_snappy() -> [u8; 4] { + [1, 0, 0, 0] +} + +const fn default_attestation_subnet_extra_bits() -> u8 { + 0 +} + +const fn default_attestation_subnet_prefix_bits() -> u8 { + 6 +} + impl Default for Config { fn default() -> Self { let chain_spec = MainnetEthSpec::default_spec(); @@ -1088,6 +1182,16 @@ impl Config { deposit_chain_id: spec.deposit_chain_id, deposit_network_id: spec.deposit_network_id, deposit_contract_address: spec.deposit_contract_address, + + gossip_max_size: spec.gossip_max_size, + min_epochs_for_block_requests: spec.min_epochs_for_block_requests, + max_chunk_size: spec.max_chunk_size, + 
ttfb_timeout: spec.ttfb_timeout, + resp_timeout: spec.resp_timeout, + message_domain_invalid_snappy: spec.message_domain_invalid_snappy, + message_domain_valid_snappy: spec.message_domain_valid_snappy, + attestation_subnet_extra_bits: spec.attestation_subnet_extra_bits, + attestation_subnet_prefix_bits: spec.attestation_subnet_prefix_bits, } } @@ -1132,6 +1236,15 @@ impl Config { deposit_chain_id, deposit_network_id, deposit_contract_address, + gossip_max_size, + min_epochs_for_block_requests, + max_chunk_size, + ttfb_timeout, + resp_timeout, + message_domain_invalid_snappy, + message_domain_valid_snappy, + attestation_subnet_extra_bits, + attestation_subnet_prefix_bits, } = self; if preset_base != T::spec_name().to_string().as_str() { @@ -1169,6 +1282,15 @@ impl Config { terminal_block_hash, terminal_block_hash_activation_epoch, safe_slots_to_import_optimistically, + gossip_max_size, + min_epochs_for_block_requests, + max_chunk_size, + ttfb_timeout, + resp_timeout, + message_domain_invalid_snappy, + message_domain_valid_snappy, + attestation_subnet_extra_bits, + attestation_subnet_prefix_bits, ..chain_spec.clone() }) } @@ -1306,6 +1428,7 @@ mod tests { #[cfg(test)] mod yaml_tests { use super::*; + use paste::paste; use tempfile::NamedTempFile; #[test] @@ -1410,29 +1533,35 @@ mod yaml_tests { "#; let chain_spec: Config = serde_yaml::from_str(spec).unwrap(); - assert_eq!( - chain_spec.terminal_total_difficulty, - default_terminal_total_difficulty() - ); - assert_eq!( - chain_spec.terminal_block_hash, - default_terminal_block_hash() - ); - assert_eq!( - chain_spec.terminal_block_hash_activation_epoch, - default_terminal_block_hash_activation_epoch() - ); - assert_eq!( - chain_spec.safe_slots_to_import_optimistically, - default_safe_slots_to_import_optimistically() - ); + + // Asserts that `chain_spec.$name` and `default_$name()` are equal. + macro_rules! check_default { + ($name: ident) => { + paste! 
{ + assert_eq!( + chain_spec.$name, + [](), + "{} does not match default", stringify!($name)); + } + }; + } + + check_default!(terminal_total_difficulty); + check_default!(terminal_block_hash); + check_default!(terminal_block_hash_activation_epoch); + check_default!(safe_slots_to_import_optimistically); + check_default!(bellatrix_fork_version); + check_default!(gossip_max_size); + check_default!(min_epochs_for_block_requests); + check_default!(max_chunk_size); + check_default!(ttfb_timeout); + check_default!(resp_timeout); + check_default!(message_domain_invalid_snappy); + check_default!(message_domain_valid_snappy); + check_default!(attestation_subnet_extra_bits); + check_default!(attestation_subnet_prefix_bits); assert_eq!(chain_spec.bellatrix_fork_epoch, None); - - assert_eq!( - chain_spec.bellatrix_fork_version, - default_bellatrix_fork_version() - ); } #[test] diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index eb25b57b0d..415d6a1404 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -84,7 +84,7 @@ impl SubnetId { let subscription_duration = spec.epochs_per_subnet_subscription; let node_id_prefix = - (node_id >> (256 - spec.attestation_subnet_prefix_bits() as usize)).as_usize(); + (node_id >> (256 - spec.attestation_subnet_prefix_bits as usize)).as_usize(); // NOTE: The as_u64() panics if the number is larger than u64::max_value(). This cannot be // true as spec.epochs_per_subnet_subscription is a u64. 
@@ -99,7 +99,7 @@ impl SubnetId { let permutation_seed = ethereum_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx)); - let num_subnets = 1 << spec.attestation_subnet_prefix_bits(); + let num_subnets = 1 << spec.attestation_subnet_prefix_bits; let permutated_prefix = compute_shuffled_index( node_id_prefix, num_subnets, diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index 33aa8ad165..b98145163c 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -81,3 +81,17 @@ PROPOSER_SCORE_BOOST: 40 DEPOSIT_CHAIN_ID: 1 DEPOSIT_NETWORK_ID: 1 DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa + +# Network +# --------------------------------------------------------------- +SUBNETS_PER_NODE: 2 +GOSSIP_MAX_SIZE: 10485760 +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +MAX_CHUNK_SIZE: 10485760 +TTFB_TIMEOUT: 5 +RESP_TIMEOUT: 10 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +ATTESTATION_SUBNET_PREFIX_BITS: 6 From 31daf3a87cab1e3b57a2e3a3fddeb3998467c306 Mon Sep 17 00:00:00 2001 From: Nico Flaig Date: Mon, 7 Aug 2023 00:46:29 +0000 Subject: [PATCH 33/75] Update doppelganger note about sync committee contributions (#4425) **Motivation** As clarified [on discord](https://discord.com/channels/605577013327167508/605577013331361793/1121246688183603240), sync committee contributions are not delayed if DP is enabled. **Description** This PR updates doppelganger note about sync committee contributions. Based on the current docs, a user might assume that DP is not working as expected. 
--- book/src/validator-doppelganger.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/book/src/validator-doppelganger.md b/book/src/validator-doppelganger.md index 7ce2868e9b..b62086d4bf 100644 --- a/book/src/validator-doppelganger.md +++ b/book/src/validator-doppelganger.md @@ -46,6 +46,8 @@ Staying silent and refusing to sign messages will cause the following: - Potentially missed rewards by missing a block proposal (if the validator is an elected block proposer, which is unlikely). +Notably, sync committee contributions are not slashable and will continue to be produced even when DP is suppressing other messages. + The loss of rewards and penalties incurred due to the missed duties will be very small in dollar-values. Neglecting block proposals, generally they will equate to around 0.00002 ETH (equivalent to USD 0.04 assuming ETH is trading at USD 2000), or less than 1% of the reward for one validator for one day. Since DP costs so little but can protect a user from From 521432129df2489797b302bf16b8452a12bafcc2 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 7 Aug 2023 22:53:04 +0000 Subject: [PATCH 34/75] Support SSZ request body for POST /beacon/blinded_blocks endpoints (v1 & v2) (#4504) ## Issue Addressed #4262 ## Proposed Changes add SSZ support in request body for POST /beacon/blinded_blocks endpoints (v1 & v2) ## Additional Info --- beacon_node/http_api/src/lib.rs | 99 ++++++++++++++++++- .../tests/broadcast_validation_tests.rs | 43 ++++++++ beacon_node/http_api/tests/tests.rs | 77 +++++++++++++++ common/eth2/src/lib.rs | 37 +++++++ 4 files changed, 255 insertions(+), 1 deletion(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 7d1475809a..739371c6ee 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1391,6 +1391,46 @@ pub fn serve( }, ); + // POST beacon/blocks + let post_beacon_blinded_blocks_ssz = + eth_v1 + .and(warp::path("beacon")) + 
.and(warp::path("blinded_blocks")) + .and(warp::path::end()) + .and(warp::body::bytes()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .and_then( + |block_bytes: Bytes, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| async move { + let block = + match SignedBeaconBlock::>::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) { + Ok(data) => data, + Err(e) => { + return Err(warp_utils::reject::custom_bad_request(format!( + "{:?}", + e + ))) + } + }; + publish_blocks::publish_blinded_block( + block, + chain, + &network_tx, + log, + BroadcastValidation::default(), + ) + .await + .map(|()| warp::reply().into_response()) + }, + ); + let post_beacon_blinded_blocks_v2 = eth_v2 .and(warp::path("beacon")) .and(warp::path("blinded_blocks")) @@ -1428,6 +1468,58 @@ pub fn serve( }, ); + let post_beacon_blinded_blocks_v2_ssz = + eth_v2 + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::body::bytes()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .then( + |validation_level: api_types::BroadcastValidationQuery, + block_bytes: Bytes, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| async move { + let block = + match SignedBeaconBlock::>::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) { + Ok(data) => data, + Err(_) => { + return warp::reply::with_status( + StatusCode::BAD_REQUEST, + eth2::StatusCode::BAD_REQUEST, + ) + .into_response(); + } + }; + match publish_blocks::publish_blinded_block( + block, + chain, + &network_tx, + log, + validation_level.broadcast_validation, + ) + .await + { + Ok(()) => warp::reply().into_response(), + Err(e) => match warp_utils::reject::handle_rejection(e).await { + Ok(reply) => reply.into_response(), + Err(_) => warp::reply::with_status( + StatusCode::INTERNAL_SERVER_ERROR, + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response(), + }, + } + }, + 
); + let block_id_or_err = warp::path::param::().or_else(|_| async { Err(warp_utils::reject::custom_bad_request( "Invalid block ID".to_string(), @@ -4073,7 +4165,12 @@ pub fn serve( warp::post().and( warp::header::exact("Content-Type", "application/octet-stream") // Routes which expect `application/octet-stream` go within this `and`. - .and(post_beacon_blocks_ssz.uor(post_beacon_blocks_v2_ssz)) + .and( + post_beacon_blocks_ssz + .uor(post_beacon_blocks_v2_ssz) + .uor(post_beacon_blinded_blocks_ssz) + .uor(post_beacon_blinded_blocks_v2_ssz), + ) .uor(post_beacon_blocks) .uor(post_beacon_blinded_blocks) .uor(post_beacon_blocks_v2) diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 457276d702..0082589000 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -851,6 +851,49 @@ pub async fn blinded_gossip_full_pass() { .block_is_known_to_fork_choice(&block.canonical_root())); } +// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_gossip_full_pass_ssz() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBlindedBeaconBlock, _) = + tester.harness.make_blinded_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2_ssz(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn blinded_consensus_invalid() { diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 28eb106e8d..efdf66747d 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -2578,6 +2578,66 @@ impl ApiTester { } } + pub async fn test_blinded_block_production_ssz>(&self) { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; + + for _ in 0..E::slots_per_epoch() * 3 { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let proposer_pubkey_bytes = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| duty.pubkey) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let randao_reveal = { + let domain = self.chain.spec.get_domain( 
+ epoch, + Domain::Randao, + &fork, + genesis_validators_root, + ); + let message = epoch.signing_root(domain); + sk.sign(message).into() + }; + + let block = self + .client + .get_validator_blinded_blocks::(slot, &randao_reveal, None) + .await + .unwrap() + .data; + + let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + + self.client + .post_beacon_blinded_blocks_ssz(&signed_block) + .await + .unwrap(); + + // This converts the generic `Payload` to a concrete type for comparison. + let head_block = SignedBeaconBlock::from(signed_block.clone()); + assert_eq!(head_block, signed_block); + + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + } + pub async fn test_blinded_block_production_no_verify_randao>( self, ) -> Self { @@ -4704,6 +4764,14 @@ async fn blinded_block_production_full_payload_premerge() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_ssz_full_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production_ssz::>() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn blinded_block_production_with_skip_slots_full_payload_premerge() { ApiTester::new() @@ -4713,6 +4781,15 @@ async fn blinded_block_production_with_skip_slots_full_payload_premerge() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_ssz_with_skip_slots_full_payload_premerge() { + ApiTester::new() + .await + .skip_slots(E::slots_per_epoch() * 2) + .test_blinded_block_production_ssz::>() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn blinded_block_production_no_verify_randao_full_payload_premerge() { ApiTester::new() diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 661f9a09eb..146a832e38 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -742,6 +742,26 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST 
beacon/blinded_blocks` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn post_beacon_blinded_blocks_ssz>( + &self, + block: &SignedBeaconBlock, + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blinded_blocks"); + + self.post_generic_with_ssz_body(path, block.as_ssz_bytes(), Some(self.timeouts.proposal)) + .await?; + + Ok(()) + } + pub fn post_beacon_blocks_v2_path( &self, validation_level: Option, @@ -829,6 +849,23 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST v2/beacon/blinded_blocks` + pub async fn post_beacon_blinded_blocks_v2_ssz( + &self, + block: &SignedBlindedBeaconBlock, + validation_level: Option, + ) -> Result<(), Error> { + self.post_generic_with_consensus_version_and_ssz_body( + self.post_beacon_blinded_blocks_v2_path(validation_level)?, + block.as_ssz_bytes(), + Some(self.timeouts.proposal), + block.message().body().fork_name(), + ) + .await?; + + Ok(()) + } + /// Path for `v2/beacon/blocks` pub fn get_beacon_blocks_path(&self, block_id: BlockId) -> Result { let mut path = self.eth_path(V2)?; From 5ea75052a87e994a0203070533b0d54c611978b1 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 7 Aug 2023 22:53:05 +0000 Subject: [PATCH 35/75] Increase slashing protection test SQL timeout to 500ms (#4574) ## Issue Addressed NA ## Proposed Changes We've been seeing a lot of [CI failures](https://github.com/sigp/lighthouse/actions/runs/5781296217/job/15666209142) with errors like this: ``` ---- extra_interchange_tests::export_same_key_twice stdout ---- thread 'extra_interchange_tests::export_same_key_twice' panicked at 'called `Result::unwrap()` on an `Err` value: SQLError("Unable to open database: Error(None)")', validator_client/slashing_protection/src/extra_interchange_tests.rs:48:67 ``` I'm assuming they're timeouts. I noticed that tests have a 0.1s timeout. 
Perhaps this just doesn't cut it when our new runners are overloaded. ## Additional Info NA --- validator_client/slashing_protection/src/slashing_database.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index c8be851472..406913bfd1 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -23,7 +23,7 @@ pub const POOL_SIZE: u32 = 1; #[cfg(not(test))] pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5); #[cfg(test)] -pub const CONNECTION_TIMEOUT: Duration = Duration::from_millis(100); +pub const CONNECTION_TIMEOUT: Duration = Duration::from_millis(500); /// Supported version of the interchange format. pub const SUPPORTED_INTERCHANGE_FORMAT_VERSION: u64 = 5; From 1373dcf076bd031e1c09119281c5f84b789974c2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 8 Aug 2023 00:03:22 +0000 Subject: [PATCH 36/75] Add `validator-manager` (#3502) ## Issue Addressed Addresses #2557 ## Proposed Changes Adds the `lighthouse validator-manager` command, which provides: - `lighthouse validator-manager create` - Creates a `validators.json` file and a `deposits.json` (same format as https://github.com/ethereum/staking-deposit-cli) - `lighthouse validator-manager import` - Imports validators from a `validators.json` file to the VC via the HTTP API. - `lighthouse validator-manager move` - Moves validators from one VC to the other, utilizing only the VC API. ## Additional Info In 98bcb947c I've reduced some VC `ERRO` and `CRIT` warnings to `WARN` or `DEBG` for the case where a pubkey is missing from the validator store. These were being triggered when we removed a validator but still had it in caches. It seems to me that `UnknownPubkey` will only happen in the case where we've removed a validator, so downgrading the logs is prudent. 
All the logs are `DEBG` apart from attestations and blocks which are `WARN`. I thought having *some* logging about this condition might help us down the track. In https://github.com/sigp/lighthouse/pull/3502/commits/856cd7e37db11e0318dff9db0979d67b79c2a633 I've made the VC delete the corresponding password file when it's deleting a keystore. This seemed like nice hygiene. Notably, it'll only delete that password file after it scans the validator definitions and finds that no other validator is also using that password file. --- Cargo.lock | 30 + Cargo.toml | 2 + account_manager/Cargo.toml | 2 + account_manager/src/common.rs | 50 +- account_manager/src/validator/import.rs | 8 +- account_manager/src/validator/recover.rs | 3 +- account_manager/src/wallet/recover.rs | 2 +- book/src/SUMMARY.md | 5 +- book/src/key-management.md | 27 +- book/src/validator-management.md | 4 + book/src/validator-manager-create.md | 206 +++ book/src/validator-manager-move.md | 188 +++ book/src/validator-manager.md | 35 + common/account_utils/src/lib.rs | 57 + .../src/validator_definitions.rs | 61 +- common/eth2/src/lighthouse_vc/http_client.rs | 15 + common/eth2/src/lighthouse_vc/std_types.rs | 5 +- common/eth2/src/lighthouse_vc/types.rs | 16 + common/validator_dir/Cargo.toml | 1 + common/validator_dir/src/builder.rs | 23 +- common/validator_dir/src/lib.rs | 4 +- common/validator_dir/src/validator_dir.rs | 6 +- consensus/types/src/lib.rs | 2 + consensus/types/src/withdrawal_credentials.rs | 57 + lighthouse/Cargo.toml | 2 + lighthouse/src/main.rs | 11 + lighthouse/tests/main.rs | 1 + lighthouse/tests/validator_client.rs | 26 + lighthouse/tests/validator_manager.rs | 344 +++++ validator_client/Cargo.toml | 2 - validator_client/src/attestation_service.rs | 30 +- validator_client/src/block_service.rs | 55 +- validator_client/src/cli.rs | 20 + validator_client/src/config.rs | 8 + validator_client/src/duties_service.rs | 14 + validator_client/src/duties_service/sync.rs | 13 + 
.../src/http_api/create_validator.rs | 37 +- validator_client/src/http_api/keystores.rs | 87 +- validator_client/src/http_api/mod.rs | 121 +- validator_client/src/http_api/test_utils.rs | 631 +++++++++ validator_client/src/http_api/tests.rs | 721 +++++----- .../src/http_api/tests/keystores.rs | 405 +++--- .../src/initialized_validators.rs | 160 ++- validator_client/src/key_cache.rs | 6 + validator_client/src/lib.rs | 10 +- validator_client/src/notifier.rs | 3 +- validator_client/src/preparation_service.rs | 19 +- .../src/sync_committee_service.rs | 29 +- validator_client/src/validator_store.rs | 6 +- validator_manager/Cargo.toml | 30 + validator_manager/src/common.rs | 361 +++++ validator_manager/src/create_validators.rs | 934 ++++++++++++ validator_manager/src/import_validators.rs | 436 ++++++ validator_manager/src/lib.rs | 85 ++ validator_manager/src/move_validators.rs | 1253 +++++++++++++++++ validator_manager/test_vectors/.gitignore | 1 + validator_manager/test_vectors/generate.py | 123 ++ .../deposit_data-1660803666.json | 1 + .../deposit_data-1660803669.json | 1 + .../deposit_data-1660803684.json | 1 + .../deposit_data-1660803679.json | 1 + .../deposit_data-1660803672.json | 1 + .../deposit_data-1660803675.json | 1 + .../deposit_data-1660803687.json | 1 + .../deposit_data-1660803690.json | 1 + .../deposit_data-1660803705.json | 1 + .../deposit_data-1660803701.json | 1 + .../deposit_data-1660803693.json | 1 + .../deposit_data-1660803696.json | 1 + 69 files changed, 6060 insertions(+), 745 deletions(-) create mode 100644 book/src/validator-manager-create.md create mode 100644 book/src/validator-manager-move.md create mode 100644 book/src/validator-manager.md create mode 100644 consensus/types/src/withdrawal_credentials.rs create mode 100644 lighthouse/tests/validator_manager.rs create mode 100644 validator_client/src/http_api/test_utils.rs create mode 100644 validator_manager/Cargo.toml create mode 100644 validator_manager/src/common.rs create mode 100644 
validator_manager/src/create_validators.rs create mode 100644 validator_manager/src/import_validators.rs create mode 100644 validator_manager/src/lib.rs create mode 100644 validator_manager/src/move_validators.rs create mode 100644 validator_manager/test_vectors/.gitignore create mode 100644 validator_manager/test_vectors/generate.py create mode 100644 validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json create mode 100644 validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json create mode 100644 validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803684.json create mode 100644 validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803679.json create mode 100644 validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803672.json create mode 100644 validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803675.json create mode 100644 validator_manager/test_vectors/vectors/prater_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803687.json create mode 100644 validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803690.json create mode 100644 validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803705.json create mode 100644 validator_manager/test_vectors/vectors/prater_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803701.json create mode 100644 validator_manager/test_vectors/vectors/prater_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803693.json create mode 100644 validator_manager/test_vectors/vectors/prater_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803696.json diff --git 
a/Cargo.lock b/Cargo.lock index 13f2b7cd43..a24087c3a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -30,6 +30,8 @@ dependencies = [ "filesystem", "safe_arith", "sensitive_url", + "serde", + "serde_json", "slashing_protection", "slot_clock", "tempfile", @@ -4327,6 +4329,7 @@ dependencies = [ "env_logger 0.9.3", "environment", "eth1", + "eth2", "eth2_network_config", "ethereum_hashing", "futures", @@ -4349,6 +4352,7 @@ dependencies = [ "unused_port", "validator_client", "validator_dir", + "validator_manager", ] [[package]] @@ -8613,6 +8617,7 @@ dependencies = [ "bls", "deposit_contract", "derivative", + "directory", "eth2_keystore", "filesystem", "hex", @@ -8623,6 +8628,31 @@ dependencies = [ "types", ] +[[package]] +name = "validator_manager" +version = "0.1.0" +dependencies = [ + "account_utils", + "bls", + "clap", + "clap_utils", + "environment", + "eth2", + "eth2_keystore", + "eth2_network_config", + "eth2_wallet", + "ethereum_serde_utils", + "hex", + "regex", + "serde", + "serde_json", + "tempfile", + "tokio", + "tree_hash", + "types", + "validator_client", +] + [[package]] name = "valuable" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index cb09d26a5d..15906a0306 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -83,6 +83,8 @@ members = [ "validator_client", "validator_client/slashing_protection", + "validator_manager", + "watch", ] resolver = "2" diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index ce863f9147..7d90cbb427 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -24,6 +24,8 @@ safe_arith = {path = "../consensus/safe_arith"} slot_clock = { path = "../common/slot_clock" } filesystem = { path = "../common/filesystem" } sensitive_url = { path = "../common/sensitive_url" } +serde = { version = "1.0.116", features = ["derive"] } +serde_json = "1.0.58" [dev-dependencies] tempfile = "3.1.0" diff --git a/account_manager/src/common.rs b/account_manager/src/common.rs index ce42615e50..0764db21f3 100644 --- 
a/account_manager/src/common.rs +++ b/account_manager/src/common.rs @@ -1,55 +1,7 @@ -use account_utils::PlainText; -use account_utils::{read_input_from_user, strip_off_newlines}; -use eth2_wallet::bip39::{Language, Mnemonic}; -use std::fs; -use std::path::PathBuf; -use std::str::from_utf8; -use std::thread::sleep; -use std::time::Duration; +use account_utils::read_input_from_user; -pub const MNEMONIC_PROMPT: &str = "Enter the mnemonic phrase:"; pub const WALLET_NAME_PROMPT: &str = "Enter wallet name:"; -pub fn read_mnemonic_from_cli( - mnemonic_path: Option, - stdin_inputs: bool, -) -> Result { - let mnemonic = match mnemonic_path { - Some(path) => fs::read(&path) - .map_err(|e| format!("Unable to read {:?}: {:?}", path, e)) - .and_then(|bytes| { - let bytes_no_newlines: PlainText = strip_off_newlines(bytes).into(); - let phrase = from_utf8(bytes_no_newlines.as_ref()) - .map_err(|e| format!("Unable to derive mnemonic: {:?}", e))?; - Mnemonic::from_phrase(phrase, Language::English).map_err(|e| { - format!( - "Unable to derive mnemonic from string {:?}: {:?}", - phrase, e - ) - }) - })?, - None => loop { - eprintln!(); - eprintln!("{}", MNEMONIC_PROMPT); - - let mnemonic = read_input_from_user(stdin_inputs)?; - - match Mnemonic::from_phrase(mnemonic.as_str(), Language::English) { - Ok(mnemonic_m) => { - eprintln!("Valid mnemonic provided."); - eprintln!(); - sleep(Duration::from_secs(1)); - break mnemonic_m; - } - Err(_) => { - eprintln!("Invalid mnemonic"); - } - } - }, - }; - Ok(mnemonic) -} - /// Reads in a wallet name from the user. If the `--wallet-name` flag is provided, use it. Otherwise /// read from an interactive prompt using tty unless the `--stdin-inputs` flag is provided. 
pub fn read_wallet_name_from_cli( diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index 8dc50a9df1..339d9a2914 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -4,8 +4,8 @@ use account_utils::{ eth2_keystore::Keystore, read_password_from_user, validator_definitions::{ - recursively_find_voting_keystores, ValidatorDefinition, ValidatorDefinitions, - CONFIG_FILENAME, + recursively_find_voting_keystores, PasswordStorage, ValidatorDefinition, + ValidatorDefinitions, CONFIG_FILENAME, }, ZeroizeString, }; @@ -277,7 +277,9 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin let suggested_fee_recipient = None; let validator_def = ValidatorDefinition::new_keystore_with_password( &dest_keystore, - password_opt, + password_opt + .map(PasswordStorage::ValidatorDefinitions) + .unwrap_or(PasswordStorage::None), graffiti, suggested_fee_recipient, None, diff --git a/account_manager/src/validator/recover.rs b/account_manager/src/validator/recover.rs index d9b05e7756..33d3b18926 100644 --- a/account_manager/src/validator/recover.rs +++ b/account_manager/src/validator/recover.rs @@ -1,10 +1,9 @@ use super::create::STORE_WITHDRAW_FLAG; -use crate::common::read_mnemonic_from_cli; use crate::validator::create::COUNT_FLAG; use crate::wallet::create::STDIN_INPUTS_FLAG; use crate::SECRETS_DIR_FLAG; use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder}; -use account_utils::random_password; +use account_utils::{random_password, read_mnemonic_from_cli}; use clap::{App, Arg, ArgMatches}; use directory::ensure_dir_exists; use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR}; diff --git a/account_manager/src/wallet/recover.rs b/account_manager/src/wallet/recover.rs index f107c3638c..6e047aca8d 100644 --- a/account_manager/src/wallet/recover.rs +++ b/account_manager/src/wallet/recover.rs @@ -1,6 +1,6 @@ -use 
crate::common::read_mnemonic_from_cli; use crate::wallet::create::{create_wallet_from_mnemonic, STDIN_INPUTS_FLAG}; use crate::wallet::create::{HD_TYPE, NAME_FLAG, PASSWORD_FLAG, TYPE_FLAG}; +use account_utils::read_mnemonic_from_cli; use clap::{App, Arg, ArgMatches}; use std::path::PathBuf; diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 7431d22387..507896f431 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -12,6 +12,9 @@ * [Run a Node](./run_a_node.md) * [Become a Validator](./mainnet-validator.md) * [Validator Management](./validator-management.md) + * [The `validator-manager` Command](./validator-manager.md) + * [Creating validators](./validator-manager-create.md) + * [Moving validators](./validator-manager-move.md) * [Slashing Protection](./slashing-protection.md) * [Voluntary Exits](./voluntary-exit.md) * [Partial Withdrawals](./partial-withdrawal.md) @@ -41,7 +44,7 @@ * [Remote Signing with Web3Signer](./validator-web3signer.md) * [Database Configuration](./advanced_database.md) * [Database Migrations](./database-migrations.md) - * [Key Management](./key-management.md) + * [Key Management (Deprecated)](./key-management.md) * [Key Recovery](./key-recovery.md) * [Advanced Networking](./advanced_networking.md) * [Running a Slasher](./slasher.md) diff --git a/book/src/key-management.md b/book/src/key-management.md index cebd84649d..b2bb7737fd 100644 --- a/book/src/key-management.md +++ b/book/src/key-management.md @@ -1,9 +1,30 @@ -# Key Management +# Key Management (Deprecated) [launchpad]: https://launchpad.ethereum.org/ -> -> **Note: While Lighthouse is able to generate the validator keys and the deposit data file to submit to the deposit contract, we strongly recommend using the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) to create validators keys and the deposit data file. 
This is because the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) has the option to assign a withdrawal address during the key generation process, while Lighthouse wallet will always generate keys with withdrawal credentials of type 0x00. This means that users who created keys using Lighthouse will have to update their withdrawal credentials in the future to enable withdrawals. In addition, Lighthouse generates the deposit data file in the form of `*.rlp`, which cannot be uploaded to the [Staking launchpad][launchpad] that accepts only `*.json` file. This means that users have to directly interact with the deposit contract to be able to submit the deposit if they were to generate the files using Lighthouse.** +**⚠️ The information on this page refers to tooling and process that have been deprecated. Please read the "Deprecation Notice". ⚠️** + +## Deprecation Notice + +This page recommends the use of the `lighthouse account-manager` tool to create +validators. This tool will always generate keys with the withdrawal credentials +of type `0x00`. This means the users who created keys using `lighthouse +account-manager` will have to update their withdrawal credentials in a +separate step to receive staking rewards. + +In addition, Lighthouse generates the deposit data file in the form of `*.rlp`, +which cannot be uploaded to the [Staking launchpad][launchpad] that accepts only +`*.json` file. This means that users have to directly interact with the deposit +contract to be able to submit the deposit if they were to generate the files +using Lighthouse. + +Rather than continuing to read this page, we recommend users visit either: + +- The [Staking Launchpad][launchpad] for detailed, beginner-friendly instructions. +- The [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) for a CLI tool used by the [Staking Launchpad][launchpad]. 
+- The [validator-manager documentation](./validator-manager.md) for a Lighthouse-specific tool for streamlined validator management tools. + +## The `lighthouse account-manager` Lighthouse uses a _hierarchical_ key management system for producing validator keys. It is hierarchical because each validator key can be _derived_ from a diff --git a/book/src/validator-management.md b/book/src/validator-management.md index be34fef2c3..df7c2ac476 100644 --- a/book/src/validator-management.md +++ b/book/src/validator-management.md @@ -13,6 +13,10 @@ standard directories and do not start their `lighthouse vc` with the this document. However, users with more complex needs may find this document useful. +The [lighthouse validator-manager](./validator-manager.md) command can be used +to create and import validators to a Lighthouse VC. It can also be used to move +validators between two Lighthouse VCs. + ## Introducing the `validator_definitions.yml` file The `validator_definitions.yml` file is located in the `validator-dir`, which diff --git a/book/src/validator-manager-create.md b/book/src/validator-manager-create.md new file mode 100644 index 0000000000..779c159276 --- /dev/null +++ b/book/src/validator-manager-create.md @@ -0,0 +1,206 @@ +# Creating and Importing Validators + +[Ethereum Staking Launchpad]: https://launchpad.ethereum.org/en/ + +The `lighthouse validator-manager create` command derives validators from a +mnemonic and produces two files: + +- `validators.json`: the keystores and passwords for the newly generated + validators, in JSON format. +- `deposits.json`: a JSON file of the same format as + [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) which can + be used for deposit submission via the [Ethereum Staking + Launchpad][]. + +The `lighthouse validator-manager import` command accepts a `validators.json` +file (from the `create` command) and submits those validators to a running +Lighthouse Validator Client via the HTTP API. 
+ +These two commands enable a workflow of: + +1. Creating the validators via the `create` command. +1. Importing the validators via the `import` command. +1. Depositing validators via the [Ethereum Staking + Launchpad][]. + +The separation of the `create` and `import` commands allows for running the +`create` command on an air-gapped host whilst performing the `import` command on +an internet-connected host. + +The `create` and `import` commands are recommended for advanced users who are +familiar with command line tools and the practicalities of managing sensitive +cryptographic material. **We recommend that novice users follow the workflow on +[Ethereum Staking Launchpad][] rather than using the `create` and `import` +commands.** + +## Simple Example + +Create validators from a mnemonic with: + +```bash +lighthouse \ + validator-manager \ + create \ + --network mainnet \ + --first-index 0 \ + --count 2 \ + --eth1-withdrawal-address
<ETH1_WITHDRAWAL_ADDRESS> \
+  --suggested-fee-recipient <SUGGESTED_FEE_RECIPIENT>
\ + --output-path ./ +``` +> If the flag `--first-index` is not provided, it will default to using index 0. +> The `--suggested-fee-recipient` flag may be omitted to use whatever default +> value the VC uses. It does not necessarily need to be identical to +> `--eth1-withdrawal-address`. +> The command will create the `deposits.json` and `validators.json` in the present working directory. If you would like these files to be created in a different directory, change the value of `output-path`, for example `--output-path /desired/directory`. The directory will be created if the path does not exist. + +Then, import the validators to a running VC with: + +```bash +lighthouse \ + validator-manager \ + import \ + --validators-file validators.json \ + --vc-token +``` +> This is assuming that `validators.json` is in the present working directory. If it is not, insert the directory of the file. +> Be sure to remove `./validators.json` after the import is successful since it +> contains unencrypted validator keystores. + +## Detailed Guide + +This guide will create two validators and import them to a VC. For simplicity, +the same host will be used to generate the keys and run the VC. In reality, +users may want to perform the `create` command on an air-gapped machine and then +move the `validators.json` and `deposits.json` files to an Internet-connected +host. This would help protect the mnemonic from being exposed to the Internet. + +### 1. Create the Validators + +Run the `create` command, substituting `
` for an execution address that +you control. This is where all the staked ETH and rewards will ultimately +reside, so it's very important that this address is secure, accessible and +backed-up. The `create` command: + +```bash +lighthouse \ + validator-manager \ + create \ + --first-index 0 \ + --count 2 \ + --eth1-withdrawal-address
 \
+  --output-path ./
+```
+
+If successful, the command output will appear like below:
+
+```bash
+Running validator manager for mainnet network
+
+Enter the mnemonic phrase:
+
+Valid mnemonic provided.
+
+Starting derivation of 2 keystores. Each keystore may take several seconds.
+Completed 1/2: 0x8885c29b8f88ee9b9a37b480fd4384fed74bda33d85bc8171a904847e65688b6c9bb4362d6597fd30109fb2def6c3ae4
+Completed 2/2: 0xa262dae3dcd2b2e280af534effa16bedb27c06f2959e114d53bd2a248ca324a018dc73179899a066149471a94a1bc92f
+Keystore generation complete
+Writing "./validators.json"
+Writing "./deposits.json"
+```
+
+This command will create validators at indices `0, 1`. The exact indices created
+can be influenced with the `--first-index` and `--count` flags. Use these flags
+with caution to prevent creating the same validator twice, this may result in a
+slashing!
+
+The command will create two files:
+
+- `./deposits.json`: this file does *not* contain sensitive information and may be uploaded to the [Ethereum Staking Launchpad].
+- `./validators.json`: this file contains **sensitive unencrypted validator keys, do not share it with anyone or upload it to any website**.
+
+### 2. Import the validators
+
+The VC which will receive the validators needs to have the following flags at a minimum:
+
+- `--http`
+- `--http-port 5062`
+- `--enable-doppelganger-protection`
+
+Therefore, the VC command might look like:
+
+```bash
+lighthouse \
+  vc \
+  --http \
+  --http-port 5062 \
+  --enable-doppelganger-protection
+```
+
+In order to import the validators, the location of the VC `api-token.txt` file
+must be known. The location of the file varies, but it is located in the
+"validator directory" of your data directory. For example:
+`~/.lighthouse/mainnet/validators/api-token.txt`. We will use `<API-TOKEN-PATH>`
+to substitute this value. If you are unsure of the `api-token.txt` path, you can run `curl http://localhost:5062/lighthouse/auth` which will show the path. 
+ + +Once the VC is running, use the `import` command to import the validators to the VC: + +```bash +lighthouse \ + validator-manager \ + import \ + --validators-file validators.json \ + --vc-token +``` + +If successful, the command output will appear like below: + +```bash +Running validator manager for mainnet network +Validator client is reachable at http://localhost:5062/ and reports 0 validators +Starting to submit 2 validators to VC, each validator may take several seconds +Uploaded keystore 1 of 2 to the VC +Uploaded keystore 2 of 2 to the VC +``` + +The user should now *securely* delete the `validators.json` file (e.g., `shred -u validators.json`). +The `validators.json` contains the unencrypted validator keys and must not be +shared with anyone. +At the same time, `lighthouse vc` will log: +```bash +INFO Importing keystores via standard HTTP API, count: 1 +WARN No slashing protection data provided with keystores +INFO Enabled validator voting_pubkey: 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f, signing_method: local_keystore +INFO Modified key_cache saved successfully +``` +The WARN message means that the `validators.json` file does not contain the slashing protection data. This is normal if you are starting a new validator. The flag `--enable-doppelganger-protection` will also protect users from potential slashing risk. +The validators will now go through 2-3 epochs of [doppelganger +protection](./validator-doppelganger.md) and will automatically start performing +their duties when they are deposited and activated. + +If the host VC contains the same public key as the `validators.json` file, an error will be shown and the `import` process will stop: + +```bash +Duplicate validator 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f already exists on the destination validator client. 
This may indicate that some validators are running in two places at once, which can lead to slashing. If you are certain that there is no risk, add the --ignore-duplicates flag. +Err(DuplicateValidator(0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f)) +``` + +If you are certain that it is safe, you can add the flag `--ignore-duplicates` in the `import` command. The command becomes: + +```bash +lighthouse \ + validator-manager \ + import \ + --validators-file validators.json \ + --vc-token \ + --ignore-duplicates +``` +and the output will be as follows: + +```bash +Duplicate validators are ignored, ignoring 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f which exists on the destination validator client +Re-uploaded keystore 1 of 6 to the VC +``` + +The guide is complete. \ No newline at end of file diff --git a/book/src/validator-manager-move.md b/book/src/validator-manager-move.md new file mode 100644 index 0000000000..98932604d5 --- /dev/null +++ b/book/src/validator-manager-move.md @@ -0,0 +1,188 @@ +# Moving Validators + +The `lighthouse validator-manager move` command uses the VC HTTP API to move +validators from one VC (the "src" VC) to another VC (the "dest" VC). The move +operation is *comprehensive*; it will: + +- Disable the validators on the src VC. +- Remove the validator keystores from the src VC file system. +- Export the slashing database records for the appropriate validators from the src VC to the dest VC. +- Enable the validators on the dest VC. +- Generally result in very little or no validator downtime. + +It is capable of moving all validators on the src VC, a count of validators or +a list of pubkeys. + +The `move` command is only guaranteed to work between two Lighthouse VCs (i.e., +there is no guarantee that the commands will work between Lighthouse and Teku, for instance). 
+ +The `move` command only supports moving validators using a keystore on the local +file system, it does not support `Web3Signer` validators. + +Although all efforts are taken to avoid it, it's possible for the `move` command +to fail in a way that removes the validator from the src VC without adding it to the +dest VC. Therefore, it is recommended to **never use the `move` command without +having a backup of all validator keystores (e.g. the mnemonic).** + +## Simple Example + +The following command will move all validators from the VC running at +`http://localhost:6062` to the VC running at `http://localhost:5062`. + +```bash +lighthouse \ + validator-manager \ + move \ + --src-vc-url http://localhost:6062 \ + --src-vc-token ~/src-token.txt \ + --dest-vc-url http://localhost:5062 \ + --dest-vc-token ~/.lighthouse/mainnet/validators/api-token.txt \ + --validators all +``` + +## Detailed Guide + +This guide describes the steps to move validators between two validator clients (VCs) which are +able to SSH between each other. This guide assumes experience with the Linux command line and SSH +connections. + +There will be two VCs in this example: + +- The *source* VC which contains the validators/keystores to be moved. +- The *destination* VC which is to take the validators/keystores from the source. + +There will be two hosts in this example: + +- Host 1 (*"source host"*): Is running the `src-vc`. +- Host 2 (*"destination host"*): Is running the `dest-vc`. + +The example assumes +that Host 1 is able to SSH to Host 2. + +In reality, many host configurations are possible. For example: + +- Both VCs on the same host. +- Both VCs on different hosts and the `validator-manager` being used on a third host. + +### 1. 
Configure the Source VC + +The source VC needs to have the following flags at a minimum: + +- `--http` +- `--http-port 5062` +- `--http-allow-keystore-export` + +Therefore, the source VC command might look like: + +```bash +lighthouse \ + vc \ + --http \ + --http-port 5062 \ + --http-allow-keystore-export +``` + +### 2. Configure the Destination VC + +The destination VC needs to have the following flags at a minimum: + +- `--http` +- `--http-port 5062` +- `--enable-doppelganger-protection` + +Therefore, the destination VC command might look like: + +```bash +lighthouse \ + vc \ + --http \ + --http-port 5062 \ + --enable-doppelganger-protection +``` + +> The `--enable-doppelganger-protection` flag is not *strictly* required, however +> it is recommended for an additional layer of safety. It will result in 2-3 +> epochs of downtime for the validator after it is moved, which is generally an +> inconsequential cost in lost rewards or penalties. +> +> Optionally, users can add the `--http-store-passwords-in-secrets-dir` flag if they'd like to have +> the import validator keystore passwords stored in separate files rather than in the +> `validator-definitions.yml` file. If you don't know what this means, you can safely omit the flag. + +### 3. Obtain the Source API Token + +The VC API is protected by an *API token*. This is stored in a file on each of the hosts. Since +we'll be running our command on the destination host, it will need to have the API token for the +source host on its file-system. + +On the **source host**, find the location of the `api-token.txt` file and copy the contents. The +location of the file varies, but it is located in the "validator directory" of your data directory, +alongside validator keystores. For example: `~/.lighthouse/mainnet/validators/api-token.txt`. If you are unsure of the `api-token.txt` path, you can run `curl http://localhost:5062/lighthouse/auth` which will show the path. 
+ +Copy the contents of that file into a new file on the **destination host** at `~/src-token.txt`. The +API token should be similar to `api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123`. + +### 4. Create an SSH Tunnel + +In the **source host**, open a terminal window, SSH to the **destination host** and establish a reverse-SSH connection +between the **destination host** and the **source host**. + +```bash +ssh dest-host +ssh -L 6062:localhost:5062 src-host +``` + +It's important that you leave this session open throughout the rest of this tutorial. If you close +this terminal window then the connection between the destination and source host will be lost. + +### 5. Move + +With the SSH tunnel established between the `dest-host` and `src-host`, from the **destination +host** run the command to move the validators: + +```bash +lighthouse \ + validator-manager \ + move \ + --src-vc-url http://localhost:6062 \ + --src-vc-token ~/src-token.txt \ + --dest-vc-url http://localhost:5062 \ + --dest-vc-token ~/.lighthouse/mainnet/validators/api-token.txt \ + --validators all +``` + +The command will provide information about the progress of the operation and +emit `Done.` when the operation has completed successfully. For example: + +```bash +Running validator manager for mainnet network +Validator client is reachable at http://localhost:5062/ and reports 2 validators +Validator client is reachable at http://localhost:6062/ and reports 0 validators +Moved keystore 1 of 2 +Moved keystore 2 of 2 +Done. +``` +At the same time, `lighthouse vc` will log: +```bash +INFO Importing keystores via standard HTTP API, count: 1 +INFO Enabled validator voting_pubkey: 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f, signing_method: local_keystore +INFO Modified key_cache saved successfully +Once the operation completes successfully, there is nothing else to be done. 
The +validators have been removed from the `src-host` and enabled at the `dest-host`. +If the `--enable-doppelganger-protection` flag was used it may take 2-3 epochs +for the validators to start attesting and producing blocks on the `dest-host`. +If you would only like to move some validators, you can replace the flag `--validators all` with one or more validator public keys. For example: + +```bash +lighthouse \ + validator-manager \ + move \ + --src-vc-url http://localhost:6062 \ + --src-vc-token ~/src-token.txt \ + --dest-vc-url http://localhost:5062 \ + --dest-vc-token ~/.lighthouse/mainnet/validators/api-token.txt \ + --validators 0x9096aab771e44da149bd7c9926d6f7bb96ef465c0eeb4918be5178cd23a1deb4aec232c61d85ff329b54ed4a3bdfff3a,0x90fc4f72d898a8f01ab71242e36f4545aaf87e3887be81632bb8ba4b2ae8fb70753a62f866344d7905e9a07f5a9cdda1 +``` +Any errors encountered during the operation should include information on how to +proceed. Assistance is also available on our +[Discord](https://discord.gg/cyAszAh). \ No newline at end of file diff --git a/book/src/validator-manager.md b/book/src/validator-manager.md new file mode 100644 index 0000000000..e3cb74bd66 --- /dev/null +++ b/book/src/validator-manager.md @@ -0,0 +1,35 @@ +# Validator Manager + +[Ethereum Staking Launchpad]: https://launchpad.ethereum.org/en/ +[Import Validators]: #import-validators + +## Introduction + +The `lighthouse validator-manager` tool provides utilities for managing validators on a *running* +Lighthouse Validator Client. The validator manager performs operations via the HTTP API of the +validator client (VC). Due to limitations of the +[keymanager-APIs](https://ethereum.github.io/keymanager-APIs/), only Lighthouse VCs are fully +supported by this command. + +The validator manager tool is similar to the `lighthouse account-manager` tool, +except the latter creates files that will be read by the VC next time it starts +whilst the former makes instant changes to a live VC. 
+ +The `account-manager` is ideal for importing keys created with the +[staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli). On the +other hand, the `validator-manager` is ideal for moving existing validators +between two VCs or for advanced users to create validators at scale with less +downtime. + +The `validator-manager` boasts the following features: + +- One-line command to arbitrarily move validators between two VCs, maintaining the slashing protection database. +- Generates deposit files compatible with the [Ethereum Staking Launchpad][]. +- Generally involves zero or very little downtime. +- The "key cache" is preserved whenever a validator is added with the validator + manager, preventing long waits at start up when a new validator is added. + +## Guides + +- [Creating and importing validators using the `create` and `import` commands.](./validator-manager-create.md) +- [Moving validators between two VCs using the `move` command.](./validator-manager-move.md) \ No newline at end of file diff --git a/common/account_utils/src/lib.rs b/common/account_utils/src/lib.rs index 89de380385..e566d7cdda 100644 --- a/common/account_utils/src/lib.rs +++ b/common/account_utils/src/lib.rs @@ -13,6 +13,9 @@ use std::fs::{self, File}; use std::io; use std::io::prelude::*; use std::path::{Path, PathBuf}; +use std::str::from_utf8; +use std::thread::sleep; +use std::time::Duration; use zeroize::Zeroize; pub mod validator_definitions; @@ -30,6 +33,8 @@ pub const MINIMUM_PASSWORD_LEN: usize = 12; /// array of length 32. const DEFAULT_PASSWORD_LEN: usize = 48; +pub const MNEMONIC_PROMPT: &str = "Enter the mnemonic phrase:"; + /// Returns the "default" path where a wallet should store its password file. 
pub fn default_wallet_password_path>(wallet_name: &str, secrets_dir: P) -> PathBuf { secrets_dir.as_ref().join(format!("{}.pass", wallet_name)) @@ -59,6 +64,18 @@ pub fn read_password>(path: P) -> Result { fs::read(path).map(strip_off_newlines).map(Into::into) } +/// Reads a password file into a `ZeroizeString` struct, with new-lines removed. +pub fn read_password_string>(path: P) -> Result { + fs::read(path) + .map_err(|e| format!("Error opening file: {:?}", e)) + .map(strip_off_newlines) + .and_then(|bytes| { + String::from_utf8(bytes) + .map_err(|e| format!("Error decoding utf8: {:?}", e)) + .map(Into::into) + }) +} + /// Write a file atomically by using a temporary file as an intermediate. /// /// Care is taken to preserve the permissions of the file at `file_path` being written. @@ -220,6 +237,46 @@ impl AsRef<[u8]> for ZeroizeString { } } +pub fn read_mnemonic_from_cli( + mnemonic_path: Option, + stdin_inputs: bool, +) -> Result { + let mnemonic = match mnemonic_path { + Some(path) => fs::read(&path) + .map_err(|e| format!("Unable to read {:?}: {:?}", path, e)) + .and_then(|bytes| { + let bytes_no_newlines: PlainText = strip_off_newlines(bytes).into(); + let phrase = from_utf8(bytes_no_newlines.as_ref()) + .map_err(|e| format!("Unable to derive mnemonic: {:?}", e))?; + Mnemonic::from_phrase(phrase, Language::English).map_err(|e| { + format!( + "Unable to derive mnemonic from string {:?}: {:?}", + phrase, e + ) + }) + })?, + None => loop { + eprintln!(); + eprintln!("{}", MNEMONIC_PROMPT); + + let mnemonic = read_input_from_user(stdin_inputs)?; + + match Mnemonic::from_phrase(mnemonic.as_str(), Language::English) { + Ok(mnemonic_m) => { + eprintln!("Valid mnemonic provided."); + eprintln!(); + sleep(Duration::from_secs(1)); + break mnemonic_m; + } + Err(_) => { + eprintln!("Invalid mnemonic"); + } + } + }, + }; + Ok(mnemonic) +} + #[cfg(test)] mod test { use super::*; diff --git a/common/account_utils/src/validator_definitions.rs 
b/common/account_utils/src/validator_definitions.rs index 6ce2517fb2..c91e717d11 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -3,7 +3,9 @@ //! Serves as the source-of-truth of which validators this validator client should attempt (or not //! attempt) to load into the `crate::intialized_validators::InitializedValidators` struct. -use crate::{default_keystore_password_path, write_file_via_temporary, ZeroizeString}; +use crate::{ + default_keystore_password_path, read_password_string, write_file_via_temporary, ZeroizeString, +}; use directory::ensure_dir_exists; use eth2_keystore::Keystore; use regex::Regex; @@ -43,6 +45,18 @@ pub enum Error { UnableToOpenKeystore(eth2_keystore::Error), /// The validator directory could not be created. UnableToCreateValidatorDir(PathBuf), + UnableToReadKeystorePassword(String), + KeystoreWithoutPassword, +} + +/// Defines how a password for a validator keystore will be persisted. +pub enum PasswordStorage { + /// Store the password in the `validator_definitions.yml` file. + ValidatorDefinitions(ZeroizeString), + /// Store the password in a separate, dedicated file (likely in the "secrets" directory). + File(PathBuf), + /// Don't store the password at all. + None, } #[derive(Clone, PartialEq, Serialize, Deserialize, Hash, Eq)] @@ -92,6 +106,34 @@ impl SigningDefinition { pub fn is_local_keystore(&self) -> bool { matches!(self, SigningDefinition::LocalKeystore { .. }) } + + pub fn voting_keystore_password(&self) -> Result, Error> { + match self { + SigningDefinition::LocalKeystore { + voting_keystore_password: Some(password), + .. + } => Ok(Some(password.clone())), + SigningDefinition::LocalKeystore { + voting_keystore_password_path: Some(path), + .. + } => read_password_string(path) + .map(Into::into) + .map(Option::Some) + .map_err(Error::UnableToReadKeystorePassword), + SigningDefinition::LocalKeystore { .. 
} => Err(Error::KeystoreWithoutPassword), + SigningDefinition::Web3Signer(_) => Ok(None), + } + } + + pub fn voting_keystore_password_path(&self) -> Option<&PathBuf> { + match self { + SigningDefinition::LocalKeystore { + voting_keystore_password_path: Some(path), + .. + } => Some(path), + _ => None, + } + } } /// A validator that may be initialized by this validator client. @@ -129,7 +171,7 @@ impl ValidatorDefinition { /// This function does not check the password against the keystore. pub fn new_keystore_with_password>( voting_keystore_path: P, - voting_keystore_password: Option, + voting_keystore_password_storage: PasswordStorage, graffiti: Option, suggested_fee_recipient: Option
, gas_limit: Option, @@ -139,6 +181,12 @@ impl ValidatorDefinition { let keystore = Keystore::from_json_file(&voting_keystore_path).map_err(Error::UnableToOpenKeystore)?; let voting_public_key = keystore.public_key().ok_or(Error::InvalidKeystorePubkey)?; + let (voting_keystore_password_path, voting_keystore_password) = + match voting_keystore_password_storage { + PasswordStorage::ValidatorDefinitions(password) => (None, Some(password)), + PasswordStorage::File(path) => (Some(path), None), + PasswordStorage::None => (None, None), + }; Ok(ValidatorDefinition { enabled: true, @@ -150,7 +198,7 @@ impl ValidatorDefinition { builder_proposals, signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, - voting_keystore_password_path: None, + voting_keystore_password_path, voting_keystore_password, }, }) @@ -346,6 +394,13 @@ impl ValidatorDefinitions { pub fn as_mut_slice(&mut self) -> &mut [ValidatorDefinition] { self.0.as_mut_slice() } + + // Returns an iterator over all the `voting_keystore_password_paths` in self. + pub fn iter_voting_keystore_password_paths(&self) -> impl Iterator { + self.0 + .iter() + .filter_map(|def| def.signing_definition.voting_keystore_password_path()) + } } /// Perform an exhaustive tree search of `dir`, adding any discovered voting keystore paths to diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index cd7873c9b6..7bf4cf5b19 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -490,6 +490,21 @@ impl ValidatorClientHttpClient { .await } + /// `DELETE eth/v1/keystores` + pub async fn delete_lighthouse_keystores( + &self, + req: &DeleteKeystoresRequest, + ) -> Result { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("lighthouse") + .push("keystores"); + + self.delete_with_unsigned_response(path, req).await + } + fn make_keystores_url(&self) -> Result { let mut url = self.server.full.clone(); url.path_segments_mut() diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index 0d67df47a9..33e2f764ef 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -1,9 +1,10 @@ use account_utils::ZeroizeString; use eth2_keystore::Keystore; use serde::{Deserialize, Serialize}; -use slashing_protection::interchange::Interchange; use types::{Address, PublicKeyBytes}; +pub use slashing_protection::interchange::Interchange; + #[derive(Debug, Deserialize, Serialize, PartialEq)] pub struct GetFeeRecipientResponse { pub pubkey: PublicKeyBytes, @@ -27,7 +28,7 @@ pub struct ListKeystoresResponse { pub data: Vec, } -#[derive(Debug, Deserialize, Serialize, PartialEq)] +#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] pub struct SingleKeystoreResponse { pub validating_pubkey: PublicKeyBytes, pub derivation_path: Option, diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 7bbe041dbd..f1a91b4ef1 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -152,3 +152,19 @@ pub struct UpdateGasLimitRequest { pub struct VoluntaryExitQuery { pub epoch: Option, } + +#[derive(Deserialize, Serialize)] +pub struct ExportKeystoresResponse { + pub data: Vec, + #[serde(with = "serde_utils::json_str")] + pub slashing_protection: Interchange, +} + +#[derive(Deserialize, Serialize)] +pub struct SingleExportKeystoresResponse { + pub status: Status, + #[serde(skip_serializing_if = "Option::is_none")] + pub validating_keystore: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub validating_keystore_password: Option, +} diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml index 
39a14e2837..8accddfcb9 100644 --- a/common/validator_dir/Cargo.toml +++ b/common/validator_dir/Cargo.toml @@ -20,6 +20,7 @@ tree_hash = "0.5.0" hex = "0.4.2" derivative = "2.1.1" lockfile = { path = "../lockfile" } +directory = { path = "../directory" } [dev-dependencies] tempfile = "3.1.0" diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs index 2b3f670c70..bccf9086ac 100644 --- a/common/validator_dir/src/builder.rs +++ b/common/validator_dir/src/builder.rs @@ -1,6 +1,7 @@ use crate::{Error as DirError, ValidatorDir}; use bls::get_withdrawal_credentials; use deposit_contract::{encode_eth1_tx_data, Error as DepositError}; +use directory::ensure_dir_exists; use eth2_keystore::{Error as KeystoreError, Keystore, KeystoreBuilder, PlainText}; use filesystem::create_with_600_perms; use rand::{distributions::Alphanumeric, Rng}; @@ -41,6 +42,7 @@ pub enum Error { #[cfg(feature = "insecure_keys")] InsecureKeysError(String), MissingPasswordDir, + UnableToCreatePasswordDir(String), } impl From for Error { @@ -78,6 +80,13 @@ impl<'a> Builder<'a> { self } + /// Optionally supply a directory in which to store the passwords for the validator keystores. + /// If `None` is provided, do not store the password. + pub fn password_dir_opt(mut self, password_dir_opt: Option) -> Self { + self.password_dir = password_dir_opt; + self + } + /// Build the `ValidatorDir` use the given `keystore` which can be unlocked with `password`. /// /// The builder will not necessarily check that `password` can unlock `keystore`. @@ -153,6 +162,10 @@ impl<'a> Builder<'a> { create_dir_all(&dir).map_err(Error::UnableToCreateDir)?; } + if let Some(password_dir) = &self.password_dir { + ensure_dir_exists(password_dir).map_err(Error::UnableToCreatePasswordDir)?; + } + // The withdrawal keystore must be initialized in order to store it or create an eth1 // deposit. 
if (self.store_withdrawal_keystore || self.deposit_info.is_some()) @@ -234,7 +247,7 @@ impl<'a> Builder<'a> { if self.store_withdrawal_keystore { // Write the withdrawal password to file. write_password_to_file( - password_dir.join(withdrawal_keypair.pk.as_hex_string()), + keystore_password_path(password_dir, &withdrawal_keystore), withdrawal_password.as_bytes(), )?; @@ -250,7 +263,7 @@ impl<'a> Builder<'a> { if let Some(password_dir) = self.password_dir.as_ref() { // Write the voting password to file. write_password_to_file( - password_dir.join(format!("0x{}", voting_keystore.pubkey())), + keystore_password_path(password_dir, &voting_keystore), voting_password.as_bytes(), )?; } @@ -262,6 +275,12 @@ impl<'a> Builder<'a> { } } +pub fn keystore_password_path>(password_dir: P, keystore: &Keystore) -> PathBuf { + password_dir + .as_ref() + .join(format!("0x{}", keystore.pubkey())) +} + /// Writes a JSON keystore to file. fn write_keystore_to_file(path: PathBuf, keystore: &Keystore) -> Result<(), Error> { if path.exists() { diff --git a/common/validator_dir/src/lib.rs b/common/validator_dir/src/lib.rs index a39d322834..4aa0d590a1 100644 --- a/common/validator_dir/src/lib.rs +++ b/common/validator_dir/src/lib.rs @@ -15,6 +15,6 @@ pub use crate::validator_dir::{ ETH1_DEPOSIT_TX_HASH_FILE, }; pub use builder::{ - Builder, Error as BuilderError, ETH1_DEPOSIT_DATA_FILE, VOTING_KEYSTORE_FILE, - WITHDRAWAL_KEYSTORE_FILE, + keystore_password_path, Builder, Error as BuilderError, ETH1_DEPOSIT_DATA_FILE, + VOTING_KEYSTORE_FILE, WITHDRAWAL_KEYSTORE_FILE, }; diff --git a/common/validator_dir/src/validator_dir.rs b/common/validator_dir/src/validator_dir.rs index cb1ddde24a..24b317dcfe 100644 --- a/common/validator_dir/src/validator_dir.rs +++ b/common/validator_dir/src/validator_dir.rs @@ -1,5 +1,5 @@ use crate::builder::{ - ETH1_DEPOSIT_AMOUNT_FILE, ETH1_DEPOSIT_DATA_FILE, VOTING_KEYSTORE_FILE, + keystore_password_path, ETH1_DEPOSIT_AMOUNT_FILE, ETH1_DEPOSIT_DATA_FILE, 
VOTING_KEYSTORE_FILE, WITHDRAWAL_KEYSTORE_FILE, }; use deposit_contract::decode_eth1_tx_data; @@ -219,9 +219,7 @@ pub fn unlock_keypair>( ) .map_err(Error::UnableToReadKeystore)?; - let password_path = password_dir - .as_ref() - .join(format!("0x{}", keystore.pubkey())); + let password_path = keystore_password_path(password_dir, &keystore); let password: PlainText = read(&password_path) .map_err(|_| Error::UnableToReadPassword(password_path))? .into(); diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 874d7cd2bd..85ce351766 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -71,6 +71,7 @@ pub mod sync_duty; pub mod validator; pub mod validator_subscription; pub mod voluntary_exit; +pub mod withdrawal_credentials; #[macro_use] pub mod slot_epoch_macros; pub mod config_and_preset; @@ -189,6 +190,7 @@ pub use crate::validator_registration_data::*; pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; pub use crate::withdrawal::Withdrawal; +pub use crate::withdrawal_credentials::WithdrawalCredentials; pub type CommitteeIndex = u64; pub type Hash256 = H256; diff --git a/consensus/types/src/withdrawal_credentials.rs b/consensus/types/src/withdrawal_credentials.rs new file mode 100644 index 0000000000..8d42d4eafd --- /dev/null +++ b/consensus/types/src/withdrawal_credentials.rs @@ -0,0 +1,57 @@ +use crate::*; +use bls::get_withdrawal_credentials; + +pub struct WithdrawalCredentials(Hash256); + +impl WithdrawalCredentials { + pub fn bls(withdrawal_public_key: &PublicKey, spec: &ChainSpec) -> Self { + let withdrawal_credentials = + get_withdrawal_credentials(withdrawal_public_key, spec.bls_withdrawal_prefix_byte); + Self(Hash256::from_slice(&withdrawal_credentials)) + } + + pub fn eth1(withdrawal_address: Address, spec: &ChainSpec) -> Self { + let mut withdrawal_credentials = [0; 32]; + withdrawal_credentials[0] = spec.eth1_address_withdrawal_prefix_byte; + 
withdrawal_credentials[12..].copy_from_slice(withdrawal_address.as_bytes()); + Self(Hash256::from_slice(&withdrawal_credentials)) + } +} + +impl From for Hash256 { + fn from(withdrawal_credentials: WithdrawalCredentials) -> Self { + withdrawal_credentials.0 + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::test_utils::generate_deterministic_keypair; + use std::str::FromStr; + + #[test] + fn bls_withdrawal_credentials() { + let spec = &MainnetEthSpec::default_spec(); + let keypair = generate_deterministic_keypair(0); + let credentials = WithdrawalCredentials::bls(&keypair.pk, spec); + let manually_generated_credentials = + get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte); + let hash: Hash256 = credentials.into(); + assert_eq!(hash[0], spec.bls_withdrawal_prefix_byte); + assert_eq!(hash.as_bytes(), &manually_generated_credentials); + } + + #[test] + fn eth1_withdrawal_credentials() { + let spec = &MainnetEthSpec::default_spec(); + let address = Address::from_str("0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b").unwrap(); + let credentials = WithdrawalCredentials::eth1(address, spec); + let hash: Hash256 = credentials.into(); + assert_eq!( + hash, + Hash256::from_str("0x01000000000000000000000025c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b") + .unwrap() + ) + } +} diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index e7746a2db9..169aa67fdd 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -56,6 +56,7 @@ directory = { path = "../common/directory" } unused_port = { path = "../common/unused_port" } database_manager = { path = "../database_manager" } slasher = { path = "../slasher" } +validator_manager = { path = "../validator_manager" } [dev-dependencies] tempfile = "3.1.0" @@ -64,6 +65,7 @@ slashing_protection = { path = "../validator_client/slashing_protection" } lighthouse_network = { path = "../beacon_node/lighthouse_network" } sensitive_url = { path = "../common/sensitive_url" } eth1 = { path = 
"../beacon_node/eth1" } +eth2 = { path = "../common/eth2" } [[test]] name = "lighthouse_tests" diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index b814639ceb..73e042342a 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -329,6 +329,7 @@ fn main() { .subcommand(validator_client::cli_app()) .subcommand(account_manager::cli_app()) .subcommand(database_manager::cli_app()) + .subcommand(validator_manager::cli_app()) .get_matches(); // Configure the allocator early in the process, before it has the chance to use the default values for @@ -567,6 +568,16 @@ fn run( return Ok(()); } + if let Some(sub_matches) = matches.subcommand_matches(validator_manager::CMD) { + eprintln!("Running validator manager for {} network", network_name); + + // Pass the entire `environment` to the account manager so it can run blocking operations. + validator_manager::run::(sub_matches, environment)?; + + // Exit as soon as account manager returns control. + return Ok(()); + } + if let Some(sub_matches) = matches.subcommand_matches(database_manager::CMD) { info!(log, "Running database manager for {} network", network_name); // Pass the entire `environment` to the database manager so it can run blocking operations. 
diff --git a/lighthouse/tests/main.rs b/lighthouse/tests/main.rs index 806524cab0..bf587f79df 100644 --- a/lighthouse/tests/main.rs +++ b/lighthouse/tests/main.rs @@ -5,3 +5,4 @@ mod beacon_node; mod boot_node; mod exec; mod validator_client; +mod validator_manager; diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 27d7c10e96..9bcfe2a1d5 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -309,6 +309,32 @@ fn http_allow_origin_all_flag() { .run() .with_config(|config| assert_eq!(config.http_api.allow_origin, Some("*".to_string()))); } +#[test] +fn http_allow_keystore_export_default() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(!config.http_api.allow_keystore_export)); +} +#[test] +fn http_allow_keystore_export_present() { + CommandLineTest::new() + .flag("http-allow-keystore-export", None) + .run() + .with_config(|config| assert!(config.http_api.allow_keystore_export)); +} +#[test] +fn http_store_keystore_passwords_in_secrets_dir_default() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(!config.http_api.store_passwords_in_secrets_dir)); +} +#[test] +fn http_store_keystore_passwords_in_secrets_dir_present() { + CommandLineTest::new() + .flag("http-store-passwords-in-secrets-dir", None) + .run() + .with_config(|config| assert!(config.http_api.store_passwords_in_secrets_dir)); +} // Tests for Metrics flags. 
#[test] diff --git a/lighthouse/tests/validator_manager.rs b/lighthouse/tests/validator_manager.rs new file mode 100644 index 0000000000..e0a1e92d6a --- /dev/null +++ b/lighthouse/tests/validator_manager.rs @@ -0,0 +1,344 @@ +use eth2::SensitiveUrl; +use serde::de::DeserializeOwned; +use std::fs; +use std::marker::PhantomData; +use std::path::PathBuf; +use std::process::{Command, Stdio}; +use std::str::FromStr; +use tempfile::{tempdir, TempDir}; +use types::*; +use validator_manager::{ + create_validators::CreateConfig, + import_validators::ImportConfig, + move_validators::{MoveConfig, PasswordSource, Validators}, +}; + +const EXAMPLE_ETH1_ADDRESS: &str = "0x00000000219ab540356cBB839Cbe05303d7705Fa"; + +const EXAMPLE_PUBKEY_0: &str = "0x933ad9491b62059dd065b560d256d8957a8c402cc6e8d8ee7290ae11e8f7329267a8811c397529dac52ae1342ba58c95"; +const EXAMPLE_PUBKEY_1: &str = "0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c"; + +struct CommandLineTest { + cmd: Command, + config_path: PathBuf, + _dir: TempDir, + _phantom: PhantomData, +} + +impl Default for CommandLineTest { + fn default() -> Self { + let dir = tempdir().unwrap(); + let config_path = dir.path().join("config.json"); + let mut cmd = Command::new(env!("CARGO_BIN_EXE_lighthouse")); + cmd.arg("--dump-config") + .arg(config_path.as_os_str()) + .arg("validator-manager") + .stdin(Stdio::null()) + .stdout(Stdio::null()) + .stderr(Stdio::null()); + Self { + cmd, + config_path, + _dir: dir, + _phantom: PhantomData, + } + } +} + +impl CommandLineTest { + fn flag(mut self, flag: &str, value: Option<&str>) -> Self { + self.cmd.arg(flag); + if let Some(value) = value { + self.cmd.arg(value); + } + self + } + + fn run(mut cmd: Command, should_succeed: bool) { + let output = cmd.output().expect("process should complete"); + if output.status.success() != should_succeed { + let stdout = String::from_utf8(output.stdout).unwrap(); + let stderr = 
String::from_utf8(output.stderr).unwrap(); + eprintln!("{}", stdout); + eprintln!("{}", stderr); + panic!( + "Command success was {} when expecting {}", + !should_succeed, should_succeed + ); + } + } +} + +impl CommandLineTest { + fn assert_success(self, func: F) { + Self::run(self.cmd, true); + let contents = fs::read_to_string(self.config_path).unwrap(); + let config: T = serde_json::from_str(&contents).unwrap(); + func(config) + } + + fn assert_failed(self) { + Self::run(self.cmd, false); + } +} + +impl CommandLineTest { + fn validators_create() -> Self { + Self::default().flag("create", None) + } +} + +impl CommandLineTest { + fn validators_import() -> Self { + Self::default().flag("import", None) + } +} + +impl CommandLineTest { + fn validators_move() -> Self { + Self::default().flag("move", None) + } +} + +#[test] +pub fn validator_create_without_output_path() { + CommandLineTest::validators_create().assert_failed(); +} + +#[test] +pub fn validator_create_defaults() { + CommandLineTest::validators_create() + .flag("--output-path", Some("./meow")) + .flag("--count", Some("1")) + .assert_success(|config| { + let expected = CreateConfig { + output_path: PathBuf::from("./meow"), + first_index: 0, + count: 1, + deposit_gwei: MainnetEthSpec::default_spec().max_effective_balance, + mnemonic_path: None, + stdin_inputs: cfg!(windows) || false, + disable_deposits: false, + specify_voting_keystore_password: false, + eth1_withdrawal_address: None, + builder_proposals: None, + fee_recipient: None, + gas_limit: None, + bn_url: None, + force_bls_withdrawal_credentials: false, + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_create_misc_flags() { + CommandLineTest::validators_create() + .flag("--output-path", Some("./meow")) + .flag("--deposit-gwei", Some("42")) + .flag("--first-index", Some("12")) + .flag("--count", Some("9")) + .flag("--mnemonic-path", Some("./woof")) + .flag("--stdin-inputs", None) + .flag("--specify-voting-keystore-password", 
None) + .flag("--eth1-withdrawal-address", Some(EXAMPLE_ETH1_ADDRESS)) + .flag("--builder-proposals", Some("true")) + .flag("--suggested-fee-recipient", Some(EXAMPLE_ETH1_ADDRESS)) + .flag("--gas-limit", Some("1337")) + .flag("--beacon-node", Some("http://localhost:1001")) + .flag("--force-bls-withdrawal-credentials", None) + .assert_success(|config| { + let expected = CreateConfig { + output_path: PathBuf::from("./meow"), + first_index: 12, + count: 9, + deposit_gwei: 42, + mnemonic_path: Some(PathBuf::from("./woof")), + stdin_inputs: true, + disable_deposits: false, + specify_voting_keystore_password: true, + eth1_withdrawal_address: Some(Address::from_str(EXAMPLE_ETH1_ADDRESS).unwrap()), + builder_proposals: Some(true), + fee_recipient: Some(Address::from_str(EXAMPLE_ETH1_ADDRESS).unwrap()), + gas_limit: Some(1337), + bn_url: Some(SensitiveUrl::parse("http://localhost:1001").unwrap()), + force_bls_withdrawal_credentials: true, + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_create_disable_deposits() { + CommandLineTest::validators_create() + .flag("--output-path", Some("./meow")) + .flag("--count", Some("1")) + .flag("--disable-deposits", None) + .flag("--builder-proposals", Some("false")) + .assert_success(|config| { + assert_eq!(config.disable_deposits, true); + assert_eq!(config.builder_proposals, Some(false)); + }); +} + +#[test] +pub fn validator_import_defaults() { + CommandLineTest::validators_import() + .flag("--validators-file", Some("./vals.json")) + .flag("--vc-token", Some("./token.json")) + .assert_success(|config| { + let expected = ImportConfig { + validators_file_path: PathBuf::from("./vals.json"), + vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), + vc_token_path: PathBuf::from("./token.json"), + ignore_duplicates: false, + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_import_misc_flags() { + CommandLineTest::validators_import() + .flag("--validators-file", Some("./vals.json")) 
+ .flag("--vc-token", Some("./token.json")) + .flag("--ignore-duplicates", None) + .assert_success(|config| { + let expected = ImportConfig { + validators_file_path: PathBuf::from("./vals.json"), + vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), + vc_token_path: PathBuf::from("./token.json"), + ignore_duplicates: true, + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_import_missing_token() { + CommandLineTest::validators_import() + .flag("--validators-file", Some("./vals.json")) + .assert_failed(); +} + +#[test] +pub fn validator_import_missing_validators_file() { + CommandLineTest::validators_import() + .flag("--vc-token", Some("./token.json")) + .assert_failed(); +} + +#[test] +pub fn validator_move_defaults() { + CommandLineTest::validators_move() + .flag("--src-vc-url", Some("http://localhost:1")) + .flag("--src-vc-token", Some("./1.json")) + .flag("--dest-vc-url", Some("http://localhost:2")) + .flag("--dest-vc-token", Some("./2.json")) + .flag("--validators", Some("all")) + .assert_success(|config| { + let expected = MoveConfig { + src_vc_url: SensitiveUrl::parse("http://localhost:1").unwrap(), + src_vc_token_path: PathBuf::from("./1.json"), + dest_vc_url: SensitiveUrl::parse("http://localhost:2").unwrap(), + dest_vc_token_path: PathBuf::from("./2.json"), + validators: Validators::All, + builder_proposals: None, + fee_recipient: None, + gas_limit: None, + password_source: PasswordSource::Interactive { + stdin_inputs: cfg!(windows) || false, + }, + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_move_misc_flags_0() { + CommandLineTest::validators_move() + .flag("--src-vc-url", Some("http://localhost:1")) + .flag("--src-vc-token", Some("./1.json")) + .flag("--dest-vc-url", Some("http://localhost:2")) + .flag("--dest-vc-token", Some("./2.json")) + .flag( + "--validators", + Some(&format!("{},{}", EXAMPLE_PUBKEY_0, EXAMPLE_PUBKEY_1)), + ) + .flag("--builder-proposals", Some("true")) + 
.flag("--suggested-fee-recipient", Some(EXAMPLE_ETH1_ADDRESS)) + .flag("--gas-limit", Some("1337")) + .flag("--stdin-inputs", None) + .assert_success(|config| { + let expected = MoveConfig { + src_vc_url: SensitiveUrl::parse("http://localhost:1").unwrap(), + src_vc_token_path: PathBuf::from("./1.json"), + dest_vc_url: SensitiveUrl::parse("http://localhost:2").unwrap(), + dest_vc_token_path: PathBuf::from("./2.json"), + validators: Validators::Specific(vec![ + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_0).unwrap(), + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_1).unwrap(), + ]), + builder_proposals: Some(true), + fee_recipient: Some(Address::from_str(EXAMPLE_ETH1_ADDRESS).unwrap()), + gas_limit: Some(1337), + password_source: PasswordSource::Interactive { stdin_inputs: true }, + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_move_misc_flags_1() { + CommandLineTest::validators_move() + .flag("--src-vc-url", Some("http://localhost:1")) + .flag("--src-vc-token", Some("./1.json")) + .flag("--dest-vc-url", Some("http://localhost:2")) + .flag("--dest-vc-token", Some("./2.json")) + .flag("--validators", Some(&format!("{}", EXAMPLE_PUBKEY_0))) + .flag("--builder-proposals", Some("false")) + .assert_success(|config| { + let expected = MoveConfig { + src_vc_url: SensitiveUrl::parse("http://localhost:1").unwrap(), + src_vc_token_path: PathBuf::from("./1.json"), + dest_vc_url: SensitiveUrl::parse("http://localhost:2").unwrap(), + dest_vc_token_path: PathBuf::from("./2.json"), + validators: Validators::Specific(vec![ + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_0).unwrap() + ]), + builder_proposals: Some(false), + fee_recipient: None, + gas_limit: None, + password_source: PasswordSource::Interactive { + stdin_inputs: cfg!(windows) || false, + }, + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_move_count() { + CommandLineTest::validators_move() + .flag("--src-vc-url", Some("http://localhost:1")) + .flag("--src-vc-token", 
Some("./1.json")) + .flag("--dest-vc-url", Some("http://localhost:2")) + .flag("--dest-vc-token", Some("./2.json")) + .flag("--count", Some("42")) + .assert_success(|config| { + let expected = MoveConfig { + src_vc_url: SensitiveUrl::parse("http://localhost:1").unwrap(), + src_vc_token_path: PathBuf::from("./1.json"), + dest_vc_url: SensitiveUrl::parse("http://localhost:2").unwrap(), + dest_vc_token_path: PathBuf::from("./2.json"), + validators: Validators::Count(42), + builder_proposals: None, + fee_recipient: None, + gas_limit: None, + password_source: PasswordSource::Interactive { + stdin_inputs: cfg!(windows) || false, + }, + }; + assert_eq!(expected, config); + }); +} diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 494ebcb3df..200db73167 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -10,7 +10,6 @@ path = "src/lib.rs" [dev-dependencies] tokio = { version = "1.14.0", features = ["time", "rt-multi-thread", "macros"] } -logging = { path = "../common/logging" } [dependencies] tree_hash = "0.5.0" @@ -63,4 +62,3 @@ malloc_utils = { path = "../common/malloc_utils" } sysinfo = "0.26.5" system_health = { path = "../common/system_health" } logging = { path = "../common/logging" } - diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index a7118aa945..f0a9258c74 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -2,12 +2,12 @@ use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; use crate::{ duties_service::{DutiesService, DutyAndProof}, http_metrics::metrics, - validator_store::ValidatorStore, + validator_store::{Error as ValidatorStoreError, ValidatorStore}, OfflineOnFailure, }; use environment::RuntimeContext; use futures::future::join_all; -use slog::{crit, error, info, trace}; +use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use 
std::collections::HashMap; use std::ops::Deref; @@ -395,6 +395,20 @@ impl AttestationService { .await { Ok(()) => Some((attestation, duty.validator_index)), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. + warn!( + log, + "Missing pubkey for attestation"; + "info" => "a validator may have recently been removed from this VC", + "pubkey" => ?pubkey, + "validator" => ?duty.pubkey, + "committee_index" => committee_index, + "slot" => slot.as_u64(), + ); + None + } Err(e) => { crit!( log, @@ -527,10 +541,20 @@ impl AttestationService { .await { Ok(aggregate) => Some(aggregate), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. + debug!( + log, + "Missing pubkey for aggregate"; + "pubkey" => ?pubkey, + ); + None + } Err(e) => { crit!( log, - "Failed to sign attestation"; + "Failed to sign aggregate"; "error" => ?e, "pubkey" => ?duty.pubkey, ); diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index d22e6c95f3..2a09455b6f 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -5,7 +5,10 @@ use crate::{ graffiti_file::GraffitiFile, OfflineOnFailure, }; -use crate::{http_metrics::metrics, validator_store::ValidatorStore}; +use crate::{ + http_metrics::metrics, + validator_store::{Error as ValidatorStoreError, ValidatorStore}, +}; use environment::RuntimeContext; use eth2::BeaconNodeHttpClient; use slog::{crit, debug, error, info, trace, warn}; @@ -417,17 +420,31 @@ impl BlockService { BlockError::Recoverable("Unable to determine current slot from clock".to_string()) })?; - let randao_reveal = self + let randao_reveal = match self .validator_store .randao_reveal(validator_pubkey, slot.epoch(E::slots_per_epoch())) .await - .map_err(|e| { - BlockError::Recoverable(format!( + { + Ok(signature) => signature.into(), + 
Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently removed + // via the API. + warn!( + log, + "Missing pubkey for block randao"; + "info" => "a validator may have recently been removed from this VC", + "pubkey" => ?pubkey, + "slot" => ?slot + ); + return Ok(()); + } + Err(e) => { + return Err(BlockError::Recoverable(format!( "Unable to produce randao reveal signature: {:?}", e - )) - })? - .into(); + ))) + } + }; let graffiti = determine_graffiti( &validator_pubkey, @@ -522,11 +539,31 @@ impl BlockService { .await?; let signing_timer = metrics::start_timer(&metrics::BLOCK_SIGNING_TIMES); - let signed_block = self_ref + let signed_block = match self_ref .validator_store .sign_block::(*validator_pubkey_ref, block, current_slot) .await - .map_err(|e| BlockError::Recoverable(format!("Unable to sign block: {:?}", e)))?; + { + Ok(block) => block, + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently removed + // via the API. 
+ warn!( + log, + "Missing pubkey for block"; + "info" => "a validator may have recently been removed from this VC", + "pubkey" => ?pubkey, + "slot" => ?slot + ); + return Ok(()); + } + Err(e) => { + return Err(BlockError::Recoverable(format!( + "Unable to sign block: {:?}", + e + ))) + } + }; let signing_time_ms = Duration::from_secs_f64(signing_timer.map_or(0.0, |t| t.stop_and_record())).as_millis(); diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 436b8eb4d5..0789ac78a0 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -204,6 +204,26 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { address of this server (e.g., http://localhost:5062).") .takes_value(true), ) + .arg( + Arg::with_name("http-allow-keystore-export") + .long("http-allow-keystore-export") + .help("If present, allow access to the DELETE /lighthouse/keystores HTTP \ + API method, which allows exporting keystores and passwords to HTTP API \ + consumers who have access to the API token. 
This method is useful for \ + exporting validators, however it should be used with caution since it \ + exposes private key data to authorized users.") + .required(false) + .takes_value(false), + ) + .arg( + Arg::with_name("http-store-passwords-in-secrets-dir") + .long("http-store-passwords-in-secrets-dir") + .help("If present, any validators created via the HTTP will have keystore \ + passwords stored in the secrets-dir rather than the validator \ + definitions file.") + .required(false) + .takes_value(false), + ) /* Prometheus metrics HTTP server related arguments */ .arg( Arg::with_name("metrics") diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index e0dd12e100..7c662db937 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -294,6 +294,14 @@ impl Config { config.http_api.allow_origin = Some(allow_origin.to_string()); } + if cli_args.is_present("http-allow-keystore-export") { + config.http_api.allow_keystore_export = true; + } + + if cli_args.is_present("http-store-passwords-in-secrets-dir") { + config.http_api.store_passwords_in_secrets_dir = true; + } + /* * Prometheus metrics HTTP server */ diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 83cdb936aa..535f6aeb0a 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -932,6 +932,20 @@ async fn fill_in_selection_proofs( for result in duty_and_proof_results { let duty_and_proof = match result { Ok(duty_and_proof) => duty_and_proof, + Err(Error::FailedToProduceSelectionProof( + ValidatorStoreError::UnknownPubkey(pubkey), + )) => { + // A pubkey can be missing when a validator was recently + // removed via the API. + warn!( + log, + "Missing pubkey for duty and proof"; + "info" => "a validator may have recently been removed from this VC", + "pubkey" => ?pubkey, + ); + // Do not abort the entire batch for a single failure. 
+ continue; + } Err(e) => { error!( log, diff --git a/validator_client/src/duties_service/sync.rs b/validator_client/src/duties_service/sync.rs index 7a852091aa..1e66d947a2 100644 --- a/validator_client/src/duties_service/sync.rs +++ b/validator_client/src/duties_service/sync.rs @@ -2,6 +2,7 @@ use crate::beacon_node_fallback::{OfflineOnFailure, RequireSynced}; use crate::{ doppelganger_service::DoppelgangerStatus, duties_service::{DutiesService, Error}, + validator_store::Error as ValidatorStoreError, }; use futures::future::join_all; use itertools::Itertools; @@ -539,6 +540,18 @@ pub async fn fill_in_aggregation_proofs( .await { Ok(proof) => proof, + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. + debug!( + log, + "Missing pubkey for sync selection proof"; + "pubkey" => ?pubkey, + "pubkey" => ?duty.pubkey, + "slot" => slot, + ); + return None; + } Err(e) => { warn!( log, diff --git a/validator_client/src/http_api/create_validator.rs b/validator_client/src/http_api/create_validator.rs index f3107cfedb..52336afa59 100644 --- a/validator_client/src/http_api/create_validator.rs +++ b/validator_client/src/http_api/create_validator.rs @@ -1,15 +1,16 @@ use crate::ValidatorStore; -use account_utils::validator_definitions::ValidatorDefinition; +use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition}; use account_utils::{ + eth2_keystore::Keystore, eth2_wallet::{bip39::Mnemonic, WalletBuilder}, random_mnemonic, random_password, ZeroizeString, }; use eth2::lighthouse_vc::types::{self as api_types}; use slot_clock::SlotClock; -use std::path::Path; +use std::path::{Path, PathBuf}; use types::ChainSpec; use types::EthSpec; -use validator_dir::Builder as ValidatorDirBuilder; +use validator_dir::{keystore_password_path, Builder as ValidatorDirBuilder}; /// Create some validator EIP-2335 keystores and store them on disk. 
Then, enroll the validators in /// this validator client. @@ -27,6 +28,7 @@ pub async fn create_validators_mnemonic, T: 'static + SlotClock, key_derivation_path_offset: Option, validator_requests: &[api_types::ValidatorRequest], validator_dir: P, + secrets_dir: Option, validator_store: &ValidatorStore, spec: &ChainSpec, ) -> Result<(Vec, Mnemonic), warp::Rejection> { @@ -95,7 +97,11 @@ pub async fn create_validators_mnemonic, T: 'static + SlotClock, )) })?; + let voting_password_storage = + get_voting_password_storage(&secrets_dir, &keystores.voting, &voting_password_string)?; + let validator_dir = ValidatorDirBuilder::new(validator_dir.as_ref().into()) + .password_dir_opt(secrets_dir.clone()) .voting_keystore(keystores.voting, voting_password.as_bytes()) .withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes()) .create_eth1_tx_data(request.deposit_gwei, spec) @@ -136,7 +142,7 @@ pub async fn create_validators_mnemonic, T: 'static + SlotClock, validator_store .add_validator_keystore( voting_keystore_path, - voting_password_string, + voting_password_storage, request.enable, request.graffiti.clone(), request.suggested_fee_recipient, @@ -185,3 +191,26 @@ pub async fn create_validators_web3signer( Ok(()) } + +/// Attempts to return a `PasswordStorage::File` if `secrets_dir` is defined. +/// Otherwise, returns a `PasswordStorage::ValidatorDefinitions`. 
+pub fn get_voting_password_storage( + secrets_dir: &Option, + voting_keystore: &Keystore, + voting_password_string: &ZeroizeString, +) -> Result { + if let Some(secrets_dir) = &secrets_dir { + let password_path = keystore_password_path(secrets_dir, voting_keystore); + if password_path.exists() { + Err(warp_utils::reject::custom_server_error( + "Duplicate keystore password path".to_string(), + )) + } else { + Ok(PasswordStorage::File(password_path)) + } + } else { + Ok(PasswordStorage::ValidatorDefinitions( + voting_password_string.clone(), + )) + } +} diff --git a/validator_client/src/http_api/keystores.rs b/validator_client/src/http_api/keystores.rs index b886f60435..c2d9b4d67f 100644 --- a/validator_client/src/http_api/keystores.rs +++ b/validator_client/src/http_api/keystores.rs @@ -3,11 +3,14 @@ use crate::{ initialized_validators::Error, signing_method::SigningMethod, InitializedValidators, ValidatorStore, }; -use account_utils::ZeroizeString; -use eth2::lighthouse_vc::std_types::{ - DeleteKeystoreStatus, DeleteKeystoresRequest, DeleteKeystoresResponse, ImportKeystoreStatus, - ImportKeystoresRequest, ImportKeystoresResponse, InterchangeJsonStr, KeystoreJsonStr, - ListKeystoresResponse, SingleKeystoreResponse, Status, +use account_utils::{validator_definitions::PasswordStorage, ZeroizeString}; +use eth2::lighthouse_vc::{ + std_types::{ + DeleteKeystoreStatus, DeleteKeystoresRequest, DeleteKeystoresResponse, + ImportKeystoreStatus, ImportKeystoresRequest, ImportKeystoresResponse, InterchangeJsonStr, + KeystoreJsonStr, ListKeystoresResponse, SingleKeystoreResponse, Status, + }, + types::{ExportKeystoresResponse, SingleExportKeystoresResponse}, }; use eth2_keystore::Keystore; use slog::{info, warn, Logger}; @@ -17,7 +20,7 @@ use std::sync::Arc; use task_executor::TaskExecutor; use tokio::runtime::Handle; use types::{EthSpec, PublicKeyBytes}; -use validator_dir::Builder as ValidatorDirBuilder; +use validator_dir::{keystore_password_path, Builder as 
ValidatorDirBuilder}; use warp::Rejection; use warp_utils::reject::{custom_bad_request, custom_server_error}; @@ -58,6 +61,7 @@ pub fn list( pub fn import( request: ImportKeystoresRequest, validator_dir: PathBuf, + secrets_dir: Option, validator_store: Arc>, task_executor: TaskExecutor, log: Logger, @@ -128,6 +132,7 @@ pub fn import( keystore, password, validator_dir.clone(), + secrets_dir.clone(), &validator_store, handle, ) { @@ -158,6 +163,7 @@ fn import_single_keystore( keystore: Keystore, password: ZeroizeString, validator_dir_path: PathBuf, + secrets_dir: Option, validator_store: &ValidatorStore, handle: Handle, ) -> Result { @@ -179,6 +185,16 @@ fn import_single_keystore( } } + let password_storage = if let Some(secrets_dir) = &secrets_dir { + let password_path = keystore_password_path(secrets_dir, &keystore); + if password_path.exists() { + return Ok(ImportKeystoreStatus::Duplicate); + } + PasswordStorage::File(password_path) + } else { + PasswordStorage::ValidatorDefinitions(password.clone()) + }; + // Check that the password is correct. // In future we should re-structure to avoid the double decryption here. 
It's not as simple // as removing this check because `add_validator_keystore` will break if provided with an @@ -189,6 +205,7 @@ fn import_single_keystore( .map_err(|e| format!("incorrect password: {:?}", e))?; let validator_dir = ValidatorDirBuilder::new(validator_dir_path) + .password_dir_opt(secrets_dir) .voting_keystore(keystore, password.as_ref()) .store_withdrawal_keystore(false) .build() @@ -201,7 +218,7 @@ fn import_single_keystore( handle .block_on(validator_store.add_validator_keystore( voting_keystore_path, - password, + password_storage, true, None, None, @@ -219,11 +236,28 @@ pub fn delete( task_executor: TaskExecutor, log: Logger, ) -> Result { + let export_response = export(request, validator_store, task_executor, log)?; + Ok(DeleteKeystoresResponse { + data: export_response + .data + .into_iter() + .map(|response| response.status) + .collect(), + slashing_protection: export_response.slashing_protection, + }) +} + +pub fn export( + request: DeleteKeystoresRequest, + validator_store: Arc>, + task_executor: TaskExecutor, + log: Logger, +) -> Result { // Remove from initialized validators. let initialized_validators_rwlock = validator_store.initialized_validators(); let mut initialized_validators = initialized_validators_rwlock.write(); - let mut statuses = request + let mut responses = request .pubkeys .iter() .map(|pubkey_bytes| { @@ -232,7 +266,7 @@ pub fn delete( &mut initialized_validators, task_executor.clone(), ) { - Ok(status) => Status::ok(status), + Ok(status) => status, Err(error) => { warn!( log, @@ -240,7 +274,11 @@ pub fn delete( "pubkey" => ?pubkey_bytes, "error" => ?error, ); - Status::error(DeleteKeystoreStatus::Error, error) + SingleExportKeystoresResponse { + status: Status::error(DeleteKeystoreStatus::Error, error), + validating_keystore: None, + validating_keystore_password: None, + } } } }) @@ -263,19 +301,19 @@ pub fn delete( })?; // Update stasuses based on availability of slashing protection data. 
- for (pubkey, status) in request.pubkeys.iter().zip(statuses.iter_mut()) { - if status.status == DeleteKeystoreStatus::NotFound + for (pubkey, response) in request.pubkeys.iter().zip(responses.iter_mut()) { + if response.status.status == DeleteKeystoreStatus::NotFound && slashing_protection .data .iter() .any(|interchange_data| interchange_data.pubkey == *pubkey) { - status.status = DeleteKeystoreStatus::NotActive; + response.status.status = DeleteKeystoreStatus::NotActive; } } - Ok(DeleteKeystoresResponse { - data: statuses, + Ok(ExportKeystoresResponse { + data: responses, slashing_protection, }) } @@ -284,7 +322,7 @@ fn delete_single_keystore( pubkey_bytes: &PublicKeyBytes, initialized_validators: &mut InitializedValidators, task_executor: TaskExecutor, -) -> Result { +) -> Result { if let Some(handle) = task_executor.handle() { let pubkey = pubkey_bytes .decompress() @@ -292,9 +330,22 @@ fn delete_single_keystore( match handle.block_on(initialized_validators.delete_definition_and_keystore(&pubkey, true)) { - Ok(_) => Ok(DeleteKeystoreStatus::Deleted), + Ok(Some(keystore_and_password)) => Ok(SingleExportKeystoresResponse { + status: Status::ok(DeleteKeystoreStatus::Deleted), + validating_keystore: Some(KeystoreJsonStr(keystore_and_password.keystore)), + validating_keystore_password: keystore_and_password.password, + }), + Ok(None) => Ok(SingleExportKeystoresResponse { + status: Status::ok(DeleteKeystoreStatus::Deleted), + validating_keystore: None, + validating_keystore_password: None, + }), Err(e) => match e { - Error::ValidatorNotInitialized(_) => Ok(DeleteKeystoreStatus::NotFound), + Error::ValidatorNotInitialized(_) => Ok(SingleExportKeystoresResponse { + status: Status::ok(DeleteKeystoreStatus::NotFound), + validating_keystore: None, + validating_keystore_password: None, + }), _ => Err(format!("unable to disable and delete: {:?}", e)), }, } diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index f08c8da1bd..f654833cbb 
100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -5,6 +5,8 @@ mod keystores; mod remotekeys; mod tests; +pub mod test_utils; + use crate::http_api::create_signed_voluntary_exit::create_signed_voluntary_exit; use crate::{determine_graffiti, GraffitiFile, ValidatorStore}; use account_utils::{ @@ -12,7 +14,9 @@ use account_utils::{ validator_definitions::{SigningDefinition, ValidatorDefinition, Web3SignerDefinition}, }; pub use api_secret::ApiSecret; -use create_validator::{create_validators_mnemonic, create_validators_web3signer}; +use create_validator::{ + create_validators_mnemonic, create_validators_web3signer, get_voting_password_storage, +}; use eth2::lighthouse_vc::{ std_types::{AuthResponse, GetFeeRecipientResponse, GetGasLimitResponse}, types::{self as api_types, GenericResponse, Graffiti, PublicKey, PublicKeyBytes}, @@ -71,6 +75,7 @@ pub struct Context { pub api_secret: ApiSecret, pub validator_store: Option>>, pub validator_dir: Option, + pub secrets_dir: Option, pub graffiti_file: Option, pub graffiti_flag: Option, pub spec: ChainSpec, @@ -88,6 +93,8 @@ pub struct Config { pub listen_addr: IpAddr, pub listen_port: u16, pub allow_origin: Option, + pub allow_keystore_export: bool, + pub store_passwords_in_secrets_dir: bool, } impl Default for Config { @@ -97,6 +104,8 @@ impl Default for Config { listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), listen_port: 5062, allow_origin: None, + allow_keystore_export: false, + store_passwords_in_secrets_dir: false, } } } @@ -121,6 +130,8 @@ pub fn serve( shutdown: impl Future + Send + Sync + 'static, ) -> Result<(SocketAddr, impl Future), Error> { let config = &ctx.config; + let allow_keystore_export = config.allow_keystore_export; + let store_passwords_in_secrets_dir = config.store_passwords_in_secrets_dir; let log = ctx.log.clone(); // Configure CORS. 
@@ -187,6 +198,17 @@ pub fn serve( }) }); + let inner_secrets_dir = ctx.secrets_dir.clone(); + let secrets_dir_filter = warp::any().map(move || inner_secrets_dir.clone()).and_then( + |secrets_dir: Option<_>| async move { + secrets_dir.ok_or_else(|| { + warp_utils::reject::custom_not_found( + "secrets_dir directory is not initialized.".to_string(), + ) + }) + }, + ); + let inner_graffiti_file = ctx.graffiti_file.clone(); let graffiti_file_filter = warp::any().map(move || inner_graffiti_file.clone()); @@ -394,18 +416,21 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(validator_dir_filter.clone()) + .and(secrets_dir_filter.clone()) .and(validator_store_filter.clone()) .and(spec_filter.clone()) .and(signer.clone()) .and(task_executor_filter.clone()) .and_then( - |body: Vec, - validator_dir: PathBuf, - validator_store: Arc>, - spec: Arc, - signer, - task_executor: TaskExecutor| { + move |body: Vec, + validator_dir: PathBuf, + secrets_dir: PathBuf, + validator_store: Arc>, + spec: Arc, + signer, + task_executor: TaskExecutor| { blocking_signed_json_task(signer, move || { + let secrets_dir = store_passwords_in_secrets_dir.then_some(secrets_dir); if let Some(handle) = task_executor.handle() { let (validators, mnemonic) = handle.block_on(create_validators_mnemonic( @@ -413,6 +438,7 @@ pub fn serve( None, &body, &validator_dir, + secrets_dir, &validator_store, &spec, ))?; @@ -437,18 +463,21 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(validator_dir_filter.clone()) + .and(secrets_dir_filter.clone()) .and(validator_store_filter.clone()) .and(spec_filter) .and(signer.clone()) .and(task_executor_filter.clone()) .and_then( - |body: api_types::CreateValidatorsMnemonicRequest, - validator_dir: PathBuf, - validator_store: Arc>, - spec: Arc, - signer, - task_executor: TaskExecutor| { + move |body: api_types::CreateValidatorsMnemonicRequest, + validator_dir: PathBuf, + secrets_dir: PathBuf, + validator_store: Arc>, + spec: Arc, + 
signer, + task_executor: TaskExecutor| { blocking_signed_json_task(signer, move || { + let secrets_dir = store_passwords_in_secrets_dir.then_some(secrets_dir); if let Some(handle) = task_executor.handle() { let mnemonic = mnemonic_from_phrase(body.mnemonic.as_str()).map_err(|e| { @@ -463,6 +492,7 @@ pub fn serve( Some(body.key_derivation_path_offset), &body.validators, &validator_dir, + secrets_dir, &validator_store, &spec, ))?; @@ -483,15 +513,17 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(validator_dir_filter.clone()) + .and(secrets_dir_filter.clone()) .and(validator_store_filter.clone()) .and(signer.clone()) .and(task_executor_filter.clone()) .and_then( - |body: api_types::KeystoreValidatorsPostRequest, - validator_dir: PathBuf, - validator_store: Arc>, - signer, - task_executor: TaskExecutor| { + move |body: api_types::KeystoreValidatorsPostRequest, + validator_dir: PathBuf, + secrets_dir: PathBuf, + validator_store: Arc>, + signer, + task_executor: TaskExecutor| { blocking_signed_json_task(signer, move || { // Check to ensure the password is correct. let keypair = body @@ -504,7 +536,12 @@ pub fn serve( )) })?; + let secrets_dir = store_passwords_in_secrets_dir.then_some(secrets_dir); + let password_storage = + get_voting_password_storage(&secrets_dir, &body.keystore, &body.password)?; + let validator_dir = ValidatorDirBuilder::new(validator_dir.clone()) + .password_dir_opt(secrets_dir) .voting_keystore(body.keystore.clone(), body.password.as_ref()) .store_withdrawal_keystore(false) .build() @@ -518,7 +555,6 @@ pub fn serve( // Drop validator dir so that `add_validator_keystore` can re-lock the keystore. 
let voting_keystore_path = validator_dir.voting_keystore_path(); drop(validator_dir); - let voting_password = body.password.clone(); let graffiti = body.graffiti.clone(); let suggested_fee_recipient = body.suggested_fee_recipient; let gas_limit = body.gas_limit; @@ -529,7 +565,7 @@ pub fn serve( handle .block_on(validator_store.add_validator_keystore( voting_keystore_path, - voting_password, + password_storage, body.enable, graffiti, suggested_fee_recipient, @@ -698,6 +734,29 @@ pub fn serve( }) }); + // DELETE /lighthouse/keystores + let delete_lighthouse_keystores = warp::path("lighthouse") + .and(warp::path("keystores")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(signer.clone()) + .and(validator_store_filter.clone()) + .and(task_executor_filter.clone()) + .and(log_filter.clone()) + .and_then( + move |request, signer, validator_store, task_executor, log| { + blocking_signed_json_task(signer, move || { + if allow_keystore_export { + keystores::export(request, validator_store, task_executor, log) + } else { + Err(warp_utils::reject::custom_bad_request( + "keystore export is disabled".to_string(), + )) + } + }) + }, + ); + // Standard key-manager endpoints. 
let eth_v1 = warp::path("eth").and(warp::path("v1")); let std_keystores = eth_v1.and(warp::path("keystores")).and(warp::path::end()); @@ -982,13 +1041,28 @@ pub fn serve( .and(warp::body::json()) .and(signer.clone()) .and(validator_dir_filter) + .and(secrets_dir_filter) .and(validator_store_filter.clone()) .and(task_executor_filter.clone()) .and(log_filter.clone()) .and_then( - |request, signer, validator_dir, validator_store, task_executor, log| { + move |request, + signer, + validator_dir, + secrets_dir, + validator_store, + task_executor, + log| { + let secrets_dir = store_passwords_in_secrets_dir.then_some(secrets_dir); blocking_signed_json_task(signer, move || { - keystores::import(request, validator_dir, validator_store, task_executor, log) + keystores::import( + request, + validator_dir, + secrets_dir, + validator_store, + task_executor, + log, + ) }) }, ); @@ -1117,7 +1191,8 @@ pub fn serve( )) .or(warp::patch().and(patch_validators)) .or(warp::delete().and( - delete_fee_recipient + delete_lighthouse_keystores + .or(delete_fee_recipient) .or(delete_gas_limit) .or(delete_std_keystores) .or(delete_std_remotekeys), diff --git a/validator_client/src/http_api/test_utils.rs b/validator_client/src/http_api/test_utils.rs new file mode 100644 index 0000000000..c7558dd586 --- /dev/null +++ b/validator_client/src/http_api/test_utils.rs @@ -0,0 +1,631 @@ +use crate::doppelganger_service::DoppelgangerService; +use crate::key_cache::{KeyCache, CACHE_FILENAME}; +use crate::{ + http_api::{ApiSecret, Config as HttpConfig, Context}, + initialized_validators::{InitializedValidators, OnDecryptFailure}, + Config, ValidatorDefinitions, ValidatorStore, +}; +use account_utils::{ + eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password, + ZeroizeString, +}; +use deposit_contract::decode_eth1_tx_data; +use eth2::{ + lighthouse_vc::{http_client::ValidatorClientHttpClient, types::*}, + types::ErrorMessage as ApiErrorMessage, + Error as ApiError, +}; +use 
eth2_keystore::KeystoreBuilder; +use logging::test_logger; +use parking_lot::RwLock; +use sensitive_url::SensitiveUrl; +use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; +use slot_clock::{SlotClock, TestingSlotClock}; +use std::future::Future; +use std::marker::PhantomData; +use std::net::{IpAddr, Ipv4Addr}; +use std::sync::Arc; +use std::time::Duration; +use task_executor::test_utils::TestRuntime; +use tempfile::{tempdir, TempDir}; +use tokio::sync::oneshot; + +pub const PASSWORD_BYTES: &[u8] = &[42, 50, 37]; +pub const TEST_DEFAULT_FEE_RECIPIENT: Address = Address::repeat_byte(42); + +type E = MainnetEthSpec; + +pub struct HdValidatorScenario { + pub count: usize, + pub specify_mnemonic: bool, + pub key_derivation_path_offset: u32, + pub disabled: Vec, +} + +pub struct KeystoreValidatorScenario { + pub enabled: bool, + pub correct_password: bool, +} + +pub struct Web3SignerValidatorScenario { + pub count: usize, + pub enabled: bool, +} + +pub struct ApiTester { + pub client: ValidatorClientHttpClient, + pub initialized_validators: Arc>, + pub validator_store: Arc>, + pub url: SensitiveUrl, + pub api_token: String, + pub test_runtime: TestRuntime, + pub _server_shutdown: oneshot::Sender<()>, + pub validator_dir: TempDir, + pub secrets_dir: TempDir, +} + +impl ApiTester { + pub async fn new() -> Self { + Self::new_with_http_config(Self::default_http_config()).await + } + + pub async fn new_with_http_config(http_config: HttpConfig) -> Self { + let log = test_logger(); + + let validator_dir = tempdir().unwrap(); + let secrets_dir = tempdir().unwrap(); + + let validator_defs = ValidatorDefinitions::open_or_create(validator_dir.path()).unwrap(); + + let initialized_validators = InitializedValidators::from_definitions( + validator_defs, + validator_dir.path().into(), + log.clone(), + ) + .await + .unwrap(); + + let api_secret = ApiSecret::create_or_open(validator_dir.path()).unwrap(); + let api_pubkey = api_secret.api_token(); + + let config = 
Config { + validator_dir: validator_dir.path().into(), + secrets_dir: secrets_dir.path().into(), + fee_recipient: Some(TEST_DEFAULT_FEE_RECIPIENT), + ..Default::default() + }; + + let spec = E::default_spec(); + + let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME); + let slashing_protection = SlashingDatabase::open_or_create(&slashing_db_path).unwrap(); + + let slot_clock = + TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1)); + + let test_runtime = TestRuntime::default(); + + let validator_store = Arc::new(ValidatorStore::<_, E>::new( + initialized_validators, + slashing_protection, + Hash256::repeat_byte(42), + spec, + Some(Arc::new(DoppelgangerService::new(log.clone()))), + slot_clock.clone(), + &config, + test_runtime.task_executor.clone(), + log.clone(), + )); + + validator_store + .register_all_in_doppelganger_protection_if_enabled() + .expect("Should attach doppelganger service"); + + let initialized_validators = validator_store.initialized_validators(); + + let context = Arc::new(Context { + task_executor: test_runtime.task_executor.clone(), + api_secret, + validator_dir: Some(validator_dir.path().into()), + secrets_dir: Some(secrets_dir.path().into()), + validator_store: Some(validator_store.clone()), + graffiti_file: None, + graffiti_flag: Some(Graffiti::default()), + spec: E::default_spec(), + config: http_config, + log, + sse_logging_components: None, + slot_clock, + _phantom: PhantomData, + }); + let ctx = context; + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + let server_shutdown = async { + // It's not really interesting why this triggered, just that it happened. 
+ let _ = shutdown_rx.await; + }; + let (listening_socket, server) = super::serve(ctx, server_shutdown).unwrap(); + + tokio::spawn(server); + + let url = SensitiveUrl::parse(&format!( + "http://{}:{}", + listening_socket.ip(), + listening_socket.port() + )) + .unwrap(); + + let client = ValidatorClientHttpClient::new(url.clone(), api_pubkey.clone()).unwrap(); + + Self { + client, + initialized_validators, + validator_store, + url, + api_token: api_pubkey, + test_runtime, + _server_shutdown: shutdown_tx, + validator_dir, + secrets_dir, + } + } + + pub fn default_http_config() -> HttpConfig { + HttpConfig { + enabled: true, + listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + listen_port: 0, + allow_origin: None, + allow_keystore_export: true, + store_passwords_in_secrets_dir: false, + } + } + + /// Checks that the key cache exists and can be decrypted with the current + /// set of known validators. + #[allow(clippy::await_holding_lock)] // This is a test, so it should be fine. + pub async fn ensure_key_cache_consistency(&self) { + assert!( + self.validator_dir.as_ref().join(CACHE_FILENAME).exists(), + "the key cache should exist" + ); + let key_cache = + KeyCache::open_or_create(self.validator_dir.as_ref()).expect("should open a key cache"); + + self.initialized_validators + .read() + .decrypt_key_cache(key_cache, &mut <_>::default(), OnDecryptFailure::Error) + .await + .expect("key cache should decypt"); + } + + pub fn invalid_token_client(&self) -> ValidatorClientHttpClient { + let tmp = tempdir().unwrap(); + let api_secret = ApiSecret::create_or_open(tmp.path()).unwrap(); + let invalid_pubkey = api_secret.api_token(); + ValidatorClientHttpClient::new(self.url.clone(), invalid_pubkey).unwrap() + } + + pub async fn test_with_invalid_auth(self, func: F) -> Self + where + F: Fn(ValidatorClientHttpClient) -> A, + A: Future>, + { + /* + * Test with an invalid Authorization header. 
+ */ + match func(self.invalid_token_client()).await { + Err(ApiError::ServerMessage(ApiErrorMessage { code: 403, .. })) => (), + Err(other) => panic!("expected authorized error, got {:?}", other), + Ok(_) => panic!("expected authorized error, got Ok"), + } + + /* + * Test with a missing Authorization header. + */ + let mut missing_token_client = self.client.clone(); + missing_token_client.send_authorization_header(false); + match func(missing_token_client).await { + Err(ApiError::ServerMessage(ApiErrorMessage { + code: 401, message, .. + })) if message.contains("missing Authorization header") => (), + Err(other) => panic!("expected missing header error, got {:?}", other), + Ok(_) => panic!("expected missing header error, got Ok"), + } + + self + } + + pub fn invalidate_api_token(mut self) -> Self { + self.client = self.invalid_token_client(); + self + } + + pub async fn test_get_lighthouse_version_invalid(self) -> Self { + self.client.get_lighthouse_version().await.unwrap_err(); + self + } + + pub async fn test_get_lighthouse_spec(self) -> Self { + let result = self + .client + .get_lighthouse_spec::() + .await + .map(|res| ConfigAndPreset::Bellatrix(res.data)) + .unwrap(); + let expected = ConfigAndPreset::from_chain_spec::(&E::default_spec(), None); + + assert_eq!(result, expected); + + self + } + + pub async fn test_get_lighthouse_version(self) -> Self { + let result = self.client.get_lighthouse_version().await.unwrap().data; + + let expected = VersionData { + version: lighthouse_version::version_with_platform(), + }; + + assert_eq!(result, expected); + + self + } + + #[cfg(target_os = "linux")] + pub async fn test_get_lighthouse_health(self) -> Self { + self.client.get_lighthouse_health().await.unwrap(); + + self + } + + #[cfg(not(target_os = "linux"))] + pub async fn test_get_lighthouse_health(self) -> Self { + self.client.get_lighthouse_health().await.unwrap_err(); + + self + } + pub fn vals_total(&self) -> usize { + 
self.initialized_validators.read().num_total() + } + + pub fn vals_enabled(&self) -> usize { + self.initialized_validators.read().num_enabled() + } + + pub fn assert_enabled_validators_count(self, count: usize) -> Self { + assert_eq!(self.vals_enabled(), count); + self + } + + pub fn assert_validators_count(self, count: usize) -> Self { + assert_eq!(self.vals_total(), count); + self + } + + pub async fn create_hd_validators(self, s: HdValidatorScenario) -> Self { + let initial_vals = self.vals_total(); + let initial_enabled_vals = self.vals_enabled(); + + let validators = (0..s.count) + .map(|i| ValidatorRequest { + enable: !s.disabled.contains(&i), + description: format!("boi #{}", i), + graffiti: None, + suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, + deposit_gwei: E::default_spec().max_effective_balance, + }) + .collect::>(); + + let (response, mnemonic) = if s.specify_mnemonic { + let mnemonic = ZeroizeString::from(random_mnemonic().phrase().to_string()); + let request = CreateValidatorsMnemonicRequest { + mnemonic: mnemonic.clone(), + key_derivation_path_offset: s.key_derivation_path_offset, + validators: validators.clone(), + }; + let response = self + .client + .post_lighthouse_validators_mnemonic(&request) + .await + .unwrap() + .data; + + (response, mnemonic) + } else { + assert_eq!( + s.key_derivation_path_offset, 0, + "cannot use a derivation offset without specifying a mnemonic" + ); + let response = self + .client + .post_lighthouse_validators(validators.clone()) + .await + .unwrap() + .data; + (response.validators.clone(), response.mnemonic) + }; + + assert_eq!(response.len(), s.count); + assert_eq!(self.vals_total(), initial_vals + s.count); + assert_eq!( + self.vals_enabled(), + initial_enabled_vals + s.count - s.disabled.len() + ); + + let server_vals = self.client.get_lighthouse_validators().await.unwrap().data; + + assert_eq!(server_vals.len(), self.vals_total()); + + // Ensure the server lists all of these newly 
created validators. + for validator in &response { + assert!(server_vals + .iter() + .any(|server_val| server_val.voting_pubkey == validator.voting_pubkey)); + } + + /* + * Verify that we can regenerate all the keys from the mnemonic. + */ + + let mnemonic = mnemonic_from_phrase(mnemonic.as_str()).unwrap(); + let mut wallet = WalletBuilder::from_mnemonic(&mnemonic, PASSWORD_BYTES, "".to_string()) + .unwrap() + .build() + .unwrap(); + + wallet + .set_nextaccount(s.key_derivation_path_offset) + .unwrap(); + + for item in response.iter().take(s.count) { + let keypairs = wallet + .next_validator(PASSWORD_BYTES, PASSWORD_BYTES, PASSWORD_BYTES) + .unwrap(); + let voting_keypair = keypairs.voting.decrypt_keypair(PASSWORD_BYTES).unwrap(); + + assert_eq!( + item.voting_pubkey, + voting_keypair.pk.clone().into(), + "the locally generated voting pk should match the server response" + ); + + let withdrawal_keypair = keypairs.withdrawal.decrypt_keypair(PASSWORD_BYTES).unwrap(); + + let deposit_bytes = serde_utils::hex::decode(&item.eth1_deposit_tx_data).unwrap(); + + let (deposit_data, _) = + decode_eth1_tx_data(&deposit_bytes, E::default_spec().max_effective_balance) + .unwrap(); + + assert_eq!( + deposit_data.pubkey, + voting_keypair.pk.clone().into(), + "the locally generated voting pk should match the deposit data" + ); + + assert_eq!( + deposit_data.withdrawal_credentials, + Hash256::from_slice(&bls::get_withdrawal_credentials( + &withdrawal_keypair.pk, + E::default_spec().bls_withdrawal_prefix_byte + )), + "the locally generated withdrawal creds should match the deposit data" + ); + + assert_eq!( + deposit_data.signature, + deposit_data.create_signature(&voting_keypair.sk, &E::default_spec()), + "the locally-generated deposit sig should create the same deposit sig" + ); + } + + self + } + + pub async fn create_keystore_validators(self, s: KeystoreValidatorScenario) -> Self { + let initial_vals = self.vals_total(); + let initial_enabled_vals = self.vals_enabled(); + + let 
password = random_password(); + let keypair = Keypair::random(); + let keystore = KeystoreBuilder::new(&keypair, password.as_bytes(), String::new()) + .unwrap() + .build() + .unwrap(); + + if !s.correct_password { + let request = KeystoreValidatorsPostRequest { + enable: s.enabled, + password: String::from_utf8(random_password().as_ref().to_vec()) + .unwrap() + .into(), + keystore, + graffiti: None, + suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, + }; + + self.client + .post_lighthouse_validators_keystore(&request) + .await + .unwrap_err(); + + return self; + } + + let request = KeystoreValidatorsPostRequest { + enable: s.enabled, + password: String::from_utf8(password.as_ref().to_vec()) + .unwrap() + .into(), + keystore, + graffiti: None, + suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, + }; + + let response = self + .client + .post_lighthouse_validators_keystore(&request) + .await + .unwrap() + .data; + + let num_enabled = s.enabled as usize; + + assert_eq!(self.vals_total(), initial_vals + 1); + assert_eq!(self.vals_enabled(), initial_enabled_vals + num_enabled); + + let server_vals = self.client.get_lighthouse_validators().await.unwrap().data; + + assert_eq!(server_vals.len(), self.vals_total()); + + assert_eq!(response.voting_pubkey, keypair.pk.into()); + assert_eq!(response.enabled, s.enabled); + + self + } + + pub async fn create_web3signer_validators(self, s: Web3SignerValidatorScenario) -> Self { + let initial_vals = self.vals_total(); + let initial_enabled_vals = self.vals_enabled(); + + let request: Vec<_> = (0..s.count) + .map(|i| { + let kp = Keypair::random(); + Web3SignerValidatorRequest { + enable: s.enabled, + description: format!("{}", i), + graffiti: None, + suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, + voting_public_key: kp.pk, + url: format!("http://signer_{}.com/", i), + root_certificate_path: None, + request_timeout_ms: None, + client_identity_path: 
None, + client_identity_password: None, + } + }) + .collect(); + + self.client + .post_lighthouse_validators_web3signer(&request) + .await + .unwrap(); + + assert_eq!(self.vals_total(), initial_vals + s.count); + if s.enabled { + assert_eq!(self.vals_enabled(), initial_enabled_vals + s.count); + } else { + assert_eq!(self.vals_enabled(), initial_enabled_vals); + }; + + self + } + + pub async fn set_validator_enabled(self, index: usize, enabled: bool) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + self.client + .patch_lighthouse_validators(&validator.voting_pubkey, Some(enabled), None, None, None) + .await + .unwrap(); + + assert_eq!( + self.initialized_validators + .read() + .is_enabled(&validator.voting_pubkey.decompress().unwrap()) + .unwrap(), + enabled + ); + + assert!(self + .client + .get_lighthouse_validators() + .await + .unwrap() + .data + .into_iter() + .find(|v| v.voting_pubkey == validator.voting_pubkey) + .map(|v| v.enabled == enabled) + .unwrap()); + + // Check the server via an individual request. 
+ assert_eq!( + self.client + .get_lighthouse_validators_pubkey(&validator.voting_pubkey) + .await + .unwrap() + .unwrap() + .data + .enabled, + enabled + ); + + self + } + + pub async fn set_gas_limit(self, index: usize, gas_limit: u64) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + self.client + .patch_lighthouse_validators( + &validator.voting_pubkey, + None, + Some(gas_limit), + None, + None, + ) + .await + .unwrap(); + + self + } + + pub async fn assert_gas_limit(self, index: usize, gas_limit: u64) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + assert_eq!( + self.validator_store.get_gas_limit(&validator.voting_pubkey), + gas_limit + ); + + self + } + + pub async fn set_builder_proposals(self, index: usize, builder_proposals: bool) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + self.client + .patch_lighthouse_validators( + &validator.voting_pubkey, + None, + None, + Some(builder_proposals), + None, + ) + .await + .unwrap(); + + self + } + + pub async fn assert_builder_proposals(self, index: usize, builder_proposals: bool) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + assert_eq!( + self.validator_store + .get_builder_proposals(&validator.voting_pubkey), + builder_proposals + ); + + self + } +} diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index dbb9d4d620..3bff444703 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -31,10 +31,8 @@ use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; -use task_executor::TaskExecutor; +use task_executor::test_utils::TestRuntime; use tempfile::{tempdir, TempDir}; -use tokio::runtime::Runtime; -use tokio::sync::oneshot; use types::graffiti::GraffitiString; const 
PASSWORD_BYTES: &[u8] = &[42, 50, 37]; @@ -48,23 +46,12 @@ struct ApiTester { validator_store: Arc>, url: SensitiveUrl, slot_clock: TestingSlotClock, - _server_shutdown: oneshot::Sender<()>, _validator_dir: TempDir, - _runtime_shutdown: exit_future::Signal, -} - -// Builds a runtime to be used in the testing configuration. -fn build_runtime() -> Arc { - Arc::new( - tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build() - .expect("Should be able to build a testing runtime"), - ) + _test_runtime: TestRuntime, } impl ApiTester { - pub async fn new(runtime: std::sync::Weak) -> Self { + pub async fn new() -> Self { let log = test_logger(); let validator_dir = tempdir().unwrap(); @@ -100,9 +87,7 @@ impl ApiTester { Duration::from_secs(1), ); - let (runtime_shutdown, exit) = exit_future::signal(); - let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - let executor = TaskExecutor::new(runtime.clone(), exit, log.clone(), shutdown_tx); + let test_runtime = TestRuntime::default(); let validator_store = Arc::new(ValidatorStore::<_, E>::new( initialized_validators, @@ -112,7 +97,7 @@ impl ApiTester { Some(Arc::new(DoppelgangerService::new(log.clone()))), slot_clock.clone(), &config, - executor.clone(), + test_runtime.task_executor.clone(), log.clone(), )); @@ -123,9 +108,10 @@ impl ApiTester { let initialized_validators = validator_store.initialized_validators(); let context = Arc::new(Context { - task_executor: executor, + task_executor: test_runtime.task_executor.clone(), api_secret, validator_dir: Some(validator_dir.path().into()), + secrets_dir: Some(secrets_dir.path().into()), validator_store: Some(validator_store.clone()), graffiti_file: None, graffiti_flag: Some(Graffiti::default()), @@ -135,6 +121,8 @@ impl ApiTester { listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), listen_port: 0, allow_origin: None, + allow_keystore_export: true, + store_passwords_in_secrets_dir: false, }, sse_logging_components: None, log, @@ -142,12 +130,8 @@ impl 
ApiTester { _phantom: PhantomData, }); let ctx = context.clone(); - let (shutdown_tx, shutdown_rx) = oneshot::channel(); - let server_shutdown = async { - // It's not really interesting why this triggered, just that it happened. - let _ = shutdown_rx.await; - }; - let (listening_socket, server) = super::serve(ctx, server_shutdown).unwrap(); + let (listening_socket, server) = + super::serve(ctx, test_runtime.task_executor.exit()).unwrap(); tokio::spawn(async { server.await }); @@ -166,9 +150,8 @@ impl ApiTester { validator_store, url, slot_clock, - _server_shutdown: shutdown_tx, _validator_dir: validator_dir, - _runtime_shutdown: runtime_shutdown, + _test_runtime: test_runtime, } } @@ -676,387 +659,341 @@ struct Web3SignerValidatorScenario { enabled: bool, } -#[test] -fn invalid_pubkey() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .invalidate_api_token() - .test_get_lighthouse_version_invalid() - .await; - }); +#[tokio::test] +async fn invalid_pubkey() { + ApiTester::new() + .await + .invalidate_api_token() + .test_get_lighthouse_version_invalid() + .await; } -#[test] -fn routes_with_invalid_auth() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .test_with_invalid_auth(|client| async move { client.get_lighthouse_version().await }) - .await - .test_with_invalid_auth(|client| async move { client.get_lighthouse_health().await }) - .await - .test_with_invalid_auth(|client| async move { - client.get_lighthouse_spec::().await - }) - .await - .test_with_invalid_auth( - |client| async move { client.get_lighthouse_validators().await }, - ) - .await - .test_with_invalid_auth(|client| async move { - client - .get_lighthouse_validators_pubkey(&PublicKeyBytes::empty()) - .await - }) - .await - .test_with_invalid_auth(|client| async move { - client - 
.post_lighthouse_validators(vec![ValidatorRequest { - enable: <_>::default(), - description: <_>::default(), - graffiti: <_>::default(), - suggested_fee_recipient: <_>::default(), - gas_limit: <_>::default(), - builder_proposals: <_>::default(), - deposit_gwei: <_>::default(), - }]) - .await - }) - .await - .test_with_invalid_auth(|client| async move { - client - .post_lighthouse_validators_mnemonic(&CreateValidatorsMnemonicRequest { - mnemonic: String::default().into(), - key_derivation_path_offset: <_>::default(), - validators: <_>::default(), - }) - .await - }) - .await - .test_with_invalid_auth(|client| async move { - let password = random_password(); - let keypair = Keypair::random(); - let keystore = KeystoreBuilder::new(&keypair, password.as_bytes(), String::new()) - .unwrap() - .build() - .unwrap(); - client - .post_lighthouse_validators_keystore(&KeystoreValidatorsPostRequest { - password: String::default().into(), - enable: <_>::default(), - keystore, - graffiti: <_>::default(), - suggested_fee_recipient: <_>::default(), - gas_limit: <_>::default(), - builder_proposals: <_>::default(), - }) - .await - }) - .await - .test_with_invalid_auth(|client| async move { - client - .patch_lighthouse_validators( - &PublicKeyBytes::empty(), - Some(false), - None, - None, - None, - ) - .await - }) - .await - .test_with_invalid_auth(|client| async move { client.get_keystores().await }) - .await - .test_with_invalid_auth(|client| async move { - let password = random_password_string(); - let keypair = Keypair::random(); - let keystore = KeystoreBuilder::new(&keypair, password.as_ref(), String::new()) - .unwrap() - .build() - .map(KeystoreJsonStr) - .unwrap(); - client - .post_keystores(&ImportKeystoresRequest { - keystores: vec![keystore], - passwords: vec![password], - slashing_protection: None, - }) - .await - }) - .await - .test_with_invalid_auth(|client| async move { - let keypair = Keypair::random(); - client - .delete_keystores(&DeleteKeystoresRequest { - pubkeys: 
vec![keypair.pk.compress()], - }) - .await - }) - .await - }); +#[tokio::test] +async fn routes_with_invalid_auth() { + ApiTester::new() + .await + .test_with_invalid_auth(|client| async move { client.get_lighthouse_version().await }) + .await + .test_with_invalid_auth(|client| async move { client.get_lighthouse_health().await }) + .await + .test_with_invalid_auth(|client| async move { + client.get_lighthouse_spec::().await + }) + .await + .test_with_invalid_auth(|client| async move { client.get_lighthouse_validators().await }) + .await + .test_with_invalid_auth(|client| async move { + client + .get_lighthouse_validators_pubkey(&PublicKeyBytes::empty()) + .await + }) + .await + .test_with_invalid_auth(|client| async move { + client + .post_lighthouse_validators(vec![ValidatorRequest { + enable: <_>::default(), + description: <_>::default(), + graffiti: <_>::default(), + suggested_fee_recipient: <_>::default(), + gas_limit: <_>::default(), + builder_proposals: <_>::default(), + deposit_gwei: <_>::default(), + }]) + .await + }) + .await + .test_with_invalid_auth(|client| async move { + client + .post_lighthouse_validators_mnemonic(&CreateValidatorsMnemonicRequest { + mnemonic: String::default().into(), + key_derivation_path_offset: <_>::default(), + validators: <_>::default(), + }) + .await + }) + .await + .test_with_invalid_auth(|client| async move { + let password = random_password(); + let keypair = Keypair::random(); + let keystore = KeystoreBuilder::new(&keypair, password.as_bytes(), String::new()) + .unwrap() + .build() + .unwrap(); + client + .post_lighthouse_validators_keystore(&KeystoreValidatorsPostRequest { + password: String::default().into(), + enable: <_>::default(), + keystore, + graffiti: <_>::default(), + suggested_fee_recipient: <_>::default(), + gas_limit: <_>::default(), + builder_proposals: <_>::default(), + }) + .await + }) + .await + .test_with_invalid_auth(|client| async move { + client + .patch_lighthouse_validators( + 
&PublicKeyBytes::empty(), + Some(false), + None, + None, + None, + ) + .await + }) + .await + .test_with_invalid_auth(|client| async move { client.get_keystores().await }) + .await + .test_with_invalid_auth(|client| async move { + let password = random_password_string(); + let keypair = Keypair::random(); + let keystore = KeystoreBuilder::new(&keypair, password.as_ref(), String::new()) + .unwrap() + .build() + .map(KeystoreJsonStr) + .unwrap(); + client + .post_keystores(&ImportKeystoresRequest { + keystores: vec![keystore], + passwords: vec![password], + slashing_protection: None, + }) + .await + }) + .await + .test_with_invalid_auth(|client| async move { + let keypair = Keypair::random(); + client + .delete_keystores(&DeleteKeystoresRequest { + pubkeys: vec![keypair.pk.compress()], + }) + .await + }) + .await; } -#[test] -fn simple_getters() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .test_get_lighthouse_version() - .await - .test_get_lighthouse_health() - .await - .test_get_lighthouse_spec() - .await; - }); +#[tokio::test] +async fn simple_getters() { + ApiTester::new() + .await + .test_get_lighthouse_version() + .await + .test_get_lighthouse_health() + .await + .test_get_lighthouse_spec() + .await; } -#[test] -fn hd_validator_creation() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .assert_enabled_validators_count(0) - .assert_validators_count(0) - .create_hd_validators(HdValidatorScenario { - count: 2, - specify_mnemonic: true, - key_derivation_path_offset: 0, - disabled: vec![], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(2) - .create_hd_validators(HdValidatorScenario { - count: 1, - specify_mnemonic: false, - key_derivation_path_offset: 0, - disabled: vec![0], - }) - .await - .assert_enabled_validators_count(2) - 
.assert_validators_count(3) - .create_hd_validators(HdValidatorScenario { - count: 0, - specify_mnemonic: true, - key_derivation_path_offset: 4, - disabled: vec![], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(3); - }); +#[tokio::test] +async fn hd_validator_creation() { + ApiTester::new() + .await + .assert_enabled_validators_count(0) + .assert_validators_count(0) + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: true, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .create_hd_validators(HdValidatorScenario { + count: 1, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![0], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(3) + .create_hd_validators(HdValidatorScenario { + count: 0, + specify_mnemonic: true, + key_derivation_path_offset: 4, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(3); } -#[test] -fn validator_exit() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .create_hd_validators(HdValidatorScenario { - count: 2, - specify_mnemonic: false, - key_derivation_path_offset: 0, - disabled: vec![], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(2) - .test_sign_voluntary_exits(0, None) - .await - .test_sign_voluntary_exits(0, Some(Epoch::new(256))) - .await; - }); +#[tokio::test] +async fn validator_exit() { + ApiTester::new() + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .test_sign_voluntary_exits(0, None) + .await + .test_sign_voluntary_exits(0, Some(Epoch::new(256))) + .await; } -#[test] 
-fn validator_enabling() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .create_hd_validators(HdValidatorScenario { - count: 2, - specify_mnemonic: false, - key_derivation_path_offset: 0, - disabled: vec![], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(2) - .set_validator_enabled(0, false) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(2) - .set_validator_enabled(0, true) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(2); - }); +#[tokio::test] +async fn validator_enabling() { + ApiTester::new() + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2); } -#[test] -fn validator_gas_limit() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .create_hd_validators(HdValidatorScenario { - count: 2, - specify_mnemonic: false, - key_derivation_path_offset: 0, - disabled: vec![], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(2) - .set_gas_limit(0, 500) - .await - .assert_gas_limit(0, 500) - .await - // Update gas limit while validator is disabled. 
- .set_validator_enabled(0, false) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(2) - .set_gas_limit(0, 1000) - .await - .set_validator_enabled(0, true) - .await - .assert_enabled_validators_count(2) - .assert_gas_limit(0, 1000) - .await - }); +#[tokio::test] +async fn validator_gas_limit() { + ApiTester::new() + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_gas_limit(0, 500) + .await + .assert_gas_limit(0, 500) + .await + // Update gas limit while validator is disabled. + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_gas_limit(0, 1000) + .await + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_gas_limit(0, 1000) + .await; } -#[test] -fn validator_builder_proposals() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .create_hd_validators(HdValidatorScenario { - count: 2, - specify_mnemonic: false, - key_derivation_path_offset: 0, - disabled: vec![], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(2) - .set_builder_proposals(0, true) - .await - // Test setting builder proposals while the validator is disabled - .set_validator_enabled(0, false) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(2) - .set_builder_proposals(0, false) - .await - .set_validator_enabled(0, true) - .await - .assert_enabled_validators_count(2) - .assert_builder_proposals(0, false) - .await - }); +#[tokio::test] +async fn validator_builder_proposals() { + ApiTester::new() + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + 
}) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_builder_proposals(0, true) + .await + // Test setting builder proposals while the validator is disabled + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_builder_proposals(0, false) + .await + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_builder_proposals(0, false) + .await; } -#[test] -fn validator_graffiti() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .create_hd_validators(HdValidatorScenario { - count: 2, - specify_mnemonic: false, - key_derivation_path_offset: 0, - disabled: vec![], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(2) - .set_graffiti(0, "Mr F was here") - .await - .assert_graffiti(0, "Mr F was here") - .await - // Test setting graffiti while the validator is disabled - .set_validator_enabled(0, false) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(2) - .set_graffiti(0, "Mr F was here again") - .await - .set_validator_enabled(0, true) - .await - .assert_enabled_validators_count(2) - .assert_graffiti(0, "Mr F was here again") - .await - }); +#[tokio::test] +async fn validator_graffiti() { + ApiTester::new() + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_graffiti(0, "Mr F was here") + .await + .assert_graffiti(0, "Mr F was here") + .await + // Test setting graffiti while the validator is disabled + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_graffiti(0, "Mr F was here again") + .await + .set_validator_enabled(0, true) + .await + 
.assert_enabled_validators_count(2) + .assert_graffiti(0, "Mr F was here again") + .await; } -#[test] -fn keystore_validator_creation() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .assert_enabled_validators_count(0) - .assert_validators_count(0) - .create_keystore_validators(KeystoreValidatorScenario { - correct_password: true, - enabled: true, - }) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(1) - .create_keystore_validators(KeystoreValidatorScenario { - correct_password: false, - enabled: true, - }) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(1) - .create_keystore_validators(KeystoreValidatorScenario { - correct_password: true, - enabled: false, - }) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(2); - }); +#[tokio::test] +async fn keystore_validator_creation() { + ApiTester::new() + .await + .assert_enabled_validators_count(0) + .assert_validators_count(0) + .create_keystore_validators(KeystoreValidatorScenario { + correct_password: true, + enabled: true, + }) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(1) + .create_keystore_validators(KeystoreValidatorScenario { + correct_password: false, + enabled: true, + }) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(1) + .create_keystore_validators(KeystoreValidatorScenario { + correct_password: true, + enabled: false, + }) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2); } -#[test] -fn web3signer_validator_creation() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .assert_enabled_validators_count(0) - .assert_validators_count(0) - .create_web3signer_validators(Web3SignerValidatorScenario { - count: 1, - enabled: true, - }) - .await - 
.assert_enabled_validators_count(1) - .assert_validators_count(1); - }); +#[tokio::test] +async fn web3signer_validator_creation() { + ApiTester::new() + .await + .assert_enabled_validators_count(0) + .assert_validators_count(0) + .create_web3signer_validators(Web3SignerValidatorScenario { + count: 1, + enabled: true, + }) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(1); } diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index 7120ee5f9f..d60872e497 100644 --- a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -12,6 +12,7 @@ use itertools::Itertools; use rand::{rngs::SmallRng, Rng, SeedableRng}; use slashing_protection::interchange::{Interchange, InterchangeMetadata}; use std::{collections::HashMap, path::Path}; +use tokio::runtime::Handle; use types::Address; fn new_keystore(password: ZeroizeString) -> Keystore { @@ -64,31 +65,23 @@ fn remotekey_validator_with_pubkey(pubkey: PublicKey) -> SingleImportRemotekeysR } } -fn run_test(f: F) +async fn run_test(f: F) where F: FnOnce(ApiTester) -> V, V: Future, { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - let tester = ApiTester::new(weak_runtime).await; - f(tester).await - }); + let tester = ApiTester::new().await; + f(tester).await } -fn run_dual_vc_test(f: F) +async fn run_dual_vc_test(f: F) where F: FnOnce(ApiTester, ApiTester) -> V, V: Future, { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - let tester1 = ApiTester::new(weak_runtime.clone()).await; - let tester2 = ApiTester::new(weak_runtime).await; - f(tester1, tester2).await - }); + let tester1 = ApiTester::new().await; + let tester2 = ApiTester::new().await; + f(tester1, tester2).await } fn keystore_pubkey(keystore: &Keystore) -> PublicKeyBytes { @@ -199,8 +192,8 @@ fn 
check_remotekey_delete_response( } } -#[test] -fn get_auth_no_token() { +#[tokio::test] +async fn get_auth_no_token() { run_test(|mut tester| async move { let _ = &tester; tester.client.send_authorization_header(false); @@ -213,19 +206,21 @@ fn get_auth_no_token() { // The token should match the one that the client was originally initialised with. assert!(tester.client.api_token() == Some(&token)); }) + .await; } -#[test] -fn get_empty_keystores() { +#[tokio::test] +async fn get_empty_keystores() { run_test(|tester| async move { let _ = &tester; let res = tester.client.get_keystores().await.unwrap(); assert_eq!(res, ListKeystoresResponse { data: vec![] }); }) + .await; } -#[test] -fn import_new_keystores() { +#[tokio::test] +async fn import_new_keystores() { run_test(|tester| async move { let _ = &tester; let password = random_password_string(); @@ -250,10 +245,11 @@ fn import_new_keystores() { let get_res = tester.client.get_keystores().await.unwrap(); check_keystore_get_response(&get_res, &keystores); }) + .await; } -#[test] -fn import_only_duplicate_keystores() { +#[tokio::test] +async fn import_only_duplicate_keystores() { run_test(|tester| async move { let _ = &tester; let password = random_password_string(); @@ -279,10 +275,11 @@ fn import_only_duplicate_keystores() { let get_res = tester.client.get_keystores().await.unwrap(); check_keystore_get_response(&get_res, &keystores); }) + .await; } -#[test] -fn import_some_duplicate_keystores() { +#[tokio::test] +async fn import_some_duplicate_keystores() { run_test(|tester| async move { let _ = &tester; let password = random_password_string(); @@ -330,10 +327,11 @@ fn import_some_duplicate_keystores() { let import_res = tester.client.post_keystores(&req2).await.unwrap(); check_keystore_import_response(&import_res, expected); }) + .await; } -#[test] -fn import_wrong_number_of_passwords() { +#[tokio::test] +async fn import_wrong_number_of_passwords() { run_test(|tester| async move { let _ = &tester; let password = 
random_password_string(); @@ -352,10 +350,11 @@ fn import_wrong_number_of_passwords() { .unwrap_err(); assert_eq!(err.status().unwrap(), 400); }) + .await; } -#[test] -fn get_web3_signer_keystores() { +#[tokio::test] +async fn get_web3_signer_keystores() { run_test(|tester| async move { let _ = &tester; let num_local = 3; @@ -412,10 +411,11 @@ fn get_web3_signer_keystores() { assert!(get_res.data.contains(&response), "{:?}", response); } }) + .await; } -#[test] -fn import_and_delete_conflicting_web3_signer_keystores() { +#[tokio::test] +async fn import_and_delete_conflicting_web3_signer_keystores() { run_test(|tester| async move { let _ = &tester; let num_keystores = 3; @@ -477,10 +477,11 @@ fn import_and_delete_conflicting_web3_signer_keystores() { let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); check_keystore_delete_response(&delete_res, all_delete_error(keystores.len())); }) + .await; } -#[test] -fn import_keystores_wrong_password() { +#[tokio::test] +async fn import_keystores_wrong_password() { run_test(|tester| async move { let _ = &tester; let num_keystores = 4; @@ -551,11 +552,12 @@ fn import_keystores_wrong_password() { &import_res, (0..num_keystores).map(|_| ImportKeystoreStatus::Duplicate), ); - }); + }) + .await; } -#[test] -fn import_invalid_slashing_protection() { +#[tokio::test] +async fn import_invalid_slashing_protection() { run_test(|tester| async move { let _ = &tester; let password = random_password_string(); @@ -589,10 +591,11 @@ fn import_invalid_slashing_protection() { let get_res = tester.client.get_keystores().await.unwrap(); check_keystore_get_response(&get_res, &[]); }) + .await; } -#[test] -fn check_get_set_fee_recipient() { +#[tokio::test] +async fn check_get_set_fee_recipient() { run_test(|tester: ApiTester| async move { let _ = &tester; let password = random_password_string(); @@ -768,10 +771,11 @@ fn check_get_set_fee_recipient() { ); } }) + .await; } -#[test] -fn check_get_set_gas_limit() { 
+#[tokio::test] +async fn check_get_set_gas_limit() { run_test(|tester: ApiTester| async move { let _ = &tester; let password = random_password_string(); @@ -943,14 +947,15 @@ fn check_get_set_gas_limit() { ); } }) + .await } fn all_indices(count: usize) -> Vec { (0..count).collect() } -#[test] -fn migrate_all_with_slashing_protection() { +#[tokio::test] +async fn migrate_all_with_slashing_protection() { let n = 3; generic_migration_test( n, @@ -967,11 +972,12 @@ fn migrate_all_with_slashing_protection() { (1, make_attestation(2, 3), false), (2, make_attestation(1, 2), false), ], - ); + ) + .await; } -#[test] -fn migrate_some_with_slashing_protection() { +#[tokio::test] +async fn migrate_some_with_slashing_protection() { let n = 3; generic_migration_test( n, @@ -989,11 +995,12 @@ fn migrate_some_with_slashing_protection() { (0, make_attestation(2, 3), true), (1, make_attestation(3, 4), true), ], - ); + ) + .await; } -#[test] -fn migrate_some_missing_slashing_protection() { +#[tokio::test] +async fn migrate_some_missing_slashing_protection() { let n = 3; generic_migration_test( n, @@ -1010,11 +1017,12 @@ fn migrate_some_missing_slashing_protection() { (1, make_attestation(2, 3), true), (0, make_attestation(2, 3), true), ], - ); + ) + .await; } -#[test] -fn migrate_some_extra_slashing_protection() { +#[tokio::test] +async fn migrate_some_extra_slashing_protection() { let n = 3; generic_migration_test( n, @@ -1033,7 +1041,8 @@ fn migrate_some_extra_slashing_protection() { (1, make_attestation(3, 4), true), (2, make_attestation(2, 3), false), ], - ); + ) + .await; } /// Run a test that creates some validators on one VC, and then migrates them to a second VC. @@ -1051,7 +1060,7 @@ fn migrate_some_extra_slashing_protection() { /// - `import_indices`: validators to transfer. It needn't be a subset of `delete_indices`. /// - `second_vc_attestations`: attestations to sign on the second VC after the transfer. The bool /// indicates whether the signing should be successful. 
-fn generic_migration_test( +async fn generic_migration_test( num_validators: usize, first_vc_attestations: Vec<(usize, Attestation)>, delete_indices: Vec, @@ -1169,11 +1178,12 @@ fn generic_migration_test( Err(e) => assert!(!should_succeed, "{:?}", e), } } - }); + }) + .await } -#[test] -fn delete_keystores_twice() { +#[tokio::test] +async fn delete_keystores_twice() { run_test(|tester| async move { let _ = &tester; let password = random_password_string(); @@ -1201,10 +1211,11 @@ fn delete_keystores_twice() { let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); check_keystore_delete_response(&delete_res, all_not_active(keystores.len())); }) + .await } -#[test] -fn delete_nonexistent_keystores() { +#[tokio::test] +async fn delete_nonexistent_keystores() { run_test(|tester| async move { let _ = &tester; let password = random_password_string(); @@ -1219,6 +1230,7 @@ fn delete_nonexistent_keystores() { let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); check_keystore_delete_response(&delete_res, all_not_found(keystores.len())); }) + .await } fn make_attestation(source_epoch: u64, target_epoch: u64) -> Attestation { @@ -1242,9 +1254,9 @@ fn make_attestation(source_epoch: u64, target_epoch: u64) -> Attestation { } } -#[test] -fn delete_concurrent_with_signing() { - let runtime = build_runtime(); +#[tokio::test] +async fn delete_concurrent_with_signing() { + let handle = Handle::try_current().unwrap(); let num_keys = 8; let num_signing_threads = 8; let num_attestations = 100; @@ -1257,115 +1269,112 @@ fn delete_concurrent_with_signing() { "num_keys should be divisible by num threads for simplicity" ); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - let tester = ApiTester::new(weak_runtime).await; + let tester = ApiTester::new().await; - // Generate a lot of keys and import them. 
- let password = random_password_string(); - let keystores = (0..num_keys) - .map(|_| new_keystore(password.clone())) - .collect::>(); - let all_pubkeys = keystores.iter().map(keystore_pubkey).collect::>(); + // Generate a lot of keys and import them. + let password = random_password_string(); + let keystores = (0..num_keys) + .map(|_| new_keystore(password.clone())) + .collect::>(); + let all_pubkeys = keystores.iter().map(keystore_pubkey).collect::>(); - let import_res = tester - .client - .post_keystores(&ImportKeystoresRequest { - keystores: keystores.clone(), - passwords: vec![password.clone(); keystores.len()], - slashing_protection: None, - }) - .await - .unwrap(); - check_keystore_import_response(&import_res, all_imported(keystores.len())); + let import_res = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: None, + }) + .await + .unwrap(); + check_keystore_import_response(&import_res, all_imported(keystores.len())); - // Start several threads signing attestations at sequential epochs. - let mut join_handles = vec![]; + // Start several threads signing attestations at sequential epochs. 
+ let mut join_handles = vec![]; - for thread_index in 0..num_signing_threads { - let keys_per_thread = num_keys / num_signing_threads; - let validator_store = tester.validator_store.clone(); - let thread_pubkeys = all_pubkeys - [thread_index * keys_per_thread..(thread_index + 1) * keys_per_thread] - .to_vec(); + for thread_index in 0..num_signing_threads { + let keys_per_thread = num_keys / num_signing_threads; + let validator_store = tester.validator_store.clone(); + let thread_pubkeys = all_pubkeys + [thread_index * keys_per_thread..(thread_index + 1) * keys_per_thread] + .to_vec(); - let handle = runtime.spawn(async move { - for j in 0..num_attestations { - let mut att = make_attestation(j, j + 1); - for (_validator_id, public_key) in thread_pubkeys.iter().enumerate() { - let _ = validator_store - .sign_attestation(*public_key, 0, &mut att, Epoch::new(j + 1)) - .await; - } + let handle = handle.spawn(async move { + for j in 0..num_attestations { + let mut att = make_attestation(j, j + 1); + for (_validator_id, public_key) in thread_pubkeys.iter().enumerate() { + let _ = validator_store + .sign_attestation(*public_key, 0, &mut att, Epoch::new(j + 1)) + .await; } - }); - join_handles.push(handle); - } - - // Concurrently, delete each validator one at a time. Store the slashing protection - // data so we can ensure it doesn't change after a key is exported. 
- let mut delete_handles = vec![]; - for _ in 0..num_delete_threads { - let client = tester.client.clone(); - let all_pubkeys = all_pubkeys.clone(); - - let handle = runtime.spawn(async move { - let mut rng = SmallRng::from_entropy(); - - let mut slashing_protection = vec![]; - for _ in 0..num_delete_attempts { - let to_delete = all_pubkeys - .iter() - .filter(|_| rng.gen_bool(delete_prob)) - .copied() - .collect::>(); - - if !to_delete.is_empty() { - let delete_res = client - .delete_keystores(&DeleteKeystoresRequest { pubkeys: to_delete }) - .await - .unwrap(); - - for status in delete_res.data.iter() { - assert_ne!(status.status, DeleteKeystoreStatus::Error); - } - - slashing_protection.push(delete_res.slashing_protection); - } - } - slashing_protection - }); - - delete_handles.push(handle); - } - - // Collect slashing protection. - let mut slashing_protection_map = HashMap::new(); - let collected_slashing_protection = futures::future::join_all(delete_handles).await; - - for interchange in collected_slashing_protection - .into_iter() - .flat_map(Result::unwrap) - { - for validator_data in interchange.data { - slashing_protection_map - .entry(validator_data.pubkey) - .and_modify(|existing| { - assert_eq!( - *existing, validator_data, - "slashing protection data changed after first export" - ) - }) - .or_insert(validator_data); } - } + }); + join_handles.push(handle); + } - futures::future::join_all(join_handles).await - }); + // Concurrently, delete each validator one at a time. Store the slashing protection + // data so we can ensure it doesn't change after a key is exported. 
+ let mut delete_handles = vec![]; + for _ in 0..num_delete_threads { + let client = tester.client.clone(); + let all_pubkeys = all_pubkeys.clone(); + + let handle = handle.spawn(async move { + let mut rng = SmallRng::from_entropy(); + + let mut slashing_protection = vec![]; + for _ in 0..num_delete_attempts { + let to_delete = all_pubkeys + .iter() + .filter(|_| rng.gen_bool(delete_prob)) + .copied() + .collect::>(); + + if !to_delete.is_empty() { + let delete_res = client + .delete_keystores(&DeleteKeystoresRequest { pubkeys: to_delete }) + .await + .unwrap(); + + for status in delete_res.data.iter() { + assert_ne!(status.status, DeleteKeystoreStatus::Error); + } + + slashing_protection.push(delete_res.slashing_protection); + } + } + slashing_protection + }); + + delete_handles.push(handle); + } + + // Collect slashing protection. + let mut slashing_protection_map = HashMap::new(); + let collected_slashing_protection = futures::future::join_all(delete_handles).await; + + for interchange in collected_slashing_protection + .into_iter() + .flat_map(Result::unwrap) + { + for validator_data in interchange.data { + slashing_protection_map + .entry(validator_data.pubkey) + .and_modify(|existing| { + assert_eq!( + *existing, validator_data, + "slashing protection data changed after first export" + ) + }) + .or_insert(validator_data); + } + } + + futures::future::join_all(join_handles).await; } -#[test] -fn delete_then_reimport() { +#[tokio::test] +async fn delete_then_reimport() { run_test(|tester| async move { let _ = &tester; let password = random_password_string(); @@ -1396,19 +1405,21 @@ fn delete_then_reimport() { let import_res = tester.client.post_keystores(&import_req).await.unwrap(); check_keystore_import_response(&import_res, all_imported(keystores.len())); }) + .await } -#[test] -fn get_empty_remotekeys() { +#[tokio::test] +async fn get_empty_remotekeys() { run_test(|tester| async move { let _ = &tester; let res = tester.client.get_remotekeys().await.unwrap(); 
assert_eq!(res, ListRemotekeysResponse { data: vec![] }); }) + .await } -#[test] -fn import_new_remotekeys() { +#[tokio::test] +async fn import_new_remotekeys() { run_test(|tester| async move { let _ = &tester; @@ -1443,10 +1454,11 @@ fn import_new_remotekeys() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, expected_responses); }) + .await } -#[test] -fn import_same_remotekey_different_url() { +#[tokio::test] +async fn import_same_remotekey_different_url() { run_test(|tester| async move { let _ = &tester; @@ -1485,10 +1497,11 @@ fn import_same_remotekey_different_url() { }], ); }) + .await } -#[test] -fn delete_remotekey_then_reimport_different_url() { +#[tokio::test] +async fn delete_remotekey_then_reimport_different_url() { run_test(|tester| async move { let _ = &tester; @@ -1534,10 +1547,11 @@ fn delete_remotekey_then_reimport_different_url() { vec![ImportRemotekeyStatus::Imported].into_iter(), ); }) + .await } -#[test] -fn import_only_duplicate_remotekeys() { +#[tokio::test] +async fn import_only_duplicate_remotekeys() { run_test(|tester| async move { let _ = &tester; let remotekeys = (0..3) @@ -1582,10 +1596,11 @@ fn import_only_duplicate_remotekeys() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, expected_responses); }) + .await } -#[test] -fn import_some_duplicate_remotekeys() { +#[tokio::test] +async fn import_some_duplicate_remotekeys() { run_test(|tester| async move { let _ = &tester; let num_remotekeys = 5; @@ -1649,10 +1664,11 @@ fn import_some_duplicate_remotekeys() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, expected_responses); }) + .await } -#[test] -fn import_remote_and_local_keys() { +#[tokio::test] +async fn import_remote_and_local_keys() { run_test(|tester| async move { let _ = &tester; let num_local = 3; @@ -1714,10 +1730,11 @@ fn import_remote_and_local_keys() { 
assert!(get_res.data.contains(&response), "{:?}", response); } }) + .await } -#[test] -fn import_same_local_and_remote_keys() { +#[tokio::test] +async fn import_same_local_and_remote_keys() { run_test(|tester| async move { let _ = &tester; let num_local = 3; @@ -1782,9 +1799,10 @@ fn import_same_local_and_remote_keys() { assert!(get_res.data.contains(&response), "{:?}", response); } }) + .await } -#[test] -fn import_same_remote_and_local_keys() { +#[tokio::test] +async fn import_same_remote_and_local_keys() { run_test(|tester| async move { let _ = &tester; let num_local = 3; @@ -1847,10 +1865,11 @@ fn import_same_remote_and_local_keys() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, expected_responses); }) + .await } -#[test] -fn delete_remotekeys_twice() { +#[tokio::test] +async fn delete_remotekeys_twice() { run_test(|tester| async move { let _ = &tester; @@ -1893,10 +1912,11 @@ fn delete_remotekeys_twice() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, Vec::new()); }) + .await } -#[test] -fn delete_nonexistent_remotekey() { +#[tokio::test] +async fn delete_nonexistent_remotekey() { run_test(|tester| async move { let _ = &tester; @@ -1919,10 +1939,11 @@ fn delete_nonexistent_remotekey() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, Vec::new()); }) + .await } -#[test] -fn delete_then_reimport_remotekeys() { +#[tokio::test] +async fn delete_then_reimport_remotekeys() { run_test(|tester| async move { let _ = &tester; @@ -1984,10 +2005,11 @@ fn delete_then_reimport_remotekeys() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, expected_responses); }) + .await } -#[test] -fn import_remotekey_web3signer() { +#[tokio::test] +async fn import_remotekey_web3signer() { run_test(|tester| async move { let _ = &tester; @@ -2043,10 +2065,11 @@ fn 
import_remotekey_web3signer() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, expected_responses); }) + .await } -#[test] -fn import_remotekey_web3signer_disabled() { +#[tokio::test] +async fn import_remotekey_web3signer_disabled() { run_test(|tester| async move { let _ = &tester; @@ -2096,10 +2119,11 @@ fn import_remotekey_web3signer_disabled() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, expected_responses); }) + .await } -#[test] -fn import_remotekey_web3signer_enabled() { +#[tokio::test] +async fn import_remotekey_web3signer_enabled() { run_test(|tester| async move { let _ = &tester; @@ -2156,4 +2180,5 @@ fn import_remotekey_web3signer_enabled() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, expected_responses); }) + .await } diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 090acbe969..f15ea27c9b 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -8,7 +8,7 @@ use crate::signing_method::SigningMethod; use account_utils::{ - read_password, read_password_from_user, + read_password, read_password_from_user, read_password_string, validator_definitions::{ self, SigningDefinition, ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition, CONFIG_FILENAME, @@ -44,6 +44,19 @@ const DEFAULT_REMOTE_SIGNER_REQUEST_TIMEOUT: Duration = Duration::from_secs(12); // Use TTY instead of stdin to capture passwords from users. const USE_STDIN: bool = false; +pub enum OnDecryptFailure { + /// If the key cache fails to decrypt, create a new cache. + CreateNew, + /// Return an error if the key cache fails to decrypt. This should only be + /// used in testing. 
+ Error, +} + +pub struct KeystoreAndPassword { + pub keystore: Keystore, + pub password: Option, +} + #[derive(Debug)] pub enum Error { /// Refused to open a validator with an existing lockfile since that validator may be in-use by @@ -98,6 +111,11 @@ pub enum Error { UnableToBuildWeb3SignerClient(ReqwestError), /// Unable to apply an action to a validator. InvalidActionOnValidator, + UnableToReadValidatorPassword(String), + UnableToReadKeystoreFile(eth2_keystore::Error), + UnableToSaveKeyCache(key_cache::Error), + UnableToDecryptKeyCache(key_cache::Error), + UnableToDeletePasswordFile(PathBuf, io::Error), } impl From for Error { @@ -539,33 +557,78 @@ impl InitializedValidators { &mut self, pubkey: &PublicKey, is_local_keystore: bool, - ) -> Result<(), Error> { + ) -> Result, Error> { // 1. Disable the validator definition. // // We disable before removing so that in case of a crash the auto-discovery mechanism // won't re-activate the keystore. - if let Some(def) = self + let mut uuid_opt = None; + let mut password_path_opt = None; + let keystore_and_password = if let Some(def) = self .definitions .as_mut_slice() .iter_mut() .find(|def| &def.voting_public_key == pubkey) { - // Update definition for local keystore - if def.signing_definition.is_local_keystore() && is_local_keystore { - def.enabled = false; - self.definitions - .save(&self.validators_dir) - .map_err(Error::UnableToSaveDefinitions)?; - } else if !def.signing_definition.is_local_keystore() && !is_local_keystore { - def.enabled = false; - } else { - return Err(Error::InvalidActionOnValidator); + match &def.signing_definition { + SigningDefinition::LocalKeystore { + voting_keystore_path, + voting_keystore_password, + voting_keystore_password_path, + .. 
+ } if is_local_keystore => { + let password = match (voting_keystore_password, voting_keystore_password_path) { + (Some(password), _) => Some(password.clone()), + (_, Some(path)) => { + password_path_opt = Some(path.clone()); + read_password_string(path) + .map(Option::Some) + .map_err(Error::UnableToReadValidatorPassword)? + } + (None, None) => None, + }; + let keystore = Keystore::from_json_file(voting_keystore_path) + .map_err(Error::UnableToReadKeystoreFile)?; + uuid_opt = Some(*keystore.uuid()); + + def.enabled = false; + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + + Some(KeystoreAndPassword { keystore, password }) + } + SigningDefinition::Web3Signer(_) if !is_local_keystore => { + def.enabled = false; + None + } + _ => return Err(Error::InvalidActionOnValidator), } } else { return Err(Error::ValidatorNotInitialized(pubkey.clone())); + }; + + // 2. Remove the validator from the key cache. This ensures the key + // cache is consistent next time the VC starts. + // + // It's not a big deal if this succeeds and something fails later in + // this function because the VC will self-heal from a corrupt key cache. + // + // Do this before modifying `self.validators` or deleting anything from + // the filesystem. + if let Some(uuid) = uuid_opt { + let key_cache = KeyCache::open_or_create(&self.validators_dir) + .map_err(Error::UnableToOpenKeyCache)?; + let mut decrypted_key_cache = self + .decrypt_key_cache(key_cache, &mut <_>::default(), OnDecryptFailure::CreateNew) + .await?; + decrypted_key_cache.remove(&uuid); + decrypted_key_cache + .save(&self.validators_dir) + .map_err(Error::UnableToSaveKeyCache)?; } - // 2. Delete from `self.validators`, which holds the signing method. + // 3. Delete from `self.validators`, which holds the signing method. // Delete the keystore files. 
if let Some(initialized_validator) = self.validators.remove(&pubkey.compress()) { if let SigningMethod::LocalKeystore { @@ -583,14 +646,28 @@ impl InitializedValidators { } } - // 3. Delete from validator definitions entirely. + // 4. Delete from validator definitions entirely. self.definitions .retain(|def| &def.voting_public_key != pubkey); self.definitions .save(&self.validators_dir) .map_err(Error::UnableToSaveDefinitions)?; - Ok(()) + // 5. Delete the keystore password if it's not being used by any definition. + if let Some(password_path) = password_path_opt.and_then(|p| p.canonicalize().ok()) { + if self + .definitions + .iter_voting_keystore_password_paths() + // Require canonicalized paths so we can do a true equality check. + .filter_map(|existing| existing.canonicalize().ok()) + .all(|existing| existing != password_path) + { + fs::remove_file(&password_path) + .map_err(|e| Error::UnableToDeletePasswordFile(password_path, e))?; + } + } + + Ok(keystore_and_password) } /// Attempt to delete the voting keystore file, or its entire validator directory. @@ -900,10 +977,11 @@ impl InitializedValidators { /// filesystem accesses for keystores that are already known. In the case that a keystore /// from the validator definitions is not yet in this map, it will be loaded from disk and /// inserted into the map. - async fn decrypt_key_cache( + pub async fn decrypt_key_cache( &self, mut cache: KeyCache, key_stores: &mut HashMap, + on_failure: OnDecryptFailure, ) -> Result { // Read relevant key stores from the filesystem. 
let mut definitions_map = HashMap::new(); @@ -971,11 +1049,13 @@ impl InitializedValidators { //decrypt tokio::task::spawn_blocking(move || match cache.decrypt(passwords, public_keys) { - Ok(_) | Err(key_cache::Error::AlreadyDecrypted) => cache, - _ => KeyCache::new(), + Ok(_) | Err(key_cache::Error::AlreadyDecrypted) => Ok(cache), + _ if matches!(on_failure, OnDecryptFailure::CreateNew) => Ok(KeyCache::new()), + Err(e) => Err(e), }) .await - .map_err(Error::TokioJoin) + .map_err(Error::TokioJoin)? + .map_err(Error::UnableToDecryptKeyCache) } /// Scans `self.definitions` and attempts to initialize and validators which are not already @@ -1013,7 +1093,8 @@ impl InitializedValidators { // Only decrypt cache when there is at least one local definition. // Decrypting cache is a very expensive operation which is never used for web3signer. let mut key_cache = if has_local_definitions { - self.decrypt_key_cache(cache, &mut key_stores).await? + self.decrypt_key_cache(cache, &mut key_stores, OnDecryptFailure::CreateNew) + .await? } else { // Assign an empty KeyCache if all definitions are of the Web3Signer type. KeyCache::new() @@ -1191,4 +1272,41 @@ impl InitializedValidators { val.index = Some(index); } } + + /// Deletes any passwords stored in the validator definitions file and + /// returns a map of pubkey to deleted password. + /// + /// This should only be used for testing, it's rather destructive. + pub fn delete_passwords_from_validator_definitions( + &mut self, + ) -> Result, Error> { + let mut passwords = HashMap::default(); + + for def in self.definitions.as_mut_slice() { + match &mut def.signing_definition { + SigningDefinition::LocalKeystore { + ref mut voting_keystore_password, + .. + } => { + if let Some(password) = voting_keystore_password.take() { + passwords.insert(def.voting_public_key.clone(), password); + } + } + // Remote signers don't have passwords. + SigningDefinition::Web3Signer { .. 
} => (),
+            };
+        }
+
+        self.definitions
+            .save(&self.validators_dir)
+            .map_err(Error::UnableToSaveDefinitions)?;
+
+        Ok(passwords)
+    }
+
+    /// Prefer other methods in production. Arbitrarily modifying a validator
+    /// definition manually may result in inconsistencies.
+    pub fn as_mut_slice_testing_only(&mut self) -> &mut [ValidatorDefinition] {
+        self.definitions.as_mut_slice()
+    }
 }
diff --git a/validator_client/src/key_cache.rs b/validator_client/src/key_cache.rs
index b7abaaed06..c2dd7aa8fe 100644
--- a/validator_client/src/key_cache.rs
+++ b/validator_client/src/key_cache.rs
@@ -47,6 +47,12 @@ pub struct KeyCache {
 
 type SerializedKeyMap = HashMap;
 
+impl Default for KeyCache {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl KeyCache {
     pub fn new() -> Self {
         KeyCache {
diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs
index 60943a260c..f7a80f0a8e 100644
--- a/validator_client/src/lib.rs
+++ b/validator_client/src/lib.rs
@@ -172,9 +172,12 @@ impl ProductionValidatorClient {
         let new_validators = validator_defs
             .discover_local_keystores(&config.validator_dir, &config.secrets_dir, &log)
             .map_err(|e| format!("Unable to discover local validator keystores: {:?}", e))?;
-        validator_defs
-            .save(&config.validator_dir)
-            .map_err(|e| format!("Unable to update validator definitions: {:?}", e))?;
+        validator_defs.save(&config.validator_dir).map_err(|e| {
+            format!(
+                "Unable to update validator definitions: {:?}",
+                e
+            )
+        })?;
 
         info!(
             log,
             "Completed validator discovery";
@@ -573,6 +576,7 @@ impl ProductionValidatorClient {
             api_secret,
             validator_store: Some(self.validator_store.clone()),
             validator_dir: Some(self.config.validator_dir.clone()),
+            secrets_dir: Some(self.config.secrets_dir.clone()),
             graffiti_file: self.config.graffiti_file.clone(),
             graffiti_flag: self.config.graffiti,
             spec: self.context.eth2_config.spec.clone(),
diff --git a/validator_client/src/notifier.rs b/validator_client/src/notifier.rs
index 
732ae68ff8..909e64a78a 100644 --- a/validator_client/src/notifier.rs +++ b/validator_client/src/notifier.rs @@ -94,8 +94,7 @@ async fn notify( info!( log, "No validators present"; - "msg" => "see `lighthouse account validator create --help` \ - or the HTTP API documentation" + "msg" => "see `lighthouse vm create --help` or the HTTP API documentation" ) } else if total_validators == attesting_validators { info!( diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index 7d6e1744c8..2d2221680f 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -1,5 +1,5 @@ use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; -use crate::validator_store::{DoppelgangerStatus, ValidatorStore}; +use crate::validator_store::{DoppelgangerStatus, Error as ValidatorStoreError, ValidatorStore}; use crate::OfflineOnFailure; use bls::PublicKeyBytes; use environment::RuntimeContext; @@ -442,8 +442,23 @@ impl PreparationService { .await { Ok(data) => data, + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. 
+ debug!( + log, + "Missing pubkey for registration data"; + "pubkey" => ?pubkey, + ); + continue; + } Err(e) => { - error!(log, "Unable to sign validator registration data"; "error" => ?e, "pubkey" => ?pubkey); + error!( + log, + "Unable to sign validator registration data"; + "error" => ?e, + "pubkey" => ?pubkey + ); continue; } }; diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs index cc20cedfc6..e01bf09cf2 100644 --- a/validator_client/src/sync_committee_service.rs +++ b/validator_client/src/sync_committee_service.rs @@ -1,5 +1,9 @@ use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; -use crate::{duties_service::DutiesService, validator_store::ValidatorStore, OfflineOnFailure}; +use crate::{ + duties_service::DutiesService, + validator_store::{Error as ValidatorStoreError, ValidatorStore}, + OfflineOnFailure, +}; use environment::RuntimeContext; use eth2::types::BlockId; use futures::future::join_all; @@ -264,6 +268,18 @@ impl SyncCommitteeService { .await { Ok(signature) => Some(signature), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. + debug!( + log, + "Missing pubkey for sync committee signature"; + "pubkey" => ?pubkey, + "validator_index" => duty.validator_index, + "slot" => slot, + ); + None + } Err(e) => { crit!( log, @@ -405,6 +421,17 @@ impl SyncCommitteeService { .await { Ok(signed_contribution) => Some(signed_contribution), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. 
+ debug!( + log, + "Missing pubkey for sync contribution"; + "pubkey" => ?pubkey, + "slot" => slot, + ); + None + } Err(e) => { crit!( log, diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 73843579a2..365f7f7347 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -5,7 +5,7 @@ use crate::{ signing_method::{Error as SigningError, SignableMessage, SigningContext, SigningMethod}, Config, }; -use account_utils::{validator_definitions::ValidatorDefinition, ZeroizeString}; +use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition}; use parking_lot::{Mutex, RwLock}; use slashing_protection::{ interchange::Interchange, InterchangeError, NotSafe, Safe, SlashingDatabase, @@ -170,7 +170,7 @@ impl ValidatorStore { pub async fn add_validator_keystore>( &self, voting_keystore_path: P, - password: ZeroizeString, + password_storage: PasswordStorage, enable: bool, graffiti: Option, suggested_fee_recipient: Option
, @@ -179,7 +179,7 @@ impl ValidatorStore { ) -> Result { let mut validator_def = ValidatorDefinition::new_keystore_with_password( voting_keystore_path, - Some(password), + password_storage, graffiti.map(Into::into), suggested_fee_recipient, gas_limit, diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml new file mode 100644 index 0000000000..851510820e --- /dev/null +++ b/validator_manager/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "validator_manager" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +bls = { path = "../crypto/bls" } +clap = "2.33.3" +types = { path = "../consensus/types" } +environment = { path = "../lighthouse/environment" } +eth2_network_config = { path = "../common/eth2_network_config" } +clap_utils = { path = "../common/clap_utils" } +eth2_wallet = { path = "../crypto/eth2_wallet" } +eth2_keystore = { path = "../crypto/eth2_keystore" } +account_utils = { path = "../common/account_utils" } +serde = { version = "1.0.116", features = ["derive"] } +serde_json = "1.0.58" +ethereum_serde_utils = "0.5.0" +tree_hash = "0.5.0" +eth2 = { path = "../common/eth2", features = ["lighthouse"]} +hex = "0.4.2" +tokio = { version = "1.14.0", features = ["time", "rt-multi-thread", "macros"] } + +[dev-dependencies] +tempfile = "3.1.0" +regex = "1.6.0" +eth2_network_config = { path = "../common/eth2_network_config" } +validator_client = { path = "../validator_client" } diff --git a/validator_manager/src/common.rs b/validator_manager/src/common.rs new file mode 100644 index 0000000000..6a3f93a3f7 --- /dev/null +++ b/validator_manager/src/common.rs @@ -0,0 +1,361 @@ +use account_utils::{strip_off_newlines, ZeroizeString}; +use eth2::lighthouse_vc::std_types::{InterchangeJsonStr, KeystoreJsonStr}; +use eth2::{ + lighthouse_vc::{ + http_client::ValidatorClientHttpClient, + std_types::{ImportKeystoreStatus, ImportKeystoresRequest, 
SingleKeystoreResponse, Status}, + types::UpdateFeeRecipientRequest, + }, + SensitiveUrl, +}; +use serde::{Deserialize, Serialize}; +use std::fs; +use std::path::{Path, PathBuf}; +use tree_hash::TreeHash; +use types::*; + +pub const IGNORE_DUPLICATES_FLAG: &str = "ignore-duplicates"; +pub const STDIN_INPUTS_FLAG: &str = "stdin-inputs"; +pub const COUNT_FLAG: &str = "count"; + +/// When the `ethereum/staking-deposit-cli` tool generates deposit data JSON, it adds a +/// `deposit_cli_version` to protect the web-based "Launchpad" tool against a breaking change that +/// was introduced in `ethereum/staking-deposit-cli`. Lighthouse don't really have a version that it +/// can use here, so we choose a static string that is: +/// +/// 1. High enough that it's accepted by Launchpad. +/// 2. Weird enough to identify Lighthouse. +const LIGHTHOUSE_DEPOSIT_CLI_VERSION: &str = "20.18.20"; + +#[derive(Debug)] +pub enum UploadError { + InvalidPublicKey, + DuplicateValidator(PublicKeyBytes), + FailedToListKeys(eth2::Error), + KeyUploadFailed(eth2::Error), + IncorrectStatusCount(usize), + FeeRecipientUpdateFailed(eth2::Error), + PatchValidatorFailed(eth2::Error), +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct ValidatorSpecification { + pub voting_keystore: KeystoreJsonStr, + pub voting_keystore_password: ZeroizeString, + pub slashing_protection: Option, + pub fee_recipient: Option
, + pub gas_limit: Option, + pub builder_proposals: Option, + pub enabled: Option, +} + +impl ValidatorSpecification { + /// Upload the validator to a validator client via HTTP. + pub async fn upload( + self, + http_client: &ValidatorClientHttpClient, + ignore_duplicates: bool, + ) -> Result, UploadError> { + let ValidatorSpecification { + voting_keystore, + voting_keystore_password, + slashing_protection, + fee_recipient, + gas_limit, + builder_proposals, + enabled, + } = self; + + let voting_public_key = voting_keystore + .public_key() + .ok_or(UploadError::InvalidPublicKey)? + .into(); + + let request = ImportKeystoresRequest { + keystores: vec![voting_keystore], + passwords: vec![voting_keystore_password], + slashing_protection, + }; + + // Check to see if this validator already exists on the remote validator. + match http_client.get_keystores().await { + Ok(response) => { + if response + .data + .iter() + .any(|validator| validator.validating_pubkey == voting_public_key) + { + if ignore_duplicates { + eprintln!( + "Duplicate validators are ignored, ignoring {:?} which exists \ + on the destination validator client", + voting_public_key + ); + } else { + return Err(UploadError::DuplicateValidator(voting_public_key)); + } + } + } + Err(e) => { + return Err(UploadError::FailedToListKeys(e)); + } + }; + + let mut statuses = http_client + .post_keystores(&request) + .await + .map_err(UploadError::KeyUploadFailed)? + .data; + + let status = statuses.pop().ok_or(UploadError::IncorrectStatusCount(0))?; + if !statuses.is_empty() { + return Err(UploadError::IncorrectStatusCount(statuses.len() + 1)); + } + + // Exit early if there's an error uploading. 
+ if status.status == ImportKeystoreStatus::Error { + return Ok(status); + } + + if let Some(fee_recipient) = fee_recipient { + http_client + .post_fee_recipient( + &voting_public_key, + &UpdateFeeRecipientRequest { + ethaddress: fee_recipient, + }, + ) + .await + .map_err(UploadError::FeeRecipientUpdateFailed)?; + } + + if gas_limit.is_some() || builder_proposals.is_some() || enabled.is_some() { + http_client + .patch_lighthouse_validators( + &voting_public_key, + enabled, + gas_limit, + builder_proposals, + None, // Grafitti field is not maintained between validator moves. + ) + .await + .map_err(UploadError::PatchValidatorFailed)?; + } + + Ok(status) + } +} + +#[derive(Serialize, Deserialize)] +pub struct CreateSpec { + pub mnemonic: String, + pub validator_client_url: Option, + pub validator_client_token_path: Option, + pub json_deposit_data_path: Option, + pub ignore_duplicates: bool, + pub validators: Vec, +} + +/// The structure generated by the `staking-deposit-cli` which has become a quasi-standard for +/// browser-based deposit submission tools (e.g., the Ethereum Launchpad and Lido). +/// +/// We assume this code as the canonical definition: +/// +/// https://github.com/ethereum/staking-deposit-cli/blob/76ed78224fdfe3daca788d12442b3d1a37978296/staking_deposit/credentials.py#L131-L144 +#[derive(Debug, PartialEq, Serialize, Deserialize)] +pub struct StandardDepositDataJson { + #[serde(with = "public_key_bytes_without_0x_prefix")] + pub pubkey: PublicKeyBytes, + #[serde(with = "hash256_without_0x_prefix")] + pub withdrawal_credentials: Hash256, + /// The `amount` field is *not* quoted (i.e., a string) like most other `u64` fields in the + /// consensus specs, it's a simple integer. 
+ pub amount: u64, + #[serde(with = "signature_bytes_without_0x_prefix")] + pub signature: SignatureBytes, + #[serde(with = "bytes_4_without_0x_prefix")] + pub fork_version: [u8; 4], + pub network_name: String, + #[serde(with = "hash256_without_0x_prefix")] + pub deposit_message_root: Hash256, + #[serde(with = "hash256_without_0x_prefix")] + pub deposit_data_root: Hash256, + pub deposit_cli_version: String, +} + +impl StandardDepositDataJson { + pub fn new( + keypair: &Keypair, + withdrawal_credentials: Hash256, + amount: u64, + spec: &ChainSpec, + ) -> Result { + let deposit_data = { + let mut deposit_data = DepositData { + pubkey: keypair.pk.clone().into(), + withdrawal_credentials, + amount, + signature: SignatureBytes::empty(), + }; + deposit_data.signature = deposit_data.create_signature(&keypair.sk, spec); + deposit_data + }; + + let deposit_message_root = deposit_data.as_deposit_message().tree_hash_root(); + let deposit_data_root = deposit_data.tree_hash_root(); + + let DepositData { + pubkey, + withdrawal_credentials, + amount, + signature, + } = deposit_data; + + Ok(Self { + pubkey, + withdrawal_credentials, + amount, + signature, + fork_version: spec.genesis_fork_version, + network_name: spec + .config_name + .clone() + .ok_or("The network specification does not have a CONFIG_NAME set")?, + deposit_message_root, + deposit_data_root, + deposit_cli_version: LIGHTHOUSE_DEPOSIT_CLI_VERSION.to_string(), + }) + } +} + +macro_rules! 
without_0x_prefix { + ($mod_name: ident, $type: ty) => { + pub mod $mod_name { + use super::*; + use std::str::FromStr; + + struct Visitor; + + impl<'de> serde::de::Visitor<'de> for Visitor { + type Value = $type; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("ascii hex without a 0x prefix") + } + + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + <$type>::from_str(&format!("0x{}", v)).map_err(serde::de::Error::custom) + } + } + + /// Serialize with quotes. + pub fn serialize(value: &$type, serializer: S) -> Result + where + S: serde::Serializer, + { + let with_prefix = format!("{:?}", value); + let without_prefix = with_prefix + .strip_prefix("0x") + .ok_or_else(|| serde::ser::Error::custom("serialization is missing 0x"))?; + serializer.serialize_str(&without_prefix) + } + + /// Deserialize with quotes. + pub fn deserialize<'de, D>(deserializer: D) -> Result<$type, D::Error> + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_str(Visitor) + } + } + }; +} + +without_0x_prefix!(hash256_without_0x_prefix, Hash256); +without_0x_prefix!(signature_bytes_without_0x_prefix, SignatureBytes); +without_0x_prefix!(public_key_bytes_without_0x_prefix, PublicKeyBytes); + +mod bytes_4_without_0x_prefix { + use serde::de::Error; + + const BYTES_LEN: usize = 4; + + pub fn serialize(bytes: &[u8; BYTES_LEN], serializer: S) -> Result + where + S: serde::Serializer, + { + let hex_string = &hex::encode(bytes); + serializer.serialize_str(hex_string) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; BYTES_LEN], D::Error> + where + D: serde::Deserializer<'de>, + { + let decoded = deserializer.deserialize_str(serde_utils::hex::HexVisitor)?; + + if decoded.len() != BYTES_LEN { + return Err(D::Error::custom(format!( + "expected {} bytes for array, got {}", + BYTES_LEN, + decoded.len() + ))); + } + + let mut array = [0; BYTES_LEN]; + array.copy_from_slice(&decoded); + 
Ok(array) + } +} + +pub async fn vc_http_client>( + url: SensitiveUrl, + token_path: P, +) -> Result<(ValidatorClientHttpClient, Vec), String> { + let token_path = token_path.as_ref(); + let token_bytes = + fs::read(token_path).map_err(|e| format!("Failed to read {:?}: {:?}", token_path, e))?; + let token_string = String::from_utf8(strip_off_newlines(token_bytes)) + .map_err(|e| format!("Failed to parse {:?} as utf8: {:?}", token_path, e))?; + let http_client = ValidatorClientHttpClient::new(url.clone(), token_string).map_err(|e| { + format!( + "Could not instantiate HTTP client from URL and secret: {:?}", + e + ) + })?; + + // Perform a request to check that the connection works + let remote_keystores = http_client + .get_keystores() + .await + .map_err(|e| format!("Failed to list keystores on VC: {:?}", e))? + .data; + + eprintln!( + "Validator client is reachable at {} and reports {} validators", + url, + remote_keystores.len() + ); + + Ok((http_client, remote_keystores)) +} + +/// Write some object to a file as JSON. +/// +/// The file must be created new, it must not already exist. 
+pub fn write_to_json_file, S: Serialize>( + path: P, + contents: &S, +) -> Result<(), String> { + eprintln!("Writing {:?}", path.as_ref()); + let mut file = fs::OpenOptions::new() + .write(true) + .create_new(true) + .open(&path) + .map_err(|e| format!("Failed to open {:?}: {:?}", path.as_ref(), e))?; + serde_json::to_writer(&mut file, contents) + .map_err(|e| format!("Failed to write JSON to {:?}: {:?}", path.as_ref(), e)) +} diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs new file mode 100644 index 0000000000..8ea740ff5b --- /dev/null +++ b/validator_manager/src/create_validators.rs @@ -0,0 +1,934 @@ +use super::common::*; +use crate::DumpConfig; +use account_utils::{random_password_string, read_mnemonic_from_cli, read_password_from_user}; +use clap::{App, Arg, ArgMatches}; +use eth2::{ + lighthouse_vc::std_types::KeystoreJsonStr, + types::{StateId, ValidatorId}, + BeaconNodeHttpClient, SensitiveUrl, Timeouts, +}; +use eth2_wallet::WalletBuilder; +use serde::{Deserialize, Serialize}; +use std::fs; +use std::path::PathBuf; +use std::time::Duration; +use types::*; + +pub const CMD: &str = "create"; +pub const OUTPUT_PATH_FLAG: &str = "output-path"; +pub const DEPOSIT_GWEI_FLAG: &str = "deposit-gwei"; +pub const DISABLE_DEPOSITS_FLAG: &str = "disable-deposits"; +pub const FIRST_INDEX_FLAG: &str = "first-index"; +pub const MNEMONIC_FLAG: &str = "mnemonic-path"; +pub const SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG: &str = "specify-voting-keystore-password"; +pub const ETH1_WITHDRAWAL_ADDRESS_FLAG: &str = "eth1-withdrawal-address"; +pub const GAS_LIMIT_FLAG: &str = "gas-limit"; +pub const FEE_RECIPIENT_FLAG: &str = "suggested-fee-recipient"; +pub const BUILDER_PROPOSALS_FLAG: &str = "builder-proposals"; +pub const BEACON_NODE_FLAG: &str = "beacon-node"; +pub const FORCE_BLS_WITHDRAWAL_CREDENTIALS: &str = "force-bls-withdrawal-credentials"; + +pub const VALIDATORS_FILENAME: &str = "validators.json"; +pub const 
DEPOSITS_FILENAME: &str = "deposits.json"; + +const BEACON_NODE_HTTP_TIMEOUT: Duration = Duration::from_secs(2); + +pub fn cli_app<'a, 'b>() -> App<'a, 'b> { + App::new(CMD) + .about( + "Creates new validators from BIP-39 mnemonic. A JSON file will be created which \ + contains all the validator keystores and other validator data. This file can then \ + be imported to a validator client using the \"import-validators\" command. \ + Another, optional JSON file is created which contains a list of validator \ + deposits in the same format as the \"ethereum/staking-deposit-cli\" tool.", + ) + .arg( + Arg::with_name(OUTPUT_PATH_FLAG) + .long(OUTPUT_PATH_FLAG) + .value_name("DIRECTORY") + .help( + "The path to a directory where the validator and (optionally) deposits \ + files will be created. The directory will be created if it does not exist.", + ) + .required(true) + .takes_value(true), + ) + .arg( + Arg::with_name(DEPOSIT_GWEI_FLAG) + .long(DEPOSIT_GWEI_FLAG) + .value_name("DEPOSIT_GWEI") + .help( + "The GWEI value of the deposit amount. 
Defaults to the minimum amount \ + required for an active validator (MAX_EFFECTIVE_BALANCE)", + ) + .conflicts_with(DISABLE_DEPOSITS_FLAG) + .takes_value(true), + ) + .arg( + Arg::with_name(FIRST_INDEX_FLAG) + .long(FIRST_INDEX_FLAG) + .value_name("FIRST_INDEX") + .help("The first of consecutive key indexes you wish to create.") + .takes_value(true) + .required(false) + .default_value("0"), + ) + .arg( + Arg::with_name(COUNT_FLAG) + .long(COUNT_FLAG) + .value_name("VALIDATOR_COUNT") + .help("The number of validators to create, regardless of how many already exist") + .conflicts_with("at-most") + .takes_value(true), + ) + .arg( + Arg::with_name(MNEMONIC_FLAG) + .long(MNEMONIC_FLAG) + .value_name("MNEMONIC_PATH") + .help("If present, the mnemonic will be read in from this file.") + .takes_value(true), + ) + .arg( + Arg::with_name(STDIN_INPUTS_FLAG) + .takes_value(false) + .hidden(cfg!(windows)) + .long(STDIN_INPUTS_FLAG) + .help("If present, read all user inputs from stdin instead of tty."), + ) + .arg( + Arg::with_name(DISABLE_DEPOSITS_FLAG) + .long(DISABLE_DEPOSITS_FLAG) + .help( + "When provided don't generate the deposits JSON file that is \ + commonly used for submitting validator deposits via a web UI. \ + Using this flag will save several seconds per validator if the \ + user has an alternate strategy for submitting deposits.", + ), + ) + .arg( + Arg::with_name(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG) + .long(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG) + .help( + "If present, the user will be prompted to enter the voting keystore \ + password that will be used to encrypt the voting keystores. If this \ + flag is not provided, a random password will be used. 
It is not \ + necessary to keep backups of voting keystore passwords if the \ + mnemonic is safely backed up.", + ), + ) + .arg( + Arg::with_name(ETH1_WITHDRAWAL_ADDRESS_FLAG) + .long(ETH1_WITHDRAWAL_ADDRESS_FLAG) + .value_name("ETH1_ADDRESS") + .help( + "If this field is set, the given eth1 address will be used to create the \ + withdrawal credentials. Otherwise, it will generate withdrawal credentials \ + with the mnemonic-derived withdrawal public key in EIP-2334 format.", + ) + .conflicts_with(DISABLE_DEPOSITS_FLAG) + .takes_value(true), + ) + .arg( + Arg::with_name(GAS_LIMIT_FLAG) + .long(GAS_LIMIT_FLAG) + .value_name("UINT64") + .help( + "All created validators will use this gas limit. It is recommended \ + to leave this as the default value by not specifying this flag.", + ) + .required(false) + .takes_value(true), + ) + .arg( + Arg::with_name(FEE_RECIPIENT_FLAG) + .long(FEE_RECIPIENT_FLAG) + .value_name("ETH1_ADDRESS") + .help( + "All created validators will use this value for the suggested \ + fee recipient. Omit this flag to use the default value from the VC.", + ) + .required(false) + .takes_value(true), + ) + .arg( + Arg::with_name(BUILDER_PROPOSALS_FLAG) + .long(BUILDER_PROPOSALS_FLAG) + .help( + "When provided, all created validators will attempt to create \ + blocks via builder rather than the local EL.", + ) + .required(false) + .possible_values(&["true", "false"]) + .takes_value(true), + ) + .arg( + Arg::with_name(BEACON_NODE_FLAG) + .long(BEACON_NODE_FLAG) + .value_name("HTTP_ADDRESS") + .help( + "A HTTP(S) address of a beacon node using the beacon-API. \ + If this value is provided, an error will be raised if any validator \ + key here is already known as a validator by that beacon node. 
This helps \ + prevent the same validator being created twice and therefore slashable \ + conditions.", + ) + .takes_value(true), + ) + .arg( + Arg::with_name(FORCE_BLS_WITHDRAWAL_CREDENTIALS) + .takes_value(false) + .long(FORCE_BLS_WITHDRAWAL_CREDENTIALS) + .help( + "If present, allows BLS withdrawal credentials rather than an execution \ + address. This is not recommended.", + ), + ) +} + +/// The CLI arguments are parsed into this struct before running the application. This step of +/// indirection allows for testing the underlying logic without needing to parse CLI arguments. +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub struct CreateConfig { + pub output_path: PathBuf, + pub first_index: u32, + pub count: u32, + pub deposit_gwei: u64, + pub mnemonic_path: Option, + pub stdin_inputs: bool, + pub disable_deposits: bool, + pub specify_voting_keystore_password: bool, + pub eth1_withdrawal_address: Option
, + pub builder_proposals: Option, + pub fee_recipient: Option
, + pub gas_limit: Option, + pub bn_url: Option, + pub force_bls_withdrawal_credentials: bool, +} + +impl CreateConfig { + fn from_cli(matches: &ArgMatches, spec: &ChainSpec) -> Result { + Ok(Self { + output_path: clap_utils::parse_required(matches, OUTPUT_PATH_FLAG)?, + deposit_gwei: clap_utils::parse_optional(matches, DEPOSIT_GWEI_FLAG)? + .unwrap_or(spec.max_effective_balance), + first_index: clap_utils::parse_required(matches, FIRST_INDEX_FLAG)?, + count: clap_utils::parse_required(matches, COUNT_FLAG)?, + mnemonic_path: clap_utils::parse_optional(matches, MNEMONIC_FLAG)?, + stdin_inputs: cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG), + disable_deposits: matches.is_present(DISABLE_DEPOSITS_FLAG), + specify_voting_keystore_password: matches + .is_present(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG), + eth1_withdrawal_address: clap_utils::parse_optional( + matches, + ETH1_WITHDRAWAL_ADDRESS_FLAG, + )?, + builder_proposals: clap_utils::parse_optional(matches, BUILDER_PROPOSALS_FLAG)?, + fee_recipient: clap_utils::parse_optional(matches, FEE_RECIPIENT_FLAG)?, + gas_limit: clap_utils::parse_optional(matches, GAS_LIMIT_FLAG)?, + bn_url: clap_utils::parse_optional(matches, BEACON_NODE_FLAG)?, + force_bls_withdrawal_credentials: matches.is_present(FORCE_BLS_WITHDRAWAL_CREDENTIALS), + }) + } +} + +struct ValidatorsAndDeposits { + validators: Vec, + deposits: Option>, +} + +impl ValidatorsAndDeposits { + async fn new<'a, T: EthSpec>(config: CreateConfig, spec: &ChainSpec) -> Result { + let CreateConfig { + // The output path is handled upstream. + output_path: _, + first_index, + count, + deposit_gwei, + mnemonic_path, + stdin_inputs, + disable_deposits, + specify_voting_keystore_password, + eth1_withdrawal_address, + builder_proposals, + fee_recipient, + gas_limit, + bn_url, + force_bls_withdrawal_credentials, + } = config; + + // Since Capella, it really doesn't make much sense to use BLS + // withdrawal credentials. Try to guide users away from doing so. 
+ if eth1_withdrawal_address.is_none() && !force_bls_withdrawal_credentials { + return Err(format!( + "--{ETH1_WITHDRAWAL_ADDRESS_FLAG} is required. See --help for more information." + )); + } + + if count == 0 { + return Err(format!("--{} cannot be 0", COUNT_FLAG)); + } + + let bn_http_client = if let Some(bn_url) = bn_url { + let bn_http_client = + BeaconNodeHttpClient::new(bn_url, Timeouts::set_all(BEACON_NODE_HTTP_TIMEOUT)); + + /* + * Print the version of the remote beacon node. + */ + let version = bn_http_client + .get_node_version() + .await + .map_err(|e| format!("Failed to test connection to beacon node: {:?}", e))? + .data + .version; + eprintln!("Connected to beacon node running version {}", version); + + /* + * Attempt to ensure that the beacon node is on the same network. + */ + let bn_config = bn_http_client + .get_config_spec::() + .await + .map_err(|e| format!("Failed to get spec from beacon node: {:?}", e))? + .data; + if let Some(config_name) = &bn_config.config_name { + eprintln!("Beacon node is on {} network", config_name) + } + let bn_spec = bn_config + .apply_to_chain_spec::(&T::default_spec()) + .ok_or("Beacon node appears to be on an incorrect network")?; + if bn_spec.genesis_fork_version != spec.genesis_fork_version { + if let Some(config_name) = bn_spec.config_name { + eprintln!("Beacon node is on {} network", config_name) + } + return Err("Beacon node appears to be on the wrong network".to_string()); + } + + Some(bn_http_client) + } else { + None + }; + + let mnemonic = read_mnemonic_from_cli(mnemonic_path, stdin_inputs)?; + let voting_keystore_password = if specify_voting_keystore_password { + eprintln!("Please enter a voting keystore password when prompted."); + Some(read_password_from_user(stdin_inputs)?) + } else { + None + }; + + /* + * Generate a wallet to be used for HD key generation. + */ + + // A random password is always appropriate for the wallet since it is ephemeral. 
+ let wallet_password = random_password_string(); + // A random password is always appropriate for the withdrawal keystore since we don't ever store + // it anywhere. + let withdrawal_keystore_password = random_password_string(); + let mut wallet = + WalletBuilder::from_mnemonic(&mnemonic, wallet_password.as_ref(), "".to_string()) + .map_err(|e| format!("Unable create seed from mnemonic: {:?}", e))? + .build() + .map_err(|e| format!("Unable to create wallet: {:?}", e))?; + + /* + * Start deriving individual validators. + */ + + eprintln!( + "Starting derivation of {} keystores. Each keystore may take several seconds.", + count + ); + + let mut validators = Vec::with_capacity(count as usize); + let mut deposits = (!disable_deposits).then(Vec::new); + + for (i, derivation_index) in (first_index..first_index + count).enumerate() { + // If the voting keystore password was not provided by the user then use a unique random + // string for each validator. + let voting_keystore_password = voting_keystore_password + .clone() + .unwrap_or_else(random_password_string); + + // Set the wallet to the appropriate derivation index. + wallet + .set_nextaccount(derivation_index) + .map_err(|e| format!("Failure to set validator derivation index: {:?}", e))?; + + // Derive the keystore from the HD wallet. + let keystores = wallet + .next_validator( + wallet_password.as_ref(), + voting_keystore_password.as_ref(), + withdrawal_keystore_password.as_ref(), + ) + .map_err(|e| format!("Failed to derive keystore {}: {:?}", i, e))?; + let voting_keystore = keystores.voting; + let voting_public_key = voting_keystore + .public_key() + .ok_or_else(|| { + format!("Validator keystore at index {} is missing a public key", i) + })? + .into(); + + // If the user has provided a beacon node URL, check that the validator doesn't already + // exist in the beacon chain. 
+ if let Some(bn_http_client) = &bn_http_client { + match bn_http_client + .get_beacon_states_validator_id( + StateId::Head, + &ValidatorId::PublicKey(voting_public_key), + ) + .await + { + Ok(Some(_)) => { + return Err(format!( + "Validator {:?} at derivation index {} already exists in the beacon chain. \ + This indicates a slashing risk, be sure to never run the same validator on two \ + different validator clients. If you understand the risks and are certain you \ + wish to generate this validator again, omit the --{} flag.", + voting_public_key, derivation_index, BEACON_NODE_FLAG + ))? + } + Ok(None) => eprintln!( + "{:?} was not found in the beacon chain", + voting_public_key + ), + Err(e) => { + return Err(format!( + "Error checking if validator exists in beacon chain: {:?}", + e + )) + } + } + } + + if let Some(deposits) = &mut deposits { + // Decrypt the voting keystore so a deposit message can be signed. + let voting_keypair = voting_keystore + .decrypt_keypair(voting_keystore_password.as_ref()) + .map_err(|e| format!("Failed to decrypt voting keystore {}: {:?}", i, e))?; + + // Sanity check to ensure the keystore is reporting the correct public key. + if PublicKeyBytes::from(voting_keypair.pk.clone()) != voting_public_key { + return Err(format!( + "Mismatch for keystore public key and derived public key \ + for derivation index {}", + derivation_index + )); + } + + let withdrawal_credentials = + if let Some(eth1_withdrawal_address) = eth1_withdrawal_address { + WithdrawalCredentials::eth1(eth1_withdrawal_address, spec) + } else { + // Decrypt the withdrawal keystore so withdrawal credentials can be created. It's + // not strictly necessary to decrypt the keystore since we can read the pubkey + // directly from the keystore. However we decrypt the keystore to be more certain + // that we have access to the withdrawal keys. 
+ let withdrawal_keypair = keystores + .withdrawal + .decrypt_keypair(withdrawal_keystore_password.as_ref()) + .map_err(|e| { + format!("Failed to decrypt withdrawal keystore {}: {:?}", i, e) + })?; + WithdrawalCredentials::bls(&withdrawal_keypair.pk, spec) + }; + + // Create a JSON structure equivalent to the one generated by + // `ethereum/staking-deposit-cli`. + let json_deposit = StandardDepositDataJson::new( + &voting_keypair, + withdrawal_credentials.into(), + deposit_gwei, + spec, + )?; + + deposits.push(json_deposit); + } + + let validator = ValidatorSpecification { + voting_keystore: KeystoreJsonStr(voting_keystore), + voting_keystore_password: voting_keystore_password.clone(), + // New validators have no slashing protection history. + slashing_protection: None, + fee_recipient, + gas_limit, + builder_proposals, + // Allow the VC to choose a default "enabled" state. Since "enabled" is not part of + // the standard API, leaving this as `None` means we are not forced to use the + // non-standard API. + enabled: None, + }; + + eprintln!( + "Completed {}/{}: {:?}", + i.saturating_add(1), + count, + voting_public_key + ); + + validators.push(validator); + } + + Ok(Self { + validators, + deposits, + }) + } +} + +pub async fn cli_run<'a, T: EthSpec>( + matches: &'a ArgMatches<'a>, + spec: &ChainSpec, + dump_config: DumpConfig, +) -> Result<(), String> { + let config = CreateConfig::from_cli(matches, spec)?; + if dump_config.should_exit_early(&config)? 
{ + Ok(()) + } else { + run::(config, spec).await + } +} + +async fn run<'a, T: EthSpec>(config: CreateConfig, spec: &ChainSpec) -> Result<(), String> { + let output_path = config.output_path.clone(); + + if !output_path.exists() { + fs::create_dir(&output_path) + .map_err(|e| format!("Failed to create {:?} directory: {:?}", output_path, e))?; + } else if !output_path.is_dir() { + return Err(format!("{:?} must be a directory", output_path)); + } + + let validators_path = output_path.join(VALIDATORS_FILENAME); + if validators_path.exists() { + return Err(format!( + "{:?} already exists, refusing to overwrite", + validators_path + )); + } + let deposits_path = output_path.join(DEPOSITS_FILENAME); + if deposits_path.exists() { + return Err(format!( + "{:?} already exists, refusing to overwrite", + deposits_path + )); + } + + let validators_and_deposits = ValidatorsAndDeposits::new::(config, spec).await?; + + eprintln!("Keystore generation complete"); + + write_to_json_file(&validators_path, &validators_and_deposits.validators)?; + + if let Some(deposits) = &validators_and_deposits.deposits { + write_to_json_file(&deposits_path, deposits)?; + } + + Ok(()) +} + +// The tests use crypto and are too slow in debug. +#[cfg(not(debug_assertions))] +#[cfg(test)] +pub mod tests { + use super::*; + use eth2_network_config::Eth2NetworkConfig; + use regex::Regex; + use std::path::Path; + use std::str::FromStr; + use tempfile::{tempdir, TempDir}; + use tree_hash::TreeHash; + + type E = MainnetEthSpec; + + const TEST_VECTOR_DEPOSIT_CLI_VERSION: &str = "2.3.0"; + + fn junk_execution_address() -> Option
{ + Some(Address::from_str("0x0f51bb10119727a7e5ea3538074fb341f56b09ad").unwrap()) + } + + pub struct TestBuilder { + spec: ChainSpec, + output_dir: TempDir, + mnemonic_dir: TempDir, + config: CreateConfig, + } + + impl Default for TestBuilder { + fn default() -> Self { + Self::new(E::default_spec()) + } + } + + impl TestBuilder { + pub fn new(spec: ChainSpec) -> Self { + let output_dir = tempdir().unwrap(); + let mnemonic_dir = tempdir().unwrap(); + let mnemonic_path = mnemonic_dir.path().join("mnemonic"); + fs::write( + &mnemonic_path, + "test test test test test test test test test test test waste", + ) + .unwrap(); + + let config = CreateConfig { + output_path: output_dir.path().into(), + first_index: 0, + count: 1, + deposit_gwei: spec.max_effective_balance, + mnemonic_path: Some(mnemonic_path), + stdin_inputs: false, + disable_deposits: false, + specify_voting_keystore_password: false, + eth1_withdrawal_address: junk_execution_address(), + builder_proposals: None, + fee_recipient: None, + gas_limit: None, + bn_url: None, + force_bls_withdrawal_credentials: false, + }; + + Self { + spec, + output_dir, + mnemonic_dir, + config, + } + } + + pub fn mutate_config(mut self, func: F) -> Self { + func(&mut self.config); + self + } + + pub async fn run_test(self) -> TestResult { + let Self { + spec, + output_dir, + mnemonic_dir, + config, + } = self; + + let result = run::(config.clone(), &spec).await; + + if result.is_ok() { + let validators_file_contents = + fs::read_to_string(output_dir.path().join(VALIDATORS_FILENAME)).unwrap(); + let validators: Vec = + serde_json::from_str(&validators_file_contents).unwrap(); + + assert_eq!(validators.len(), config.count as usize); + + for (i, validator) in validators.iter().enumerate() { + let voting_keystore = &validator.voting_keystore.0; + let keypair = voting_keystore + .decrypt_keypair(validator.voting_keystore_password.as_ref()) + .unwrap(); + assert_eq!(keypair.pk, voting_keystore.public_key().unwrap()); + assert_eq!( + 
voting_keystore.path().unwrap(), + format!("m/12381/3600/{}/0/0", config.first_index as usize + i) + ); + assert!(validator.slashing_protection.is_none()); + assert_eq!(validator.fee_recipient, config.fee_recipient); + assert_eq!(validator.gas_limit, config.gas_limit); + assert_eq!(validator.builder_proposals, config.builder_proposals); + assert_eq!(validator.enabled, None); + } + + let deposits_path = output_dir.path().join(DEPOSITS_FILENAME); + if config.disable_deposits { + assert!(!deposits_path.exists()); + } else { + let deposits_file_contents = fs::read_to_string(&deposits_path).unwrap(); + let deposits: Vec = + serde_json::from_str(&deposits_file_contents).unwrap(); + + assert_eq!(deposits.len(), config.count as usize); + + for (validator, deposit) in validators.iter().zip(deposits.iter()) { + let validator_pubkey = validator.voting_keystore.0.public_key().unwrap(); + assert_eq!(deposit.pubkey, validator_pubkey.clone().into()); + if let Some(address) = config.eth1_withdrawal_address { + assert_eq!( + deposit.withdrawal_credentials.as_bytes()[0], + spec.eth1_address_withdrawal_prefix_byte + ); + assert_eq!( + &deposit.withdrawal_credentials.as_bytes()[12..], + address.as_bytes() + ); + } else { + assert_eq!( + deposit.withdrawal_credentials.as_bytes()[0], + spec.bls_withdrawal_prefix_byte + ); + } + assert_eq!(deposit.amount, config.deposit_gwei); + let deposit_message = DepositData { + pubkey: deposit.pubkey, + withdrawal_credentials: deposit.withdrawal_credentials, + amount: deposit.amount, + signature: SignatureBytes::empty(), + } + .as_deposit_message(); + assert!(deposit.signature.decompress().unwrap().verify( + &validator_pubkey, + deposit_message.signing_root(spec.get_deposit_domain()) + )); + assert_eq!(deposit.fork_version, spec.genesis_fork_version); + assert_eq!(&deposit.network_name, spec.config_name.as_ref().unwrap()); + assert_eq!( + deposit.deposit_message_root, + deposit_message.tree_hash_root() + ); + assert_eq!( + deposit.deposit_data_root, 
+ DepositData { + pubkey: deposit.pubkey, + withdrawal_credentials: deposit.withdrawal_credentials, + amount: deposit.amount, + signature: deposit.signature.clone() + } + .tree_hash_root() + ); + } + } + } + + // The directory containing the mnemonic can now be removed. + drop(mnemonic_dir); + + TestResult { result, output_dir } + } + } + + #[must_use] // Use the `assert_ok` or `assert_err` fns to "use" this value. + pub struct TestResult { + pub result: Result<(), String>, + pub output_dir: TempDir, + } + + impl TestResult { + pub fn validators_file_path(&self) -> PathBuf { + self.output_dir.path().join(VALIDATORS_FILENAME) + } + + pub fn validators(&self) -> Vec { + let contents = fs::read_to_string(self.validators_file_path()).unwrap(); + serde_json::from_str(&contents).unwrap() + } + + fn assert_ok(self) { + assert_eq!(self.result, Ok(())) + } + + fn assert_err(self) { + assert!(self.result.is_err()) + } + } + + #[tokio::test] + async fn default_test_values() { + TestBuilder::default().run_test().await.assert_ok(); + } + + #[tokio::test] + async fn no_eth1_address_without_force() { + TestBuilder::default() + .mutate_config(|config| { + config.eth1_withdrawal_address = None; + config.force_bls_withdrawal_credentials = false + }) + .run_test() + .await + .assert_err(); + } + + #[tokio::test] + async fn bls_withdrawal_credentials() { + TestBuilder::default() + .mutate_config(|config| { + config.eth1_withdrawal_address = None; + config.force_bls_withdrawal_credentials = true + }) + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn default_test_values_deposits_disabled() { + TestBuilder::default() + .mutate_config(|config| config.disable_deposits = true) + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn count_is_zero() { + TestBuilder::default() + .mutate_config(|config| config.count = 0) + .run_test() + .await + .assert_err(); + } + + #[tokio::test] + async fn eth1_withdrawal_addresses() { + TestBuilder::default() + 
.mutate_config(|config| { + config.count = 2; + config.eth1_withdrawal_address = junk_execution_address(); + }) + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn non_zero_first_index() { + TestBuilder::default() + .mutate_config(|config| { + config.first_index = 2; + config.count = 2; + }) + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn misc_modifications() { + TestBuilder::default() + .mutate_config(|config| { + config.deposit_gwei = 42; + config.builder_proposals = Some(true); + config.gas_limit = Some(1337); + }) + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn bogus_bn_url() { + TestBuilder::default() + .mutate_config(|config| { + config.bn_url = + Some(SensitiveUrl::from_str("http://sdjfvwfhsdhfschwkeyfwhwlga.com").unwrap()); + }) + .run_test() + .await + .assert_err(); + } + + #[tokio::test] + async fn staking_deposit_cli_vectors() { + let vectors_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("test_vectors") + .join("vectors"); + for entry in fs::read_dir(vectors_dir).unwrap() { + let entry = entry.unwrap(); + let file_name = entry.file_name(); + let vector_name = file_name.to_str().unwrap(); + let path = entry.path(); + // Leave this `println!` so we can tell which test fails. + println!("Running test {}", vector_name); + run_test_vector(vector_name, &path).await; + } + } + + async fn run_test_vector>(name: &str, vectors_path: P) { + /* + * Parse the test vector name into a set of test parameters. 
+ */ + let re = Regex::new(r"(.*)_(.*)_(.*)_(.*)_(.*)_(.*)_(.*)").unwrap(); + let capture = re.captures_iter(name).next().unwrap(); + let network = capture.get(1).unwrap().as_str(); + let first = u32::from_str(capture.get(3).unwrap().as_str()).unwrap(); + let count = u32::from_str(capture.get(5).unwrap().as_str()).unwrap(); + let uses_eth1 = bool::from_str(capture.get(7).unwrap().as_str()).unwrap(); + + /* + * Use the test parameters to generate equivalent files "locally" (i.e., with our code). + */ + + let spec = Eth2NetworkConfig::constant(network) + .unwrap() + .unwrap() + .chain_spec::() + .unwrap(); + + let test_result = TestBuilder::new(spec) + .mutate_config(|config| { + config.first_index = first; + config.count = count; + if uses_eth1 { + config.eth1_withdrawal_address = Some( + Address::from_str("0x0f51bb10119727a7e5ea3538074fb341f56b09ad").unwrap(), + ); + } else { + config.eth1_withdrawal_address = None; + config.force_bls_withdrawal_credentials = true; + } + }) + .run_test() + .await; + let TestResult { result, output_dir } = test_result; + result.expect("local generation should succeed"); + + /* + * Ensure the deposit data is identical when parsed as JSON. + */ + + let local_deposits = { + let path = output_dir.path().join(DEPOSITS_FILENAME); + let contents = fs::read_to_string(&path).unwrap(); + let mut deposits: Vec = + serde_json::from_str(&contents).unwrap(); + for deposit in &mut deposits { + // Ensures we can match test vectors. + deposit.deposit_cli_version = TEST_VECTOR_DEPOSIT_CLI_VERSION.to_string(); + + // We use "prater" and the vectors use "goerli" now. The two names refer to the same + // network so there should be no issue here. 
+ if deposit.network_name == "prater" { + deposit.network_name = "goerli".to_string(); + } + } + deposits + }; + let vector_deposits: Vec = { + let path = fs::read_dir(vectors_path.as_ref().join("validator_keys")) + .unwrap() + .find_map(|entry| { + let entry = entry.unwrap(); + let file_name = entry.file_name(); + if file_name.to_str().unwrap().starts_with("deposit_data") { + Some(entry.path()) + } else { + None + } + }) + .unwrap(); + let contents = fs::read_to_string(path).unwrap(); + serde_json::from_str(&contents).unwrap() + }; + + assert_eq!(local_deposits, vector_deposits); + + /* + * Note: we don't check the keystores generated by the deposit-cli since there is little + * value in this. + * + * If we check the deposits then we are verifying the signature across the deposit message. + * This implicitly verifies that the keypair generated by the deposit-cli is identical to + * the one created by Lighthouse. + */ + } +} diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs new file mode 100644 index 0000000000..4b924189f2 --- /dev/null +++ b/validator_manager/src/import_validators.rs @@ -0,0 +1,436 @@ +use super::common::*; +use crate::DumpConfig; +use clap::{App, Arg, ArgMatches}; +use eth2::{lighthouse_vc::std_types::ImportKeystoreStatus, SensitiveUrl}; +use serde::{Deserialize, Serialize}; +use std::fs; +use std::path::PathBuf; + +pub const CMD: &str = "import"; +pub const VALIDATORS_FILE_FLAG: &str = "validators-file"; +pub const VC_URL_FLAG: &str = "vc-url"; +pub const VC_TOKEN_FLAG: &str = "vc-token"; + +pub const DETECTED_DUPLICATE_MESSAGE: &str = "Duplicate validator detected!"; + +pub fn cli_app<'a, 'b>() -> App<'a, 'b> { + App::new(CMD) + .about( + "Uploads validators to a validator client using the HTTP API. 
The validators \ + are defined in a JSON file which can be generated using the \"create-validators\" \ + command.", + ) + .arg( + Arg::with_name(VALIDATORS_FILE_FLAG) + .long(VALIDATORS_FILE_FLAG) + .value_name("PATH_TO_JSON_FILE") + .help( + "The path to a JSON file containing a list of validators to be \ + imported to the validator client. This file is usually named \ + \"validators.json\".", + ) + .required(true) + .takes_value(true), + ) + .arg( + Arg::with_name(VC_URL_FLAG) + .long(VC_URL_FLAG) + .value_name("HTTP_ADDRESS") + .help( + "A HTTP(S) address of a validator client using the keymanager-API. \ + If this value is not supplied then a 'dry run' will be conducted where \ + no changes are made to the validator client.", + ) + .default_value("http://localhost:5062") + .requires(VC_TOKEN_FLAG) + .takes_value(true), + ) + .arg( + Arg::with_name(VC_TOKEN_FLAG) + .long(VC_TOKEN_FLAG) + .value_name("PATH") + .help("The file containing a token required by the validator client.") + .takes_value(true), + ) + .arg( + Arg::with_name(IGNORE_DUPLICATES_FLAG) + .takes_value(false) + .long(IGNORE_DUPLICATES_FLAG) + .help( + "If present, ignore any validators which already exist on the VC. \ + Without this flag, the process will terminate without making any changes. \ + This flag should be used with caution, whilst it does not directly cause \ + slashable conditions, it might be an indicator that something is amiss. 
\ + Users should also be careful to avoid submitting duplicate deposits for \ + validators that already exist on the VC.", + ), + ) +} + +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub struct ImportConfig { + pub validators_file_path: PathBuf, + pub vc_url: SensitiveUrl, + pub vc_token_path: PathBuf, + pub ignore_duplicates: bool, +} + +impl ImportConfig { + fn from_cli(matches: &ArgMatches) -> Result { + Ok(Self { + validators_file_path: clap_utils::parse_required(matches, VALIDATORS_FILE_FLAG)?, + vc_url: clap_utils::parse_required(matches, VC_URL_FLAG)?, + vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, + ignore_duplicates: matches.is_present(IGNORE_DUPLICATES_FLAG), + }) + } +} + +pub async fn cli_run<'a>( + matches: &'a ArgMatches<'a>, + dump_config: DumpConfig, +) -> Result<(), String> { + let config = ImportConfig::from_cli(matches)?; + if dump_config.should_exit_early(&config)? { + Ok(()) + } else { + run(config).await + } +} + +async fn run<'a>(config: ImportConfig) -> Result<(), String> { + let ImportConfig { + validators_file_path, + vc_url, + vc_token_path, + ignore_duplicates, + } = config; + + if !validators_file_path.exists() { + return Err(format!("Unable to find file at {:?}", validators_file_path)); + } + + let validators_file = fs::OpenOptions::new() + .read(true) + .create(false) + .open(&validators_file_path) + .map_err(|e| format!("Unable to open {:?}: {:?}", validators_file_path, e))?; + let validators: Vec = serde_json::from_reader(&validators_file) + .map_err(|e| { + format!( + "Unable to parse JSON in {:?}: {:?}", + validators_file_path, e + ) + })?; + + let count = validators.len(); + + let (http_client, _keystores) = vc_http_client(vc_url.clone(), &vc_token_path).await?; + + eprintln!( + "Starting to submit {} validators to VC, each validator may take several seconds", + count + ); + + for (i, validator) in validators.into_iter().enumerate() { + match validator.upload(&http_client, 
ignore_duplicates).await { + Ok(status) => { + match status.status { + ImportKeystoreStatus::Imported => { + eprintln!("Uploaded keystore {} of {} to the VC", i + 1, count) + } + ImportKeystoreStatus::Duplicate => { + if ignore_duplicates { + eprintln!("Re-uploaded keystore {} of {} to the VC", i + 1, count) + } else { + eprintln!( + "Keystore {} of {} was uploaded to the VC, but it was a duplicate. \ + Exiting now, use --{} to allow duplicates.", + i + 1, count, IGNORE_DUPLICATES_FLAG + ); + return Err(DETECTED_DUPLICATE_MESSAGE.to_string()); + } + } + ImportKeystoreStatus::Error => { + eprintln!( + "Upload of keystore {} of {} failed with message: {:?}. \ + A potential solution is run this command again \ + using the --{} flag, however care should be taken to ensure \ + that there are no duplicate deposits submitted.", + i + 1, + count, + status.message, + IGNORE_DUPLICATES_FLAG + ); + return Err(format!("Upload failed with {:?}", status.message)); + } + } + } + e @ Err(UploadError::InvalidPublicKey) => { + eprintln!("Validator {} has an invalid public key", i); + return Err(format!("{:?}", e)); + } + ref e @ Err(UploadError::DuplicateValidator(voting_public_key)) => { + eprintln!( + "Duplicate validator {:?} already exists on the destination validator client. \ + This may indicate that some validators are running in two places at once, which \ + can lead to slashing. If you are certain that there is no risk, add the --{} flag.", + voting_public_key, IGNORE_DUPLICATES_FLAG + ); + return Err(format!("{:?}", e)); + } + Err(UploadError::FailedToListKeys(e)) => { + eprintln!( + "Failed to list keystores. Some keys may have been imported whilst \ + others may not have been imported. 
A potential solution is run this command again \ + using the --{} flag, however care should be taken to ensure that there are no \ + duplicate deposits submitted.", + IGNORE_DUPLICATES_FLAG + ); + return Err(format!("{:?}", e)); + } + Err(UploadError::KeyUploadFailed(e)) => { + eprintln!( + "Failed to upload keystore. Some keys may have been imported whilst \ + others may not have been imported. A potential solution is run this command again \ + using the --{} flag, however care should be taken to ensure that there are no \ + duplicate deposits submitted.", + IGNORE_DUPLICATES_FLAG + ); + return Err(format!("{:?}", e)); + } + Err(UploadError::IncorrectStatusCount(count)) => { + eprintln!( + "Keystore was uploaded, however the validator client returned an invalid response. \ + A potential solution is run this command again using the --{} flag, however care \ + should be taken to ensure that there are no duplicate deposits submitted.", + IGNORE_DUPLICATES_FLAG + ); + return Err(format!( + "Invalid status count in import response: {}", + count + )); + } + Err(UploadError::FeeRecipientUpdateFailed(e)) => { + eprintln!( + "Failed to set fee recipient for validator {}. This value may need \ + to be set manually. Continuing with other validators. Error was {:?}", + i, e + ); + } + Err(UploadError::PatchValidatorFailed(e)) => { + eprintln!( + "Failed to set some values on validator {} (e.g., builder, enabled or gas limit. \ + These values value may need to be set manually. Continuing with other validators. \ + Error was {:?}", + i, e + ); + } + } + } + + Ok(()) +} + +// The tests use crypto and are too slow in debug. 
+#[cfg(not(debug_assertions))] +#[cfg(test)] +pub mod tests { + use super::*; + use crate::create_validators::tests::TestBuilder as CreateTestBuilder; + use std::fs; + use tempfile::{tempdir, TempDir}; + use validator_client::http_api::{test_utils::ApiTester, Config as HttpConfig}; + + const VC_TOKEN_FILE_NAME: &str = "vc_token.json"; + + pub struct TestBuilder { + import_config: ImportConfig, + pub vc: ApiTester, + /// Holds the temp directory owned by the `CreateTestBuilder` so it doesn't get cleaned-up + /// before we can read it. + create_dir: Option, + _dir: TempDir, + } + + impl TestBuilder { + pub async fn new() -> Self { + Self::new_with_http_config(ApiTester::default_http_config()).await + } + + pub async fn new_with_http_config(http_config: HttpConfig) -> Self { + let dir = tempdir().unwrap(); + let vc = ApiTester::new_with_http_config(http_config).await; + let vc_token_path = dir.path().join(VC_TOKEN_FILE_NAME); + fs::write(&vc_token_path, &vc.api_token).unwrap(); + + Self { + import_config: ImportConfig { + // This field will be overwritten later on. + validators_file_path: dir.path().into(), + vc_url: vc.url.clone(), + vc_token_path, + ignore_duplicates: false, + }, + vc, + create_dir: None, + _dir: dir, + } + } + + pub fn mutate_import_config(mut self, func: F) -> Self { + func(&mut self.import_config); + self + } + + pub async fn create_validators(mut self, count: u32, first_index: u32) -> Self { + let create_result = CreateTestBuilder::default() + .mutate_config(|config| { + config.count = count; + config.first_index = first_index; + }) + .run_test() + .await; + assert!( + create_result.result.is_ok(), + "precondition: validators are created" + ); + self.import_config.validators_file_path = create_result.validators_file_path(); + self.create_dir = Some(create_result.output_dir); + self + } + + /// Imports validators without running the entire test suite in `Self::run_test`. This is + /// useful for simulating duplicate imports. 
+ pub async fn import_validators_without_checks(self) -> Self { + run(self.import_config.clone()).await.unwrap(); + self + } + + pub async fn run_test(self) -> TestResult { + let result = run(self.import_config.clone()).await; + + if result.is_ok() { + self.vc.ensure_key_cache_consistency().await; + + let local_validators: Vec = { + let contents = + fs::read_to_string(&self.import_config.validators_file_path).unwrap(); + serde_json::from_str(&contents).unwrap() + }; + let list_keystores_response = self.vc.client.get_keystores().await.unwrap().data; + + assert_eq!( + local_validators.len(), + list_keystores_response.len(), + "vc should have exactly the number of validators imported" + ); + + for local_validator in &local_validators { + let local_keystore = &local_validator.voting_keystore.0; + let local_pubkey = local_keystore.public_key().unwrap().into(); + let remote_validator = list_keystores_response + .iter() + .find(|validator| validator.validating_pubkey == local_pubkey) + .expect("validator must exist on VC"); + assert_eq!(&remote_validator.derivation_path, &local_keystore.path()); + assert_eq!(remote_validator.readonly, Some(false)); + } + } + + TestResult { + result, + vc: self.vc, + } + } + } + + #[must_use] // Use the `assert_ok` or `assert_err` fns to "use" this value. 
+ pub struct TestResult { + pub result: Result<(), String>, + pub vc: ApiTester, + } + + impl TestResult { + fn assert_ok(self) { + assert_eq!(self.result, Ok(())) + } + + fn assert_err_contains(self, msg: &str) { + assert!(self.result.unwrap_err().contains(msg)) + } + } + + #[tokio::test] + async fn create_one_validator() { + TestBuilder::new() + .await + .create_validators(1, 0) + .await + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn create_three_validators() { + TestBuilder::new() + .await + .create_validators(3, 0) + .await + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn create_one_validator_with_offset() { + TestBuilder::new() + .await + .create_validators(1, 42) + .await + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn create_three_validators_with_offset() { + TestBuilder::new() + .await + .create_validators(3, 1337) + .await + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn import_duplicates_when_disallowed() { + TestBuilder::new() + .await + .create_validators(1, 0) + .await + .import_validators_without_checks() + .await + .run_test() + .await + .assert_err_contains("DuplicateValidator"); + } + + #[tokio::test] + async fn import_duplicates_when_allowed() { + TestBuilder::new() + .await + .mutate_import_config(|config| { + config.ignore_duplicates = true; + }) + .create_validators(1, 0) + .await + .import_validators_without_checks() + .await + .run_test() + .await + .assert_ok(); + } +} diff --git a/validator_manager/src/lib.rs b/validator_manager/src/lib.rs new file mode 100644 index 0000000000..6889ee79d2 --- /dev/null +++ b/validator_manager/src/lib.rs @@ -0,0 +1,85 @@ +use clap::App; +use clap::ArgMatches; +use common::write_to_json_file; +use environment::Environment; +use serde::Serialize; +use std::path::PathBuf; +use types::EthSpec; + +pub mod common; +pub mod create_validators; +pub mod import_validators; +pub mod move_validators; + +pub const CMD: &str 
= "validator_manager"; + +/// This flag is on the top-level `lighthouse` binary. +const DUMP_CONFIGS_FLAG: &str = "dump-config"; + +/// Used only in testing, this allows a command to dump its configuration to a file and then exit +/// successfully. This allows for testing how the CLI arguments translate to some configuration. +pub enum DumpConfig { + Disabled, + Enabled(PathBuf), +} + +impl DumpConfig { + /// Returns `Ok(true)` if the configuration was successfully written to a file and the + /// application should exit successfully without doing anything else. + pub fn should_exit_early(&self, config: &T) -> Result { + match self { + DumpConfig::Disabled => Ok(false), + DumpConfig::Enabled(dump_path) => { + dbg!(dump_path); + write_to_json_file(dump_path, config)?; + Ok(true) + } + } + } +} + +pub fn cli_app<'a, 'b>() -> App<'a, 'b> { + App::new(CMD) + .visible_aliases(&["vm", "validator-manager", CMD]) + .about("Utilities for managing a Lighthouse validator client via the HTTP API.") + .subcommand(create_validators::cli_app()) + .subcommand(import_validators::cli_app()) + .subcommand(move_validators::cli_app()) +} + +/// Run the account manager, returning an error if the operation did not succeed. +pub fn run<'a, T: EthSpec>(matches: &'a ArgMatches<'a>, env: Environment) -> Result<(), String> { + let context = env.core_context(); + let spec = context.eth2_config.spec; + let dump_config = clap_utils::parse_optional(matches, DUMP_CONFIGS_FLAG)? + .map(DumpConfig::Enabled) + .unwrap_or_else(|| DumpConfig::Disabled); + + context + .executor + // This `block_on_dangerous` call reasonable since it is at the very highest level of the + // application, the rest of which is all async. All other functions below this should be + // async and should never call `block_on_dangerous` themselves. 
+ .block_on_dangerous( + async { + match matches.subcommand() { + (create_validators::CMD, Some(matches)) => { + create_validators::cli_run::(matches, &spec, dump_config).await + } + (import_validators::CMD, Some(matches)) => { + import_validators::cli_run(matches, dump_config).await + } + (move_validators::CMD, Some(matches)) => { + move_validators::cli_run(matches, dump_config).await + } + ("", _) => Err("No command supplied. See --help.".to_string()), + (unknown, _) => Err(format!( + "{} is not a valid {} command. See --help.", + unknown, CMD + )), + } + }, + "validator_manager", + ) + .ok_or("Shutting down")? +} diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs new file mode 100644 index 0000000000..fa886e8f94 --- /dev/null +++ b/validator_manager/src/move_validators.rs @@ -0,0 +1,1253 @@ +use super::common::*; +use crate::DumpConfig; +use account_utils::{read_password_from_user, ZeroizeString}; +use clap::{App, Arg, ArgMatches}; +use eth2::{ + lighthouse_vc::{ + std_types::{ + DeleteKeystoreStatus, DeleteKeystoresRequest, ImportKeystoreStatus, InterchangeJsonStr, + Status, + }, + types::{ExportKeystoresResponse, SingleExportKeystoresResponse}, + }, + SensitiveUrl, +}; +use serde::{Deserialize, Serialize}; +use std::collections::{HashMap, HashSet}; +use std::path::PathBuf; +use std::str::FromStr; +use std::time::Duration; +use tokio::time::sleep; +use types::{Address, PublicKeyBytes}; + +pub const MOVE_DIR_NAME: &str = "lighthouse-validator-move"; +pub const VALIDATOR_SPECIFICATION_FILE: &str = "validator-specification.json"; + +pub const CMD: &str = "move"; +pub const SRC_VC_URL_FLAG: &str = "src-vc-url"; +pub const SRC_VC_TOKEN_FLAG: &str = "src-vc-token"; +pub const DEST_VC_URL_FLAG: &str = "dest-vc-url"; +pub const DEST_VC_TOKEN_FLAG: &str = "dest-vc-token"; +pub const VALIDATORS_FLAG: &str = "validators"; +pub const GAS_LIMIT_FLAG: &str = "gas-limit"; +pub const FEE_RECIPIENT_FLAG: &str = 
"suggested-fee-recipient"; +pub const BUILDER_PROPOSALS_FLAG: &str = "builder-proposals"; + +const NO_VALIDATORS_MSG: &str = "No validators present on source validator client"; + +const UPLOAD_RETRY_WAIT: Duration = Duration::from_secs(5); + +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub enum PasswordSource { + /// Reads the password from the user via the terminal. + Interactive { stdin_inputs: bool }, + /// This variant is panic-y and should only be used during testing. + Testing(HashMap>), +} + +impl PasswordSource { + fn read_password(&mut self, pubkey: &PublicKeyBytes) -> Result { + match self { + PasswordSource::Interactive { stdin_inputs } => { + eprintln!("Please enter a password for keystore {:?}:", pubkey); + read_password_from_user(*stdin_inputs) + } + // This path with panic if the password list is empty. Since the + // password prompt will just keep retrying on a failed password, the + // panic helps us break the loop if we misconfigure the test. + PasswordSource::Testing(passwords) => Ok(passwords + .get_mut(pubkey) + .expect("pubkey should be known") + .remove(0) + .into()), + } + } +} + +pub fn cli_app<'a, 'b>() -> App<'a, 'b> { + App::new(CMD) + .about( + "Uploads validators to a validator client using the HTTP API. The validators \ + are defined in a JSON file which can be generated using the \"create-validators\" \ + command. This command only supports validators signing via a keystore on the local \ + file system (i.e., not Web3Signer validators).", + ) + .arg( + Arg::with_name(SRC_VC_URL_FLAG) + .long(SRC_VC_URL_FLAG) + .value_name("HTTP_ADDRESS") + .help( + "A HTTP(S) address of a validator client using the keymanager-API. 
\ + This validator client is the \"source\" and contains the validators \ + that are to be moved.", + ) + .required(true) + .requires(SRC_VC_TOKEN_FLAG) + .takes_value(true), + ) + .arg( + Arg::with_name(SRC_VC_TOKEN_FLAG) + .long(SRC_VC_TOKEN_FLAG) + .value_name("PATH") + .help("The file containing a token required by the source validator client.") + .takes_value(true), + ) + .arg( + Arg::with_name(DEST_VC_URL_FLAG) + .long(DEST_VC_URL_FLAG) + .value_name("HTTP_ADDRESS") + .help( + "A HTTP(S) address of a validator client using the keymanager-API. \ + This validator client is the \"destination\" and will have new validators \ + added as they are removed from the \"source\" validator client.", + ) + .required(true) + .requires(DEST_VC_TOKEN_FLAG) + .takes_value(true), + ) + .arg( + Arg::with_name(DEST_VC_TOKEN_FLAG) + .long(DEST_VC_TOKEN_FLAG) + .value_name("PATH") + .help("The file containing a token required by the destination validator client.") + .takes_value(true), + ) + .arg( + Arg::with_name(VALIDATORS_FLAG) + .long(VALIDATORS_FLAG) + .value_name("STRING") + .help( + "The validators to be moved. Either a list of 0x-prefixed \ + validator pubkeys or the keyword \"all\".", + ) + .takes_value(true), + ) + .arg( + Arg::with_name(COUNT_FLAG) + .long(COUNT_FLAG) + .value_name("VALIDATOR_COUNT") + .help("The number of validators to move.") + .conflicts_with(VALIDATORS_FLAG) + .takes_value(true), + ) + .arg( + Arg::with_name(GAS_LIMIT_FLAG) + .long(GAS_LIMIT_FLAG) + .value_name("UINT64") + .help( + "All created validators will use this gas limit. It is recommended \ + to leave this as the default value by not specifying this flag.", + ) + .required(false) + .takes_value(true), + ) + .arg( + Arg::with_name(FEE_RECIPIENT_FLAG) + .long(FEE_RECIPIENT_FLAG) + .value_name("ETH1_ADDRESS") + .help( + "All created validators will use this value for the suggested \ + fee recipient. 
Omit this flag to use the default value from the VC.", + ) + .required(false) + .takes_value(true), + ) + .arg( + Arg::with_name(BUILDER_PROPOSALS_FLAG) + .long(BUILDER_PROPOSALS_FLAG) + .help( + "When provided, all created validators will attempt to create \ + blocks via builder rather than the local EL.", + ) + .required(false) + .possible_values(&["true", "false"]) + .takes_value(true), + ) + .arg( + Arg::with_name(STDIN_INPUTS_FLAG) + .takes_value(false) + .hidden(cfg!(windows)) + .long(STDIN_INPUTS_FLAG) + .help("If present, read all user inputs from stdin instead of tty."), + ) +} + +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub enum Validators { + All, + Count(usize), + Specific(Vec), +} + +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub struct MoveConfig { + pub src_vc_url: SensitiveUrl, + pub src_vc_token_path: PathBuf, + pub dest_vc_url: SensitiveUrl, + pub dest_vc_token_path: PathBuf, + pub validators: Validators, + pub builder_proposals: Option, + pub fee_recipient: Option
, + pub gas_limit: Option, + pub password_source: PasswordSource, +} + +impl MoveConfig { + fn from_cli(matches: &ArgMatches) -> Result { + let count_flag = clap_utils::parse_optional(matches, COUNT_FLAG)?; + let validators_flag = matches.value_of(VALIDATORS_FLAG); + let validators = match (count_flag, validators_flag) { + (Some(count), None) => Validators::Count(count), + (None, Some(string)) => match string { + "all" => Validators::All, + pubkeys => pubkeys + .split(',') + .map(PublicKeyBytes::from_str) + .collect::, _>>() + .map(Validators::Specific)?, + }, + (None, None) => Err(format!( + "Must supply either --{VALIDATORS_FLAG} or --{COUNT_FLAG}." + ))?, + (Some(_), Some(_)) => { + Err("Cannot supply both --{VALIDATORS_FLAG} and --{COUNT_FLAG}.")? + } + }; + + Ok(Self { + src_vc_url: clap_utils::parse_required(matches, SRC_VC_URL_FLAG)?, + src_vc_token_path: clap_utils::parse_required(matches, SRC_VC_TOKEN_FLAG)?, + dest_vc_url: clap_utils::parse_required(matches, DEST_VC_URL_FLAG)?, + dest_vc_token_path: clap_utils::parse_required(matches, DEST_VC_TOKEN_FLAG)?, + validators, + builder_proposals: clap_utils::parse_optional(matches, BUILDER_PROPOSALS_FLAG)?, + fee_recipient: clap_utils::parse_optional(matches, FEE_RECIPIENT_FLAG)?, + gas_limit: clap_utils::parse_optional(matches, GAS_LIMIT_FLAG)?, + password_source: PasswordSource::Interactive { + stdin_inputs: cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG), + }, + }) + } +} + +pub async fn cli_run<'a>( + matches: &'a ArgMatches<'a>, + dump_config: DumpConfig, +) -> Result<(), String> { + let config = MoveConfig::from_cli(matches)?; + if dump_config.should_exit_early(&config)? 
{ + Ok(()) + } else { + run(config).await + } +} + +async fn run<'a>(config: MoveConfig) -> Result<(), String> { + let MoveConfig { + src_vc_url, + src_vc_token_path, + dest_vc_url, + dest_vc_token_path, + validators, + builder_proposals, + fee_recipient, + gas_limit, + mut password_source, + } = config; + + // Moving validators between the same VC is unlikely to be useful and probably indicates a user + // error. + if src_vc_url == dest_vc_url { + return Err(format!( + "--{} and --{} must be different", + SRC_VC_URL_FLAG, DEST_VC_URL_FLAG + )); + } + + let (src_http_client, src_keystores) = + vc_http_client(src_vc_url.clone(), &src_vc_token_path).await?; + let (dest_http_client, _dest_keystores) = + vc_http_client(dest_vc_url.clone(), &dest_vc_token_path).await?; + + if src_keystores.is_empty() { + return Err(NO_VALIDATORS_MSG.to_string()); + } + + let pubkeys_to_move = match validators { + Validators::All => src_keystores.iter().map(|v| v.validating_pubkey).collect(), + Validators::Count(count) => { + let mut viable_pubkeys: Vec<_> = src_keystores + .iter() + .filter(|v| !v.readonly.unwrap_or(true)) + .map(|v| v.validating_pubkey) + .collect(); + viable_pubkeys.sort_unstable_by_key(PublicKeyBytes::serialize); + viable_pubkeys + .get(0..count) + .ok_or_else(|| { + format!( + "Cannot move {} keystores since source validator client only has {} \ + keystores which are able to be moved (not read-only).", + count, + viable_pubkeys.len() + ) + })? 
+ .to_vec() + } + Validators::Specific(request_pubkeys) => { + let request_pubkeys_set: HashSet<_> = request_pubkeys.iter().collect(); + let src_pubkeys_set: HashSet<_> = + src_keystores.iter().map(|v| &v.validating_pubkey).collect(); + let difference = request_pubkeys_set + .difference(&src_pubkeys_set) + .collect::>(); + if !difference.is_empty() { + for pk in &difference { + eprintln!("{:?} is not present on {:?}", pk, src_vc_url); + } + return Err(format!( + "{} validators not found on {:?}", + difference.len(), + src_vc_url + )); + } + request_pubkeys + } + }; + + let src_keystores_map: HashMap<_, _> = src_keystores + .iter() + .map(|k| (k.validating_pubkey, k)) + .collect(); + + let count = pubkeys_to_move.len(); + for (i, &pubkey_to_move) in pubkeys_to_move.iter().enumerate() { + // Skip read-only validators rather than exiting. This makes it a bit easier to use the + // "all" flag. + if src_keystores_map + .get(&pubkey_to_move) + .ok_or("Inconsistent src keystore map")? + .readonly + .unwrap_or(true) + { + eprintln!("Skipping read-only validator {:?}", pubkey_to_move); + } + + let request = DeleteKeystoresRequest { + pubkeys: vec![pubkey_to_move], + }; + let deleted = match src_http_client.delete_lighthouse_keystores(&request).await { + Ok(deleted) => deleted, + Err(e) => { + match src_http_client.get_keystores().await { + Ok(response) => { + if response + .data + .iter() + .any(|v| v.validating_pubkey == pubkey_to_move) + { + eprintln!( + "There was an error removing a validator, however the validator \ + is still present on the source validator client. The recommended \ + solution is to run this command again." + ); + } + } + Err(_) => { + eprintln!( + "There was an error removing a validator and it's unclear if \ + the validator was removed or not. Manual user intervention is \ + required." 
+ ); + } + }; + + return Err(format!("Deleting {:?} failed with {:?}", pubkey_to_move, e)); + } + }; + + let ExportKeystoresResponse { + mut data, + slashing_protection, + } = deleted; + + if data.len() != 1 { + return Err(format!( + "Too many deleted validators from VC: {}", + data.len() + )); + } + + let exported_validator = data + .pop() + .ok_or("VC responded with zero deleted validators")?; + + let (voting_keystore, voting_keystore_password) = match exported_validator { + SingleExportKeystoresResponse { + status: + Status { + status: DeleteKeystoreStatus::Deleted, + message: _, + }, + validating_keystore, + validating_keystore_password, + } => match (validating_keystore, validating_keystore_password) { + (Some(keystore), Some(password)) => (keystore, password), + (Some(keystore), None) => { + eprintln!( + "Validator {:?} requires a password, please provide it to continue \ + moving validators. \ + The dest VC will store this password on its filesystem and the password \ + will not be required next time the dest VC starts. \ + If the provided password is incorrect the user will \ + be asked to provide another password. \ + Failing to provide the correct password now will \ + result in the keystore being deleted from the src VC \ + without being transfered to the dest VC. \ + It is strongly recommend to provide a password now rather than exiting.", + pubkey_to_move + ); + + // Read the password from the user, retrying if the password is incorrect. + loop { + match password_source.read_password(&pubkey_to_move) { + Ok(password) => { + if let Err(e) = keystore.decrypt_keypair(password.as_ref()) { + eprintln!("Failed to decrypt keystore: {:?}", e); + } else { + break (keystore, password); + } + } + Err(e) => { + eprintln!( + "Retrying after error: {:?}. If this error persists the user will need to \ + manually recover their keystore for validator {:?} from the mnemonic." + , + e, pubkey_to_move + ); + } + } + + // Add a sleep here to prevent spamming the console. 
+ sleep(Duration::from_secs(1)).await; + } + } + (None, password_opt) => { + eprintln!( + "Validator {:?} was not moved since the validator client did \ + not return a keystore. It is likely that the \ + validator has been deleted from the source validator client \ + without being moved to the destination validator client. \ + This validator will most likely need to be manually recovered \ + from a mnemonic or backup.", + pubkey_to_move + ); + return Err(format!( + "VC returned deleted but keystore not present (password {})", + password_opt.is_some() + )); + } + }, + SingleExportKeystoresResponse { + status: Status { status, .. }, + .. + } if matches!( + status, + DeleteKeystoreStatus::NotFound | DeleteKeystoreStatus::NotActive + ) => + { + eprintln!( + "Validator {:?} was not moved since it was not found or not active. This scenario \ + is unexpected and might indicate that another process is also performing \ + an export from the source validator client. Exiting now for safety. \ + If there is definitely no other process exporting validators then it \ + may be safe to run this command again.", + pubkey_to_move + ); + return Err(format!( + "VC indicated that a previously known validator was {:?}", + status, + )); + } + SingleExportKeystoresResponse { + status: Status { status, message }, + .. + } => { + eprintln!( + "Validator {:?} was not moved because the source validator client \ + indicated there was an error disabling it. Manual intervention is \ + required to recover from this scenario.", + pubkey_to_move + ); + return Err(format!( + "VC returned status {:?} with message {:?}", + status, message + )); + } + }; + + let keystore_derivation_path = voting_keystore.0.path(); + + let validator_specification = ValidatorSpecification { + voting_keystore, + voting_keystore_password, + slashing_protection: Some(InterchangeJsonStr(slashing_protection)), + fee_recipient, + gas_limit, + builder_proposals, + // Allow the VC to choose a default "enabled" state. 
Since "enabled" is not part of + // the standard API, leaving this as `None` means we are not forced to use the + // non-standard API. + enabled: None, + }; + + // We might as well just ignore validators that already exist on the destination machine, + // there doesn't appear to be much harm just adding them again and removing them from the + // source VC is an improvement. + let ignore_duplicates = true; + + loop { + match validator_specification + .clone() + .upload(&dest_http_client, ignore_duplicates) + .await + { + Ok(status) => { + match status.status { + ImportKeystoreStatus::Imported => { + eprintln!("Moved keystore {} of {}", i + 1, count); + break; + } + ImportKeystoreStatus::Duplicate => { + eprintln!("Moved duplicate keystore {} of {} to the VC", i + 1, count); + break; + } + ImportKeystoreStatus::Error => { + eprintln!( + "Upload of keystore {} of {} failed with message: {:?}.", + i + 1, + count, + status.message, + ); + // Retry uploading this validator. + sleep_with_retry_message( + &pubkey_to_move, + keystore_derivation_path.as_deref(), + ) + .await; + } + } + } + e @ Err(UploadError::InvalidPublicKey) => { + eprintln!("Validator {} has an invalid public key", i); + return Err(format!("{:?}", e)); + } + Err(UploadError::DuplicateValidator(_)) => { + return Err( + "Duplicate validator detected when duplicates are ignored".to_string() + ); + } + Err(UploadError::FailedToListKeys(e)) => { + eprintln!( + "Failed to list keystores. Some keys may have been moved whilst \ + others may not. Error was {:?}", + e + ); + // Retry uploading this validator. + sleep_with_retry_message(&pubkey_to_move, keystore_derivation_path.as_deref()) + .await; + } + Err(UploadError::KeyUploadFailed(e)) => { + eprintln!( + "Failed to upload keystore. Some keys may have been moved whilst \ + others may not. Error was {:?}", + e + ); + // Retry uploading this validator. 
+ sleep_with_retry_message(&pubkey_to_move, keystore_derivation_path.as_deref()) + .await; + } + Err(UploadError::IncorrectStatusCount(count)) => { + eprintln!( + "Keystore was uploaded, however the validator client returned an invalid response." + ); + return Err(format!( + "Invalid status count in import response: {}", + count + )); + } + Err(UploadError::FeeRecipientUpdateFailed(e)) => { + eprintln!( + "Failed to set fee recipient for validator {}. This value may need \ + to be set manually. Continuing with other validators. Error was {:?}", + i, e + ); + // Continue onto the next validator. + break; + } + Err(UploadError::PatchValidatorFailed(e)) => { + eprintln!( + "Failed to set some values on validator {} (e.g., builder, enabled or gas limit). \ + These values value may need to be set manually. Continuing with other validators. \ + Error was {:?}", + i, e + ); + // Continue onto the next validator. + break; + } + } + eprintln!( + "Uploaded keystore {} of {} to the destination VC", + i + 1, + count + ); + } + } + + eprintln!("Done."); + + Ok(()) +} + +async fn sleep_with_retry_message(pubkey: &PublicKeyBytes, path: Option<&str>) { + let path = path.unwrap_or(""); + eprintln!( + "Sleeping for {:?} before retrying. Exiting the application before it completes \ + may result in the loss of a validator keystore. The keystore would need to be \ + restored from a backup or mnemonic. The keystore which may be lost has a public \ + key of {:?} and a derivation path of {}", + UPLOAD_RETRY_WAIT, pubkey, path + ); + sleep(UPLOAD_RETRY_WAIT).await +} + +// The tests use crypto and are too slow in debug. 
+#[cfg(not(debug_assertions))] +#[cfg(test)] +mod test { + use super::*; + use crate::import_validators::tests::TestBuilder as ImportTestBuilder; + use account_utils::validator_definitions::SigningDefinition; + use std::fs; + use tempfile::{tempdir, TempDir}; + use validator_client::http_api::{test_utils::ApiTester, Config as HttpConfig}; + + const SRC_VC_TOKEN_FILE_NAME: &str = "src_vc_token.json"; + const DEST_VC_TOKEN_FILE_NAME: &str = "dest_vc_token.json"; + + type MutatePasswordFn = Box>)>; + + struct TestBuilder { + src_import_builder: Option, + dest_import_builder: Option, + http_config: HttpConfig, + duplicates: usize, + dir: TempDir, + move_back_again: bool, + remove_passwords_from_src_vc: bool, + mutate_passwords: Option, + passwords: HashMap>, + use_password_files: bool, + reuse_password_files: Option, + } + + impl TestBuilder { + async fn new() -> Self { + let dir = tempdir().unwrap(); + Self { + src_import_builder: None, + dest_import_builder: None, + http_config: ApiTester::default_http_config(), + duplicates: 0, + dir, + move_back_again: false, + remove_passwords_from_src_vc: false, + mutate_passwords: None, + passwords: <_>::default(), + use_password_files: false, + reuse_password_files: None, + } + } + + fn move_back_again(mut self) -> Self { + self.move_back_again = true; + self + } + + fn use_password_files(mut self) -> Self { + self.use_password_files = true; + self.http_config.store_passwords_in_secrets_dir = true; + self + } + + fn reuse_password_files(mut self, index: usize) -> Self { + self.reuse_password_files = Some(index); + self + } + + async fn with_src_validators(mut self, count: u32, first_index: u32) -> Self { + let builder = ImportTestBuilder::new_with_http_config(self.http_config.clone()) + .await + .create_validators(count, first_index) + .await; + self.src_import_builder = Some(builder); + self + } + + async fn with_dest_validators(mut self, count: u32, first_index: u32) -> Self { + let builder = 
ImportTestBuilder::new_with_http_config(self.http_config.clone()) + .await + .create_validators(count, first_index) + .await; + self.dest_import_builder = Some(builder); + self + } + + fn register_duplicates(mut self, num_duplicates: usize) -> Self { + self.duplicates = num_duplicates; + self + } + + fn remove_passwords_from_src_vc(mut self) -> Self { + self.remove_passwords_from_src_vc = true; + self + } + + fn mutate_passwords>) + 'static>( + mut self, + func: F, + ) -> Self { + self.mutate_passwords = Some(Box::new(func)); + self + } + + async fn move_validators( + &self, + gen_validators_enum: F, + src_vc: &ApiTester, + dest_vc: &ApiTester, + ) -> Result<(), String> + where + F: Fn(&[PublicKeyBytes]) -> Validators, + { + let src_vc_token_path = self.dir.path().join(SRC_VC_TOKEN_FILE_NAME); + fs::write(&src_vc_token_path, &src_vc.api_token).unwrap(); + let (src_vc_client, src_vc_initial_keystores) = + vc_http_client(src_vc.url.clone(), &src_vc_token_path) + .await + .unwrap(); + + let src_vc_initial_pubkeys: Vec<_> = src_vc_initial_keystores + .iter() + .map(|k| k.validating_pubkey) + .collect(); + let validators = gen_validators_enum(&src_vc_initial_pubkeys); + + let dest_vc_token_path = self.dir.path().join(DEST_VC_TOKEN_FILE_NAME); + fs::write(&dest_vc_token_path, &dest_vc.api_token).unwrap(); + + let (dest_vc_client, dest_vc_initial_keystores) = + vc_http_client(dest_vc.url.clone(), &dest_vc_token_path) + .await + .unwrap(); + + let move_config = MoveConfig { + src_vc_url: src_vc.url.clone(), + src_vc_token_path, + dest_vc_url: dest_vc.url.clone(), + dest_vc_token_path: dest_vc_token_path.clone(), + validators: validators.clone(), + builder_proposals: None, + fee_recipient: None, + gas_limit: None, + password_source: PasswordSource::Testing(self.passwords.clone()), + }; + + let result = run(move_config).await; + + if result.is_ok() { + let src_vc_final_keystores = src_vc_client.get_keystores().await.unwrap().data; + let dest_vc_final_keystores = 
dest_vc_client.get_keystores().await.unwrap().data; + + src_vc.ensure_key_cache_consistency().await; + dest_vc.ensure_key_cache_consistency().await; + + match validators { + Validators::All => { + assert!( + src_vc_final_keystores.is_empty(), + "all keystores should be removed from source vc" + ); + assert_eq!( + dest_vc_final_keystores.len(), + dest_vc_initial_keystores.len() + src_vc_initial_keystores.len() + - self.duplicates, + "the correct count of keystores should have been moved to the dest" + ); + for initial_keystore in &src_vc_initial_keystores { + assert!( + dest_vc_final_keystores.contains(initial_keystore), + "the source keystore should be present at the dest" + ); + assert!( + !src_vc + .secrets_dir + .path() + .join(format!("{:?}", initial_keystore.validating_pubkey)) + .exists(), + "the source password file should be deleted" + ) + } + } + Validators::Count(count) => { + assert_eq!( + src_vc_final_keystores.len(), + src_vc_initial_keystores.len() - count, + "keystores should be removed from source vc" + ); + assert_eq!( + dest_vc_final_keystores.len(), + dest_vc_initial_keystores.len() + count - self.duplicates, + "the correct count of keystores should have been moved to the dest" + ); + let moved_keystores: Vec<_> = { + let initial_set: HashSet<_> = src_vc_initial_keystores.iter().collect(); + let final_set: HashSet<_> = src_vc_final_keystores.iter().collect(); + initial_set.difference(&final_set).cloned().collect() + }; + assert_eq!(moved_keystores.len(), count); + for moved_keystore in &moved_keystores { + assert!( + dest_vc_final_keystores.contains(moved_keystore), + "the moved keystore should be present at the dest" + ); + assert!( + !src_vc + .secrets_dir + .path() + .join(format!("{:?}", moved_keystore.validating_pubkey)) + .exists(), + "the source password file should be deleted" + ) + } + } + Validators::Specific(pubkeys) => { + assert_eq!( + src_vc_final_keystores.len(), + src_vc_initial_keystores + .len() + .checked_sub(pubkeys.len()) + 
.unwrap(), + "the correct count of validators should have been removed from the src" + ); + assert_eq!( + dest_vc_final_keystores.len(), + dest_vc_initial_keystores.len() + pubkeys.len() - self.duplicates, + "the correct count of keystores should have been moved to the dest" + ); + for pubkey in pubkeys { + let initial_keystore = src_vc_initial_keystores + .iter() + .find(|k| k.validating_pubkey == pubkey) + .unwrap(); + assert!( + !src_vc_final_keystores.contains(initial_keystore), + "the keystore should not be present at the source" + ); + assert!( + dest_vc_final_keystores.contains(initial_keystore), + "the keystore should be present at the dest" + ); + if self.reuse_password_files.is_some() { + assert!( + src_vc + .secrets_dir + .path() + .join(format!("{:?}", pubkey)) + .exists(), + "the source password file was used by another validator and should not be deleted" + ) + } else { + assert!( + !src_vc + .secrets_dir + .path() + .join(format!("{:?}", pubkey)) + .exists(), + "the source password file should be deleted" + ) + } + } + } + } + + // If enabled, check that all VCs still have the password files for their validators. 
+ if self.use_password_files { + src_vc_final_keystores + .iter() + .map(|keystore| (&src_vc, keystore)) + .chain( + dest_vc_final_keystores + .iter() + .map(|keystore| (&dest_vc, keystore)), + ) + .for_each(|(vc, keystore)| { + assert!( + vc.secrets_dir + .path() + .join(format!("{:?}", keystore.validating_pubkey)) + .exists(), + "the password file should exist" + ) + }); + } + } + + result + } + + async fn run_test(mut self, gen_validators_enum: F) -> TestResult + where + F: Fn(&[PublicKeyBytes]) -> Validators + Copy, + { + let src_vc = if let Some(import_builder) = self.src_import_builder.take() { + let import_test_result = import_builder.run_test().await; + assert!(import_test_result.result.is_ok()); + import_test_result.vc + } else { + ApiTester::new_with_http_config(self.http_config.clone()).await + }; + + // If enabled, set all the validator definitions on the src_vc to + // use the same password path as the given `master_index`. This + // helps test that we don't delete a password file if it's in use by + // another validator. + if let Some(primary_index) = self.reuse_password_files { + let mut initialized_validators = src_vc.initialized_validators.write(); + let definitions = initialized_validators.as_mut_slice_testing_only(); + // Find the path of the "primary" definition. + let primary_path = definitions + .get(primary_index) + .map(|def| match &def.signing_definition { + SigningDefinition::LocalKeystore { + voting_keystore_password_path: Some(path), + .. + } => path.clone(), + _ => panic!("primary index does not have password path"), + }) + .unwrap(); + // Set all definitions to use the same password path as the primary. + definitions.iter_mut().enumerate().for_each(|(_, def)| { + match &mut def.signing_definition { + SigningDefinition::LocalKeystore { + voting_keystore_password_path: Some(path), + .. 
+ } => *path = primary_path.clone(), + _ => (), + } + }) + } + + let dest_vc = if let Some(import_builder) = self.dest_import_builder.take() { + let import_test_result = import_builder.run_test().await; + assert!(import_test_result.result.is_ok()); + import_test_result.vc + } else { + ApiTester::new_with_http_config(self.http_config.clone()).await + }; + + if self.remove_passwords_from_src_vc { + let passwords = src_vc + .initialized_validators + .write() + .delete_passwords_from_validator_definitions() + .unwrap(); + + self.passwords = passwords + .into_iter() + .map(|(pubkey, password)| { + ( + PublicKeyBytes::from(&pubkey), + vec![password.as_str().to_string()], + ) + }) + .collect(); + + if let Some(func) = self.mutate_passwords.take() { + func(&mut self.passwords) + } + } + + let result = self + .move_validators(gen_validators_enum, &src_vc, &dest_vc) + .await; + + if self.move_back_again { + self.move_validators(gen_validators_enum, &dest_vc, &src_vc) + .await + .unwrap(); + } + + TestResult { result } + } + } + + #[must_use] // Use the `assert_ok` or `assert_err` fns to "use" this value. 
+ struct TestResult { + result: Result<(), String>, + } + + impl TestResult { + fn assert_ok(self) { + assert_eq!(self.result, Ok(())) + } + + fn assert_err(self) { + assert!(self.result.is_err()) + } + + fn assert_err_is(self, msg: String) { + assert_eq!(self.result, Err(msg)) + } + } + + #[tokio::test] + async fn no_validators() { + TestBuilder::new() + .await + .run_test(|_| Validators::All) + .await + .assert_err_is(NO_VALIDATORS_MSG.to_string()); + } + + #[tokio::test] + async fn one_validator_move_all() { + TestBuilder::new() + .await + .with_src_validators(1, 0) + .await + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + #[tokio::test] + async fn one_validator_move_one() { + TestBuilder::new() + .await + .with_src_validators(1, 0) + .await + .run_test(|pubkeys| Validators::Specific(pubkeys.to_vec())) + .await + .assert_ok(); + } + + #[tokio::test] + async fn one_validator_to_non_empty_dest() { + TestBuilder::new() + .await + .with_src_validators(1, 0) + .await + .with_dest_validators(1, 10) + .await + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + #[tokio::test] + async fn two_validators_move_all_where_one_is_a_duplicate() { + TestBuilder::new() + .await + .with_src_validators(2, 0) + .await + .with_dest_validators(1, 1) + .await + .register_duplicates(1) + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + #[tokio::test] + async fn two_validators_move_one_where_one_is_a_duplicate() { + TestBuilder::new() + .await + .with_src_validators(2, 0) + .await + .with_dest_validators(2, 0) + .await + .register_duplicates(1) + .run_test(|pubkeys| Validators::Specific(pubkeys[0..1].to_vec())) + .await + .assert_ok(); + } + + #[tokio::test] + async fn three_validators_move_all() { + TestBuilder::new() + .await + .with_src_validators(3, 0) + .await + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + #[tokio::test] + async fn three_validators_move_one() { + TestBuilder::new() + .await + .with_src_validators(3, 0) 
+ .await + .run_test(|pubkeys| Validators::Specific(pubkeys[0..1].to_vec())) + .await + .assert_ok(); + } + + #[tokio::test] + async fn three_validators_move_two() { + TestBuilder::new() + .await + .with_src_validators(3, 0) + .await + .run_test(|pubkeys| Validators::Specific(pubkeys[0..2].to_vec())) + .await + .assert_ok(); + } + + #[tokio::test] + async fn three_validators_move_three() { + TestBuilder::new() + .await + .with_src_validators(3, 42) + .await + .run_test(|pubkeys| Validators::Specific(pubkeys.to_vec())) + .await + .assert_ok(); + } + + #[tokio::test] + async fn three_validators_move_one_by_count() { + TestBuilder::new() + .await + .with_src_validators(3, 0) + .await + .run_test(|_| Validators::Count(1)) + .await + .assert_ok(); + } + + #[tokio::test] + async fn three_validators_move_two_by_count() { + TestBuilder::new() + .await + .with_src_validators(3, 0) + .await + .run_test(|_| Validators::Count(2)) + .await + .assert_ok(); + } + + #[tokio::test] + async fn one_validators_move_two_by_count() { + TestBuilder::new() + .await + .with_src_validators(1, 0) + .await + .run_test(|_| Validators::Count(2)) + .await + .assert_err(); + } + + #[tokio::test] + async fn two_validator_move_all_and_back_again() { + TestBuilder::new() + .await + .with_src_validators(2, 0) + .await + .move_back_again() + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + #[tokio::test] + async fn two_validator_move_all_passwords_removed() { + TestBuilder::new() + .await + .with_src_validators(2, 0) + .await + .remove_passwords_from_src_vc() + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + /// This test simulates a src VC that doesn't know the keystore passwords + /// and provide the wrong password before providing the correct password. 
+ #[tokio::test] + async fn two_validator_move_all_passwords_removed_failed_password_attempt() { + TestBuilder::new() + .await + .with_src_validators(2, 0) + .await + .remove_passwords_from_src_vc() + .mutate_passwords(|passwords| { + passwords.iter_mut().for_each(|(_, passwords)| { + passwords.insert(0, "wrong-password".to_string()); + passwords.push("wrong-password".to_string()); + }) + }) + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + /// This test simulates a src VC that doesn't know the keystore passwords + /// and we have not provided the correct password. + #[should_panic] + #[tokio::test] + async fn two_validator_move_all_passwords_removed_without_correct_password() { + TestBuilder::new() + .await + .with_src_validators(2, 0) + .await + .remove_passwords_from_src_vc() + .mutate_passwords(|passwords| { + passwords + .iter_mut() + .for_each(|(_, passwords)| *passwords = vec!["wrong-password".to_string()]) + }) + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + #[tokio::test] + async fn one_validator_move_all_with_password_files() { + TestBuilder::new() + .await + .use_password_files() + .with_src_validators(1, 0) + .await + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + #[tokio::test] + async fn two_validators_move_one_with_identical_password_files() { + TestBuilder::new() + .await + .use_password_files() + // The password file for validator 0 will be shared with other + // validators on the src vc. 
+ .reuse_password_files(0) + .with_src_validators(2, 0) + .await + .run_test(|validators| Validators::Specific(validators[0..1].to_vec())) + .await + .assert_ok(); + } +} diff --git a/validator_manager/test_vectors/.gitignore b/validator_manager/test_vectors/.gitignore new file mode 100644 index 0000000000..3fec32c842 --- /dev/null +++ b/validator_manager/test_vectors/.gitignore @@ -0,0 +1 @@ +tmp/ diff --git a/validator_manager/test_vectors/generate.py b/validator_manager/test_vectors/generate.py new file mode 100644 index 0000000000..722414de73 --- /dev/null +++ b/validator_manager/test_vectors/generate.py @@ -0,0 +1,123 @@ +# This script uses the `ethereum/staking-deposit-cli` tool to generate +# deposit data files which are then used for testing by Lighthouse. +# +# To generate vectors, simply run this Python script: +# +# `python generate.py` +# +import os +import sys +import shutil +import subprocess +from subprocess import Popen, PIPE, STDOUT + + +NUM_VALIDATORS=3 +TEST_MNEMONIC = "test test test test test test test test test test test waste" +WALLET_NAME="test_wallet" + + +tmp_dir = os.path.join(".", "tmp") +mnemonic_path = os.path.join(tmp_dir, "mnemonic.txt") +sdc_dir = os.path.join(tmp_dir, "sdc") +sdc_git_dir = os.path.join(sdc_dir, "staking-deposit-cli") +vectors_dir = os.path.join(".", "vectors") + + +def setup(): + cleanup() + + if os.path.exists(vectors_dir): + shutil.rmtree(vectors_dir) + + os.mkdir(tmp_dir) + os.mkdir(sdc_dir) + os.mkdir(vectors_dir) + + setup_sdc() + with open(mnemonic_path, "x") as file: + file.write(TEST_MNEMONIC) + + +def cleanup(): + if os.path.exists(tmp_dir): + shutil.rmtree(tmp_dir) + + # Remove all the keystores since we don't use them in testing. 
+ if os.path.exists(vectors_dir): + for root, dirs, files in os.walk(vectors_dir): + for file in files: + if file.startswith("keystore"): + os.remove(os.path.join(root, file)) + + +def setup_sdc(): + result = subprocess.run([ + "git", + "clone", + "--single-branch", + "https://github.com/ethereum/staking-deposit-cli.git", + str(sdc_git_dir) + ]) + assert(result.returncode == 0) + result = subprocess.run([ + "pip", + "install", + "-r", + "requirements.txt", + ], cwd=sdc_git_dir) + assert(result.returncode == 0) + result = subprocess.run([ + "python", + "setup.py", + "install", + ], cwd=sdc_git_dir) + assert(result.returncode == 0) + + +def sdc_generate(network, first_index, count, eth1_withdrawal_address=None): + if eth1_withdrawal_address is not None: + eth1_flags = ['--eth1_withdrawal_address', eth1_withdrawal_address] + uses_eth1 = True + else: + eth1_flags = [] + uses_eth1 = False + + test_name = "{}_first_{}_count_{}_eth1_{}".format(network, first_index, count, + str(uses_eth1).lower()) + output_dir = os.path.join(vectors_dir, test_name) + os.mkdir(output_dir) + + command = [ + '/bin/sh', + 'deposit.sh', + '--language', 'english', + '--non_interactive', + 'existing-mnemonic', + '--validator_start_index', str(first_index), + '--num_validators', str(count), + '--mnemonic', TEST_MNEMONIC, + '--chain', network, + '--keystore_password', 'MyPassword', + '--folder', os.path.abspath(output_dir), + ] + eth1_flags + + print("Running " + test_name) + process = Popen(command, cwd=sdc_git_dir, text=True, stdin = PIPE) + process.wait() + + +def test_network(network): + sdc_generate(network, first_index=0, count=1) + sdc_generate(network, first_index=0, count=2) + sdc_generate(network, first_index=12, count=1) + sdc_generate(network, first_index=99, count=2) + sdc_generate(network, first_index=1024, count=3) + sdc_generate(network, first_index=0, count=2, + eth1_withdrawal_address="0x0f51bb10119727a7e5ea3538074fb341f56b09ad") + + +setup() +test_network("mainnet") 
+test_network("prater") +cleanup() diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json new file mode 100644 index 0000000000..31c00c57f2 --- /dev/null +++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json @@ -0,0 +1 @@ +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json new file mode 100644 index 0000000000..2880b7724c --- /dev/null +++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json @@ -0,0 +1 @@ +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": 
"8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "84b9fc8f260a1488c4c9a438f875edfa2bac964d651b2bc886d8442829b13f89752e807c8ca9bae9d50b1b506d3a64730015dd7f91e271ff9c1757d1996dcf6082fe5205cf6329fa2b6be303c21b66d75be608757a123da6ee4a4f14c01716d7", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "cd991ea8ff32e6b3940aed43b476c720fc1abd3040893b77a8a3efb306320d4c", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803684.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803684.json new file mode 100644 index 0000000000..da92a1d0d9 --- /dev/null +++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803684.json @@ -0,0 +1 @@ +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": 
"a8461b58a5a5a0573c4af37da6ee4ba63e35894cffad6797d4a2c80f8f2c79d2c30c0de0299d8edde76e0c3f3e6d4f1e03cc377969f56d8760717d6e86f9316da9375573ce7bb87a8520daedb13c49284377f7a4f64a70aa2ca44b1581d47e20", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "d26d642a880ff8a109260fe69681840f6e1868c8c1cd2163a1db5a094e8db03a", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "93a398c09143203beb94c9223c7e18f36e5ea36090875284b222c2fcb16982e6f2e26f27ca9d30e3c6f6b5ad44857fc50f531925f4736810712f68a9d7a9c0eb664a851180f3b7d2e44a35717d43b3d3e4fd555354fa1dfa92f451870f36084d", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "7c7617a2c11870ec49e975b3691b9f822d63938df38555161e23aa245b150c66", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803679.json b/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803679.json new file mode 100644 index 0000000000..9cc01dc0df --- /dev/null +++ b/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803679.json @@ -0,0 +1 @@ +[{"pubkey": "92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": 
"a0a96851892b257c032284928641021e58e0bcd277c3da5a2c41bcce6633d144781e4761261138277b5a8cf0ead59cce073e5a3bbc4704a37abf8cd1e290dc52e56cb0c334303945ebbb79be453c8177937e44e08f980679f1a2997fe58d2d86", "deposit_message_root": "5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "2bedaf48f8315d8631defc97c1c4c05a8152e2dc3fe779fc8e800dd67bd839a2", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "b469179ad8ba9d6ad71b99a3c7ae662d9b77cca3ee53b20ab2eb20beee31874ad47224e94e75578fa6ecd30c1d40a0b300053817f934169d84425691edf13216445fbc6dd9b0953ad3af20c834fba63c1f50c0b0f92dd8bf383cd2cc8e0431f1", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "69862477671957ab0b3f1167c5cd550c107132a0079eb70eaa4bc5c5fe06b5a0", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "a8b05626657ce5b1801e0824aaeb21de2e1a11bc16cad6100ac911bcb873aaf7e7282f1f8465df4aaea998a1a4e1645f075e7e65f8c6b8688b0162f86be2128541f91fc9feb628bcab3b4afec1f7aeccaba04aaa54dc17c738233d360f94b97e", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "34ef32901d793cd9a0a3d93e7ee40e7be9abe6fb26f0b49a86b8ff29dc649930", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git 
a/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803672.json b/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803672.json new file mode 100644 index 0000000000..3a971d0959 --- /dev/null +++ b/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803672.json @@ -0,0 +1 @@ +[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": "a57299cde3c2ea8dc17ad3ce5a38a5f6de69d198599150dc4df02624ba1d8672440d02c0d27c3dc3b8c9f86c679571ab14c798426acd9b059895f1f5887bdee805fb4e31bd8f93ec9e78403c23d7924f23eae6af056154f35fee03bf9ffe0e98", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "246619823b45d80f53a30404542ec4be447d4e268cc0afcdf480e6a846d58411", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803675.json b/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803675.json new file mode 100644 index 0000000000..2efa5c4ec8 --- /dev/null +++ b/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803675.json @@ -0,0 +1 @@ +[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": 
"8ca8a6f30b4346d7b9912e3dcd820652bc472511f89d91fd102acfb0c8df1cfc7a2629f44170727e126e88f2847fe5c9081b13fb0838a2b2343a95cabf16f57708fc0cf846bc5307209ae976c34500cc826ff48ab64169d8bebec99dded5dd1d", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "c0c6cd40b43ea0fe7fcc284de9acd9c1bd001bb88c059c155393af22a6c85d46", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": "8c0784645c611b4f514a6519b737f2d02df3eba0e04cd30efebffcca769af8cc599ce28e4421cefe665ec31d3c34e44c174e0cca4891d8196796085e712459b45e411efecd07cf3258f1d6309a07a6dd52a0ae186e6184d37bf11cee36ec84e8", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "c57790b77ef97318d4ec7b97ea07ea458d08209ba372bfe76171e2ece22d6130", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803687.json b/validator_manager/test_vectors/vectors/prater_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803687.json new file mode 100644 index 0000000000..c736d75b7e --- /dev/null +++ b/validator_manager/test_vectors/vectors/prater_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803687.json @@ -0,0 +1 @@ +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": 
"a940e0142ad9b56a1310326137347d1ada275b31b3748af4accc63bd189573376615be8e8ae047766c6d10864e54b2e7098177598edf3a043eb560bbdf1a1c12588375a054d1323a0900e2286d0993cde9675e5b74523e6e8e03715cc96b3ce5", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "28484efb20c961a1354689a556d4c352fe9deb24684efdb32d22e1af17e2a45d", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803690.json b/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803690.json new file mode 100644 index 0000000000..e86500d14f --- /dev/null +++ b/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803690.json @@ -0,0 +1 @@ +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "a940e0142ad9b56a1310326137347d1ada275b31b3748af4accc63bd189573376615be8e8ae047766c6d10864e54b2e7098177598edf3a043eb560bbdf1a1c12588375a054d1323a0900e2286d0993cde9675e5b74523e6e8e03715cc96b3ce5", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "28484efb20c961a1354689a556d4c352fe9deb24684efdb32d22e1af17e2a45d", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": 
"87b4b4e9c923aa9e1687219e9df0e838956ee6e15b7ab18142467430d00940dc7aa243c9996e85125dfe72d9dbdb00a30a36e16a2003ee0c86f29c9f5d74f12bfe5b7f62693dbf5187a093555ae8d6b48acd075788549c4b6a249b397af24cd0", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "ea80b639356a03f6f58e4acbe881fabefc9d8b93375a6aa7e530c77d7e45d3e4", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803705.json b/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803705.json new file mode 100644 index 0000000000..c79ae5a4fc --- /dev/null +++ b/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803705.json @@ -0,0 +1 @@ +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "ab32595d8201c2b4e8173aece9151fdc15f4d2ad36008462d0416598ddbf0f37ed0877f06d284a9669e73dbc0885bd2207fe64385e95a4488dc2bcb2c324d5c20da3248a6244463583dfbba8db20805765421e59cb56b0bc3ee6d24a9218216d", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "b4df3a3a26dd5f6eb32999d8a7051a7d1a8573a16553d4b45ee706a0d59c1066", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": 
"9655e195eda5517efe6f36bcebd45250c889a4177d7bf5fcd59598d2d03f37f038b5ee2ec079a30a8382ea42f351943f08a6f006bab9c2130db2742bd7315c8ad5aa1f03a0801c26d4c9efdef71c4c59c449c7f9b21fa62600ab8f5f1e2b938a", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "7661474fba11bfb453274f62df022cab3c0b6f4a58af4400f6bce83c9cb5fcb8", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803701.json b/validator_manager/test_vectors/vectors/prater_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803701.json new file mode 100644 index 0000000000..136dc38554 --- /dev/null +++ b/validator_manager/test_vectors/vectors/prater_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803701.json @@ -0,0 +1 @@ +[{"pubkey": "92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "b5dae79ce8f3d7326b46f93182981c5f3d64257a457f038caa78ec8e5cc25a9fdac52c7beb221ab2a3205404131366ad18e1e13801393b3d486819e8cca96128bf1244884a91d05dced092c74bc1e7259788f30dd3432df15f3d2f629645f345", "deposit_message_root": "5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "94213d76aba9e6a434589d4939dd3764e0832df78f66d30db22a760c14ba1b89", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": 
"816f38a321c4f84ad5187eda58f6d9c1fd1e81c860ed1722bdb76b920fdd430a1e814b9bb893837ae3b38ad738684fbf1795fa687f617c52121472b1ac8d2e34e5c1127186233a8833ffb54c509d9e52cb7242c6c6a65b5e496296b3caa90d89", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "7ad1d059d69794680a1deef5e72c33827f0c449a5f0917095821c0343572789d", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "95d20c35484dea6b2a0bd7c2da2d2e810d7829e14c03657b2524adfc2111aa5ed95908ecb975ff75ff742c68ce8df417016c048959b0f807675430f6d981478e26d48e594e0830a0406da9817f8a1ecb94bd8be1f9281eeb5e952a82173c72bb", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "83abfb2a166f7af708526a9bdd2767c4be3cd231c9bc4e2f047a80df88a2860c", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803693.json b/validator_manager/test_vectors/vectors/prater_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803693.json new file mode 100644 index 0000000000..ccd2ece069 --- /dev/null +++ b/validator_manager/test_vectors/vectors/prater_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803693.json @@ -0,0 +1 @@ +[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": 
"8f75836ceb390dd4fc8c16bc4be52ca09b9c5aa0ab5bc16dcfdb344787b29ddfd76d877b0a2330bc8e904b233397c6bd124845d1b868e4951cb6daacea023c986bdf0c6ac28d73f65681d941ea96623bc23acc7c84dcfc1304686240d9171cfc", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "3011f5cac32f13e86ecc061e89ed6675c27a46ab6ecb1ec6f6e5f133ae1d0287", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803696.json b/validator_manager/test_vectors/vectors/prater_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803696.json new file mode 100644 index 0000000000..2ab5908307 --- /dev/null +++ b/validator_manager/test_vectors/vectors/prater_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803696.json @@ -0,0 +1 @@ +[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": "a7706e102bfb0b986a5c8050044f7e221919463149771a92c3ca46ff7d4564867db48eaf89b5237fed8db2cdb9c9c057099d0982bbdb3fbfcbe0ab7259ad3f31f7713692b78ee25e6251982e7081d049804632b70b8a24d8c3e59b624a0bd221", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "8a26fbee0c3a99fe090af1fce68afc525b4e7efa70df72abaa91f29148b2f672", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": 
"8b7aa5b0e97d15ec8c2281b919fde9e064f6ac064b163445ea99441ab063f9d10534bfde861b5606021ae46614ff075e0c2305ce5a6cbcc9f0bc8e7df1a177c4d969a5ed4ac062b0ea959bdac963fe206b73565a1a3937adcca736c6117c15f0", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "d38575167a94b516455c5b7e36d24310a612fa0f4580446c5f9d45e4e94f0642", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file From b60304b19f1607c12d52e32356678cef6c7ba4e2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 8 Aug 2023 23:30:15 +0000 Subject: [PATCH 37/75] Use `BeaconProcessor` for API requests (#4462) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Issue Addressed NA ## Proposed Changes Rather than spawning new tasks on the tokio executor to process each HTTP API request, send the tasks to the `BeaconProcessor`. This achieves: 1. Places a bound on how many concurrent requests are being served (i.e., how many we are actually trying to compute at one time). 1. Places a bound on how many requests can be awaiting a response at one time (i.e., starts dropping requests when we have too many queued). 1. Allows the BN prioritise HTTP requests with respect to messages coming from the P2P network (i.e., proiritise importing gossip blocks rather than serving API requests). Presently there are two levels of priorities: - `Priority::P0` - The beacon processor will prioritise these above everything other than importing new blocks. - Roughly all validator-sensitive endpoints. - `Priority::P1` - The beacon processor will prioritise practically all other P2P messages over these, except for historical backfill things. 
- Everything that's not `Priority::P0` The `--http-enable-beacon-processor false` flag can be supplied to revert back to the old behaviour of spawning new `tokio` tasks for each request: ``` --http-enable-beacon-processor The beacon processor is a scheduler which provides quality-of-service and DoS protection. When set to "true", HTTP API requests will queued and scheduled alongside other tasks. When set to "false", HTTP API responses will be executed immediately. [default: true] ``` ## New CLI Flags I added some other new CLI flags: ``` --beacon-processor-aggregate-batch-size Specifies the number of gossip aggregate attestations in a signature verification batch. Higher values may reduce CPU usage in a healthy network while lower values may increase CPU usage in an unhealthy or hostile network. [default: 64] --beacon-processor-attestation-batch-size Specifies the number of gossip attestations in a signature verification batch. Higher values may reduce CPU usage in a healthy network whilst lower values may increase CPU usage in an unhealthy or hostile network. [default: 64] --beacon-processor-max-workers Specifies the maximum concurrent tasks for the task scheduler. Increasing this value may increase resource consumption. Reducing the value may result in decreased resource usage and diminished performance. The default value is the number of logical CPU cores on the host. --beacon-processor-reprocess-queue-len Specifies the length of the queue for messages requiring delayed processing. Higher values may prevent messages from being dropped while lower values may help protect the node from becoming overwhelmed. [default: 12288] ``` I needed to add the max-workers flag since the "simulator" flavor tests started failing with HTTP timeouts on the test assertions. I believe they were failing because the Github runners only have 2 cores and there just weren't enough workers available to process our requests in time. 
I added the other flags since they seem fun to fiddle with. ## Additional Info I bumped the timeouts on the "simulator" flavor test from 4s to 8s. The prioritisation of consensus messages seems to be causing slower responses, I guess this is what we signed up for 🤷 The `validator/register` validator has some special handling because the relays have a bad habit of timing out on these calls. It seems like a waste of a `BeaconProcessor` worker to just wait for the builder API HTTP response, so we spawn a new `tokio` task to wait for a builder response. I've added an optimisation for the `GET beacon/states/{state_id}/validators/{validator_id}` endpoint in [efbabe3](https://github.com/sigp/lighthouse/pull/4462/commits/efbabe32521ed6eb3564764da4e507d26a1c4bd0). That's the endpoint the VC uses to resolve pubkeys to validator indices, and it's the endpoint that was causing us grief. Perhaps I should move that into a new PR, not sure. --- Cargo.lock | 5 + beacon_node/beacon_chain/src/chain_config.rs | 3 - beacon_node/beacon_processor/Cargo.toml | 4 +- beacon_node/beacon_processor/src/lib.rs | 127 +- beacon_node/beacon_processor/src/metrics.rs | 9 + beacon_node/client/src/builder.rs | 58 +- beacon_node/client/src/config.rs | 3 + beacon_node/http_api/Cargo.toml | 13 +- beacon_node/http_api/src/lib.rs | 2050 ++++++++++------- beacon_node/http_api/src/task_spawner.rs | 214 ++ beacon_node/http_api/src/test_utils.rs | 59 +- beacon_node/http_api/src/validator.rs | 21 + beacon_node/http_api/tests/tests.rs | 10 +- .../src/network_beacon_processor/mod.rs | 22 +- .../src/network_beacon_processor/tests.rs | 37 +- beacon_node/network/src/service/tests.rs | 21 +- beacon_node/src/cli.rs | 62 + beacon_node/src/config.rs | 28 +- beacon_node/src/lib.rs | 1 + lighthouse/Cargo.toml | 1 + lighthouse/tests/beacon_node.rs | 58 +- testing/node_test_rig/src/lib.rs | 7 +- watch/Cargo.toml | 1 + watch/tests/tests.rs | 12 +- 24 files changed, 1864 insertions(+), 962 deletions(-) create mode 100644 
beacon_node/http_api/src/task_spawner.rs create mode 100644 beacon_node/http_api/src/validator.rs diff --git a/Cargo.lock b/Cargo.lock index a24087c3a0..233b3901eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -601,7 +601,9 @@ dependencies = [ "lighthouse_metrics", "lighthouse_network", "logging", + "num_cpus", "parking_lot 0.12.1", + "serde", "slog", "slot_clock", "strum", @@ -3266,6 +3268,7 @@ name = "http_api" version = "0.1.0" dependencies = [ "beacon_chain", + "beacon_processor", "bs58 0.4.0", "bytes", "directory", @@ -4320,6 +4323,7 @@ dependencies = [ "account_manager", "account_utils", "beacon_node", + "beacon_processor", "bls", "boot_node", "clap", @@ -8893,6 +8897,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", + "task_executor", "testcontainers", "tokio", "tokio-postgres", diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index efbc9905b7..d1bddcf736 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -79,8 +79,6 @@ pub struct ChainConfig { /// /// This is useful for block builders and testing. pub always_prepare_payload: bool, - /// Whether backfill sync processing should be rate-limited. - pub enable_backfill_rate_limiting: bool, /// Whether to use `ProgressiveBalancesCache` in unrealized FFG progression calculation. pub progressive_balances_mode: ProgressiveBalancesMode, /// Number of epochs between each migration of data from the hot database to the freezer. 
@@ -114,7 +112,6 @@ impl Default for ChainConfig { shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE, genesis_backfill: false, always_prepare_payload: false, - enable_backfill_rate_limiting: true, progressive_balances_mode: ProgressiveBalancesMode::Checked, epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION, } diff --git a/beacon_node/beacon_processor/Cargo.toml b/beacon_node/beacon_processor/Cargo.toml index 5c5200e101..c626441bb7 100644 --- a/beacon_node/beacon_processor/Cargo.toml +++ b/beacon_node/beacon_processor/Cargo.toml @@ -21,4 +21,6 @@ types = { path = "../../consensus/types" } ethereum_ssz = "0.5.0" lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -parking_lot = "0.12.0" \ No newline at end of file +parking_lot = "0.12.0" +num_cpus = "1.13.0" +serde = { version = "1.0.116", features = ["derive"] } \ No newline at end of file diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 297c4868db..bf5d8bced4 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -48,6 +48,7 @@ use lighthouse_network::NetworkGlobals; use lighthouse_network::{MessageId, PeerId}; use logging::TimeLatch; use parking_lot::Mutex; +use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, trace, warn, Logger}; use slot_clock::SlotClock; use std::cmp; @@ -70,7 +71,7 @@ pub mod work_reprocessing_queue; /// The maximum size of the channel for work events to the `BeaconProcessor`. /// /// Setting this too low will cause consensus messages to be dropped. -pub const MAX_WORK_EVENT_QUEUE_LEN: usize = 16_384; +const DEFAULT_MAX_WORK_EVENT_QUEUE_LEN: usize = 16_384; /// The maximum size of the channel for idle events to the `BeaconProcessor`. /// @@ -79,7 +80,7 @@ pub const MAX_WORK_EVENT_QUEUE_LEN: usize = 16_384; const MAX_IDLE_QUEUE_LEN: usize = 16_384; /// The maximum size of the channel for re-processing work events. 
-pub const MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 3 * MAX_WORK_EVENT_QUEUE_LEN / 4; +const DEFAULT_MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 3 * DEFAULT_MAX_WORK_EVENT_QUEUE_LEN / 4; /// The maximum number of queued `Attestation` objects that will be stored before we start dropping /// them. @@ -167,6 +168,14 @@ const MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN: usize = 16_384; /// will be stored before we start dropping them. const MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN: usize = 1_024; +/// The maximum number of priority-0 (highest priority) messages that will be queued before +/// they begin to be dropped. +const MAX_API_REQUEST_P0_QUEUE_LEN: usize = 1_024; + +/// The maximum number of priority-1 (second-highest priority) messages that will be queued before +/// they begin to be dropped. +const MAX_API_REQUEST_P1_QUEUE_LEN: usize = 1_024; + /// The name of the manager tokio task. const MANAGER_TASK_NAME: &str = "beacon_processor_manager"; @@ -184,8 +193,8 @@ const WORKER_TASK_NAME: &str = "beacon_processor_worker"; /// Poisoning occurs when an invalid signature is included in a batch of attestations. A single /// invalid signature causes the entire batch to fail. When a batch fails, we fall-back to /// individually verifying each attestation signature. -const MAX_GOSSIP_ATTESTATION_BATCH_SIZE: usize = 64; -const MAX_GOSSIP_AGGREGATE_BATCH_SIZE: usize = 64; +const DEFAULT_MAX_GOSSIP_ATTESTATION_BATCH_SIZE: usize = 64; +const DEFAULT_MAX_GOSSIP_AGGREGATE_BATCH_SIZE: usize = 64; /// Unique IDs used for metrics and testing. 
pub const WORKER_FREED: &str = "worker_freed"; @@ -215,6 +224,61 @@ pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; pub const UNKNOWN_LIGHT_CLIENT_UPDATE: &str = "unknown_light_client_update"; pub const GOSSIP_BLS_TO_EXECUTION_CHANGE: &str = "gossip_bls_to_execution_change"; +pub const API_REQUEST_P0: &str = "api_request_p0"; +pub const API_REQUEST_P1: &str = "api_request_p1"; + +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub struct BeaconProcessorConfig { + pub max_workers: usize, + pub max_work_event_queue_len: usize, + pub max_scheduled_work_queue_len: usize, + pub max_gossip_attestation_batch_size: usize, + pub max_gossip_aggregate_batch_size: usize, + pub enable_backfill_rate_limiting: bool, +} + +impl Default for BeaconProcessorConfig { + fn default() -> Self { + Self { + max_workers: cmp::max(1, num_cpus::get()), + max_work_event_queue_len: DEFAULT_MAX_WORK_EVENT_QUEUE_LEN, + max_scheduled_work_queue_len: DEFAULT_MAX_SCHEDULED_WORK_QUEUE_LEN, + max_gossip_attestation_batch_size: DEFAULT_MAX_GOSSIP_ATTESTATION_BATCH_SIZE, + max_gossip_aggregate_batch_size: DEFAULT_MAX_GOSSIP_AGGREGATE_BATCH_SIZE, + enable_backfill_rate_limiting: true, + } + } +} + +// The channels necessary to instantiate a `BeaconProcessor`. 
+pub struct BeaconProcessorChannels { + pub beacon_processor_tx: BeaconProcessorSend, + pub beacon_processor_rx: mpsc::Receiver>, + pub work_reprocessing_tx: mpsc::Sender, + pub work_reprocessing_rx: mpsc::Receiver, +} + +impl BeaconProcessorChannels { + pub fn new(config: &BeaconProcessorConfig) -> Self { + let (beacon_processor_tx, beacon_processor_rx) = + mpsc::channel(config.max_scheduled_work_queue_len); + let (work_reprocessing_tx, work_reprocessing_rx) = + mpsc::channel(config.max_scheduled_work_queue_len); + + Self { + beacon_processor_tx: BeaconProcessorSend(beacon_processor_tx), + beacon_processor_rx, + work_reprocessing_rx, + work_reprocessing_tx, + } + } +} + +impl Default for BeaconProcessorChannels { + fn default() -> Self { + Self::new(&BeaconProcessorConfig::default()) + } +} /// A simple first-in-first-out queue with a maximum length. struct FifoQueue { @@ -363,7 +427,7 @@ impl WorkEvent { } } -impl std::convert::From for WorkEvent { +impl From for WorkEvent { fn from(ready_work: ReadyWork) -> Self { match ready_work { ReadyWork::Block(QueuedGossipBlock { @@ -465,6 +529,10 @@ impl BeaconProcessorSend { pub type AsyncFn = Pin + Send + Sync>>; pub type BlockingFn = Box; pub type BlockingFnWithManualSendOnIdle = Box; +pub enum BlockingOrAsync { + Blocking(BlockingFn), + Async(AsyncFn), +} /// Indicates the type of work to be performed and therefore its priority and /// queuing specifics. @@ -523,6 +591,8 @@ pub enum Work { BlocksByRootsRequest(BlockingFnWithManualSendOnIdle), GossipBlsToExecutionChange(BlockingFn), LightClientBootstrapRequest(BlockingFn), + ApiRequestP0(BlockingOrAsync), + ApiRequestP1(BlockingOrAsync), } impl fmt::Debug for Work { @@ -560,6 +630,8 @@ impl Work { Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, Work::GossipBlsToExecutionChange(_) => GOSSIP_BLS_TO_EXECUTION_CHANGE, Work::UnknownLightClientOptimisticUpdate { .. } => UNKNOWN_LIGHT_CLIENT_UPDATE, + Work::ApiRequestP0 { .. 
} => API_REQUEST_P0, + Work::ApiRequestP1 { .. } => API_REQUEST_P1, } } } @@ -638,7 +710,7 @@ pub struct BeaconProcessor { pub executor: TaskExecutor, pub max_workers: usize, pub current_workers: usize, - pub enable_backfill_rate_limiting: bool, + pub config: BeaconProcessorConfig, pub log: Logger, } @@ -714,11 +786,13 @@ impl BeaconProcessor { let mut lcbootstrap_queue = FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN); + let mut api_request_p0_queue = FifoQueue::new(MAX_API_REQUEST_P0_QUEUE_LEN); + let mut api_request_p1_queue = FifoQueue::new(MAX_API_REQUEST_P1_QUEUE_LEN); + // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // receive them back once they are ready (`ready_work_rx`). let (ready_work_tx, ready_work_rx) = - mpsc::channel::(MAX_SCHEDULED_WORK_QUEUE_LEN); - + mpsc::channel::(self.config.max_scheduled_work_queue_len); spawn_reprocess_scheduler( ready_work_tx, work_reprocessing_rx, @@ -739,7 +813,7 @@ impl BeaconProcessor { reprocess_work_rx: ready_work_rx, }; - let enable_backfill_rate_limiting = self.enable_backfill_rate_limiting; + let enable_backfill_rate_limiting = self.config.enable_backfill_rate_limiting; loop { let work_event = match inbound_events.next().await { @@ -850,12 +924,17 @@ impl BeaconProcessor { // required to verify some attestations. } else if let Some(item) = gossip_block_queue.pop() { self.spawn_worker(item, idle_tx); + // Check the priority 0 API requests after blocks, but before attestations. + } else if let Some(item) = api_request_p0_queue.pop() { + self.spawn_worker(item, idle_tx); // Check the aggregates, *then* the unaggregates since we assume that // aggregates are more valuable to local validators and effectively give us // more information with less signature verification time. 
} else if aggregate_queue.len() > 0 { - let batch_size = - cmp::min(aggregate_queue.len(), MAX_GOSSIP_AGGREGATE_BATCH_SIZE); + let batch_size = cmp::min( + aggregate_queue.len(), + self.config.max_gossip_aggregate_batch_size, + ); if batch_size < 2 { // One single aggregate is in the queue, process it individually. @@ -914,7 +993,7 @@ impl BeaconProcessor { } else if attestation_queue.len() > 0 { let batch_size = cmp::min( attestation_queue.len(), - MAX_GOSSIP_ATTESTATION_BATCH_SIZE, + self.config.max_gossip_attestation_batch_size, ); if batch_size < 2 { @@ -1005,6 +1084,12 @@ impl BeaconProcessor { self.spawn_worker(item, idle_tx); } else if let Some(item) = gossip_bls_to_execution_change_queue.pop() { self.spawn_worker(item, idle_tx); + // Check the priority 1 API requests after we've + // processed all the interesting things from the network + // and things required for us to stay in good repute + // with our P2P peers. + } else if let Some(item) = api_request_p1_queue.pop() { + self.spawn_worker(item, idle_tx); // Handle backfill sync chain segments. } else if let Some(item) = backfill_chain_segment.pop() { self.spawn_worker(item, idle_tx); @@ -1127,6 +1212,12 @@ impl BeaconProcessor { Work::UnknownLightClientOptimisticUpdate { .. } => { unknown_light_client_update_queue.push(work, work_id, &self.log) } + Work::ApiRequestP0 { .. } => { + api_request_p0_queue.push(work, work_id, &self.log) + } + Work::ApiRequestP1 { .. 
} => { + api_request_p1_queue.push(work, work_id, &self.log) + } } } } @@ -1183,6 +1274,14 @@ impl BeaconProcessor { &metrics::BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL, gossip_bls_to_execution_change_queue.len() as i64, ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_API_REQUEST_P0_QUEUE_TOTAL, + api_request_p0_queue.len() as i64, + ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_API_REQUEST_P1_QUEUE_TOTAL, + api_request_p1_queue.len() as i64, + ); if aggregate_queue.is_full() && aggregate_debounce.elapsed() { error!( @@ -1299,6 +1398,10 @@ impl BeaconProcessor { task_spawner.spawn_blocking_with_manual_send_idle(work) } Work::ChainSegmentBackfill(process_fn) => task_spawner.spawn_async(process_fn), + Work::ApiRequestP0(process_fn) | Work::ApiRequestP1(process_fn) => match process_fn { + BlockingOrAsync::Blocking(process_fn) => task_spawner.spawn_blocking(process_fn), + BlockingOrAsync::Async(process_fn) => task_spawner.spawn_async(process_fn), + }, Work::GossipVoluntaryExit(process_fn) | Work::GossipProposerSlashing(process_fn) | Work::GossipAttesterSlashing(process_fn) diff --git a/beacon_node/beacon_processor/src/metrics.rs b/beacon_node/beacon_processor/src/metrics.rs index 65ab0bd8fc..e14c39e9a8 100644 --- a/beacon_node/beacon_processor/src/metrics.rs +++ b/beacon_node/beacon_processor/src/metrics.rs @@ -100,6 +100,15 @@ lazy_static::lazy_static! { "beacon_processor_sync_contribution_queue_total", "Count of sync committee contributions waiting to be processed." ); + // HTTP API requests. + pub static ref BEACON_PROCESSOR_API_REQUEST_P0_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_api_request_p0_queue_total", + "Count of P0 HTTP requesets waiting to be processed." + ); + pub static ref BEACON_PROCESSOR_API_REQUEST_P1_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_api_request_p1_queue_total", + "Count of P1 HTTP requesets waiting to be processed." + ); /* * Attestation reprocessing queue metrics. 
diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 71a9b28fb0..8383963b7c 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -13,10 +13,8 @@ use beacon_chain::{ store::{HotColdDB, ItemStore, LevelDB, StoreConfig}, BeaconChain, BeaconChainTypes, Eth1ChainBackend, MigratorConfig, ServerSentEventHandler, }; -use beacon_processor::{ - work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessor, BeaconProcessorSend, - WorkEvent, MAX_SCHEDULED_WORK_QUEUE_LEN, MAX_WORK_EVENT_QUEUE_LEN, -}; +use beacon_processor::BeaconProcessorConfig; +use beacon_processor::{BeaconProcessor, BeaconProcessorChannels}; use environment::RuntimeContext; use eth1::{Config as Eth1Config, Service as Eth1Service}; use eth2::{ @@ -37,7 +35,7 @@ use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use timer::spawn_timer; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::oneshot; use types::{ test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, SignedBeaconBlock, @@ -76,11 +74,9 @@ pub struct ClientBuilder { http_api_config: http_api::Config, http_metrics_config: http_metrics::Config, slasher: Option>>, + beacon_processor_config: Option, + beacon_processor_channels: Option>, eth_spec_instance: T::EthSpec, - beacon_processor_send: BeaconProcessorSend, - beacon_processor_receive: mpsc::Receiver>, - work_reprocessing_tx: mpsc::Sender, - work_reprocessing_rx: mpsc::Receiver, } impl @@ -96,10 +92,6 @@ where /// /// The `eth_spec_instance` parameter is used to concretize `TEthSpec`. 
pub fn new(eth_spec_instance: TEthSpec) -> Self { - let (beacon_processor_send, beacon_processor_receive) = - mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); - let (work_reprocessing_tx, work_reprocessing_rx) = - mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); Self { slot_clock: None, store: None, @@ -117,10 +109,8 @@ where http_metrics_config: <_>::default(), slasher: None, eth_spec_instance, - beacon_processor_send: BeaconProcessorSend(beacon_processor_send), - beacon_processor_receive, - work_reprocessing_tx, - work_reprocessing_rx, + beacon_processor_config: None, + beacon_processor_channels: None, } } @@ -136,6 +126,12 @@ where self } + pub fn beacon_processor(mut self, config: BeaconProcessorConfig) -> Self { + self.beacon_processor_channels = Some(BeaconProcessorChannels::new(&config)); + self.beacon_processor_config = Some(config); + self + } + pub fn slasher(mut self, slasher: Arc>) -> Self { self.slasher = Some(slasher); self @@ -496,6 +492,7 @@ where chain: None, network_senders: None, network_globals: None, + beacon_processor_send: None, eth1_service: Some(genesis_service.eth1_service.clone()), log: context.log().clone(), sse_logging_components: runtime_context.sse_logging_components.clone(), @@ -573,6 +570,10 @@ where .as_ref() .ok_or("network requires a runtime_context")? 
.clone(); + let beacon_processor_channels = self + .beacon_processor_channels + .as_ref() + .ok_or("network requires beacon_processor_channels")?; // If gossipsub metrics are required we build a registry to record them let mut gossipsub_registry = if config.metrics_enabled { @@ -588,8 +589,8 @@ where gossipsub_registry .as_mut() .map(|registry| registry.sub_registry_with_prefix("gossipsub")), - self.beacon_processor_send.clone(), - self.work_reprocessing_tx.clone(), + beacon_processor_channels.beacon_processor_tx.clone(), + beacon_processor_channels.work_reprocessing_tx.clone(), ) .await .map_err(|e| format!("Failed to start network: {:?}", e))?; @@ -712,6 +713,14 @@ where .runtime_context .as_ref() .ok_or("build requires a runtime context")?; + let beacon_processor_channels = self + .beacon_processor_channels + .take() + .ok_or("build requires beacon_processor_channels")?; + let beacon_processor_config = self + .beacon_processor_config + .take() + .ok_or("build requires a beacon_processor_config")?; let log = runtime_context.log().clone(); let http_api_listen_addr = if self.http_api_config.enabled { @@ -721,6 +730,7 @@ where network_senders: self.network_senders.clone(), network_globals: self.network_globals.clone(), eth1_service: self.eth1_service.clone(), + beacon_processor_send: Some(beacon_processor_channels.beacon_processor_tx.clone()), sse_logging_components: runtime_context.sse_logging_components.clone(), log: log.clone(), }); @@ -784,15 +794,13 @@ where executor: beacon_processor_context.executor.clone(), max_workers: cmp::max(1, num_cpus::get()), current_workers: 0, - enable_backfill_rate_limiting: beacon_chain - .config - .enable_backfill_rate_limiting, + config: beacon_processor_config, log: beacon_processor_context.log().clone(), } .spawn_manager( - self.beacon_processor_receive, - self.work_reprocessing_tx, - self.work_reprocessing_rx, + beacon_processor_channels.beacon_processor_rx, + beacon_processor_channels.work_reprocessing_tx, + 
beacon_processor_channels.work_reprocessing_rx, None, beacon_chain.slot_clock.clone(), beacon_chain.spec.maximum_gossip_clock_disparity(), diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 95a00b3749..b4deb52fc3 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,4 +1,5 @@ use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; +use beacon_processor::BeaconProcessorConfig; use directory::DEFAULT_ROOT_DIR; use environment::LoggerConfig; use network::NetworkConfig; @@ -80,6 +81,7 @@ pub struct Config { pub slasher: Option, pub logger_config: LoggerConfig, pub always_prefer_builder_payload: bool, + pub beacon_processor: BeaconProcessorConfig, } impl Default for Config { @@ -107,6 +109,7 @@ impl Default for Config { validator_monitor_individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, logger_config: LoggerConfig::default(), always_prefer_builder_payload: false, + beacon_processor: <_>::default(), } } } diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 4b4a28b51e..0fa286f165 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -3,12 +3,12 @@ name = "http_api" version = "0.1.0" authors = ["Paul Hauner "] edition = "2021" -autotests = false # using a single test binary compiles faster +autotests = false # using a single test binary compiles faster [dependencies] warp = { version = "0.3.2", features = ["tls"] } serde = { version = "1.0.116", features = ["derive"] } -tokio = { version = "1.14.0", features = ["macros","sync"] } +tokio = { version = "1.14.0", features = ["macros", "sync"] } tokio-stream = { version = "0.1.3", features = ["sync"] } types = { path = "../../consensus/types" } hex = "0.4.2" @@ -27,9 +27,9 @@ slot_clock = { path = "../../common/slot_clock" } ethereum_ssz = "0.5.0" bs58 = "0.4.0" futures = "0.3.8" -execution_layer = {path = "../execution_layer"} +execution_layer = { 
path = "../execution_layer" } parking_lot = "0.12.0" -safe_arith = {path = "../../consensus/safe_arith"} +safe_arith = { path = "../../consensus/safe_arith" } task_executor = { path = "../../common/task_executor" } lru = "0.7.7" tree_hash = "0.5.0" @@ -40,9 +40,10 @@ logging = { path = "../../common/logging" } ethereum_serde_utils = "0.5.0" operation_pool = { path = "../operation_pool" } sensitive_url = { path = "../../common/sensitive_url" } -unused_port = {path = "../../common/unused_port"} +unused_port = { path = "../../common/unused_port" } store = { path = "../store" } bytes = "1.1.0" +beacon_processor = { path = "../beacon_processor" } [dev-dependencies] environment = { path = "../../lighthouse/environment" } @@ -52,4 +53,4 @@ genesis = { path = "../genesis" } [[test]] name = "bn_http_api_tests" -path = "tests/main.rs" \ No newline at end of file +path = "tests/main.rs" diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 739371c6ee..9512d18aba 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -18,8 +18,10 @@ mod standard_block_rewards; mod state_id; mod sync_committee_rewards; mod sync_committees; +mod task_spawner; pub mod test_utils; mod ui; +mod validator; mod validator_inclusion; mod version; @@ -28,6 +30,7 @@ use beacon_chain::{ validator_monitor::timestamp_now, AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, ProduceBlockVerification, WhenSlotSkipped, }; +use beacon_processor::BeaconProcessorSend; pub use block_id::BlockId; use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; @@ -57,7 +60,11 @@ use std::pin::Pin; use std::sync::Arc; use sysinfo::{System, SystemExt}; use system_health::observe_system_health_bn; -use tokio::sync::mpsc::{Sender, UnboundedSender}; +use task_spawner::{Priority, TaskSpawner}; +use tokio::sync::{ + mpsc::{Sender, UnboundedSender}, + oneshot, +}; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ Attestation, 
AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, @@ -67,6 +74,7 @@ use types::{ SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, }; +use validator::pubkey_to_validator_index; use version::{ add_consensus_version_header, execution_optimistic_finalized_fork_versioned_response, fork_versioned_response, inconsistent_fork_rejection, unsupported_version_rejection, V1, V2, @@ -75,11 +83,7 @@ use warp::http::StatusCode; use warp::sse::Event; use warp::Reply; use warp::{http::Response, Filter}; -use warp_utils::{ - query::multi_key_query, - task::{blocking_json_task, blocking_response_task}, - uor::UnifyingOrFilter, -}; +use warp_utils::{query::multi_key_query, uor::UnifyingOrFilter}; const API_PREFIX: &str = "eth"; @@ -111,6 +115,7 @@ pub struct Context { pub chain: Option>>, pub network_senders: Option>, pub network_globals: Option>>, + pub beacon_processor_send: Option>, pub eth1_service: Option, pub sse_logging_components: Option, pub log: Logger, @@ -127,6 +132,7 @@ pub struct Config { pub allow_sync_stalled: bool, pub spec_fork_name: Option, pub data_dir: PathBuf, + pub enable_beacon_processor: bool, } impl Default for Config { @@ -140,6 +146,7 @@ impl Default for Config { allow_sync_stalled: false, spec_fork_name: None, data_dir: PathBuf::from(DEFAULT_ROOT_DIR), + enable_beacon_processor: true, } } } @@ -488,6 +495,14 @@ pub fn serve( let app_start = std::time::Instant::now(); let app_start_filter = warp::any().map(move || app_start); + // Create a `warp` filter that provides access to the `TaskSpawner`. + let beacon_processor_send = ctx + .beacon_processor_send + .clone() + .filter(|_| config.enable_beacon_processor); + let task_spawner_filter = + warp::any().map(move || TaskSpawner::new(beacon_processor_send.clone())); + /* * * Start of HTTP method definitions. 
@@ -499,17 +514,20 @@ pub fn serve( .and(warp::path("beacon")) .and(warp::path("genesis")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then(|chain: Arc>| { - blocking_json_task(move || { - let genesis_data = api_types::GenesisData { - genesis_time: chain.genesis_time, - genesis_validators_root: chain.genesis_validators_root, - genesis_fork_version: chain.spec.genesis_fork_version, - }; - Ok(api_types::GenericResponse::from(genesis_data)) - }) - }); + .and_then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let genesis_data = api_types::GenesisData { + genesis_time: chain.genesis_time, + genesis_validators_root: chain.genesis_validators_root, + genesis_fork_version: chain.spec.genesis_fork_version, + }; + Ok(api_types::GenericResponse::from(genesis_data)) + }) + }, + ); /* * beacon/states/{state_id} @@ -523,6 +541,7 @@ pub fn serve( "Invalid state ID".to_string(), )) })) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()); // GET beacon/states/{state_id}/root @@ -530,65 +549,77 @@ pub fn serve( .clone() .and(warp::path("root")) .and(warp::path::end()) - .and_then(|state_id: StateId, chain: Arc>| { - blocking_json_task(move || { - let (root, execution_optimistic, finalized) = state_id.root(&chain)?; - Ok(root) - .map(api_types::RootData::from) - .map(api_types::GenericResponse::from) - .map(|resp| { - resp.add_execution_optimistic_finalized(execution_optimistic, finalized) - }) - }) - }); + .and_then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (root, execution_optimistic, finalized) = state_id.root(&chain)?; + Ok(root) + .map(api_types::RootData::from) + .map(api_types::GenericResponse::from) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) + }) + }, + ); // GET beacon/states/{state_id}/fork let 
get_beacon_state_fork = beacon_states_path .clone() .and(warp::path("fork")) .and(warp::path::end()) - .and_then(|state_id: StateId, chain: Arc>| { - blocking_json_task(move || { - let (fork, execution_optimistic, finalized) = - state_id.fork_and_execution_optimistic_and_finalized(&chain)?; - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data: fork, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), + .and_then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (fork, execution_optimistic, finalized) = + state_id.fork_and_execution_optimistic_and_finalized(&chain)?; + Ok(api_types::ExecutionOptimisticFinalizedResponse { + data: fork, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) }) - }) - }); + }, + ); // GET beacon/states/{state_id}/finality_checkpoints let get_beacon_state_finality_checkpoints = beacon_states_path .clone() .and(warp::path("finality_checkpoints")) .and(warp::path::end()) - .and_then(|state_id: StateId, chain: Arc>| { - blocking_json_task(move || { - let (data, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - Ok(( - api_types::FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }, - execution_optimistic, - finalized, - )) - }, - )?; + .and_then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + Ok(( + api_types::FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + 
current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }, + execution_optimistic, + finalized, + )) + }, + )?; - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), + Ok(api_types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) }) - }) - }); + }, + ); // GET beacon/states/{state_id}/validator_balances?id let get_beacon_state_validator_balances = beacon_states_path @@ -598,9 +629,10 @@ pub fn serve( .and(multi_key_query::()) .and_then( |state_id: StateId, + task_spawner: TaskSpawner, chain: Arc>, query_res: Result| { - blocking_json_task(move || { + task_spawner.blocking_json_task(Priority::P1, move || { let query = query_res?; let (data, execution_optimistic, finalized) = state_id .map_state_and_execution_optimistic_and_finalized( @@ -655,9 +687,10 @@ pub fn serve( .and(multi_key_query::()) .and_then( |state_id: StateId, + task_spawner: TaskSpawner, chain: Arc>, query_res: Result| { - blocking_json_task(move || { + task_spawner.blocking_json_task(Priority::P1, move || { let query = query_res?; let (data, execution_optimistic, finalized) = state_id .map_state_and_execution_optimistic_and_finalized( @@ -737,16 +770,24 @@ pub fn serve( })) .and(warp::path::end()) .and_then( - |state_id: StateId, chain: Arc>, validator_id: ValidatorId| { - blocking_json_task(move || { + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + validator_id: ValidatorId| { + task_spawner.blocking_json_task(Priority::P1, move || { let (data, execution_optimistic, finalized) = state_id .map_state_and_execution_optimistic_and_finalized( &chain, |state, execution_optimistic, finalized| { let index_opt = match &validator_id { - ValidatorId::PublicKey(pubkey) => { - state.validators().iter().position(|v| v.pubkey == *pubkey) - } + ValidatorId::PublicKey(pubkey) => 
pubkey_to_validator_index( + &chain, state, pubkey, + ) + .map_err(|e| { + warp_utils::reject::custom_not_found(format!( + "unable to access pubkey cache: {e:?}", + )) + })?, ValidatorId::Index(index) => Some(*index as usize), }; @@ -797,8 +838,11 @@ pub fn serve( .and(warp::query::()) .and(warp::path::end()) .and_then( - |state_id: StateId, chain: Arc>, query: api_types::CommitteesQuery| { - blocking_json_task(move || { + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: api_types::CommitteesQuery| { + task_spawner.blocking_json_task(Priority::P1, move || { let (data, execution_optimistic, finalized) = state_id .map_state_and_execution_optimistic_and_finalized( &chain, @@ -978,9 +1022,10 @@ pub fn serve( .and(warp::path::end()) .and_then( |state_id: StateId, + task_spawner: TaskSpawner, chain: Arc>, query: api_types::SyncCommitteesQuery| { - blocking_json_task(move || { + task_spawner.blocking_json_task(Priority::P1, move || { let (sync_committee, execution_optimistic, finalized) = state_id .map_state_and_execution_optimistic_and_finalized( &chain, @@ -1042,8 +1087,11 @@ pub fn serve( .and(warp::query::()) .and(warp::path::end()) .and_then( - |state_id: StateId, chain: Arc>, query: api_types::RandaoQuery| { - blocking_json_task(move || { + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: api_types::RandaoQuery| { + task_spawner.blocking_json_task(Priority::P1, move || { let (randao, execution_optimistic, finalized) = state_id .map_state_and_execution_optimistic_and_finalized( &chain, @@ -1078,10 +1126,13 @@ pub fn serve( .and(warp::path("headers")) .and(warp::query::()) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and_then( - |query: api_types::HeadersQuery, chain: Arc>| { - blocking_json_task(move || { + |query: api_types::HeadersQuery, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { let (root, block, 
execution_optimistic, finalized) = match (query.slot, query.parent_root) { // No query parameters, return the canonical head block. @@ -1175,36 +1226,41 @@ pub fn serve( )) })) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then(|block_id: BlockId, chain: Arc>| { - blocking_json_task(move || { - let (root, execution_optimistic, finalized) = block_id.root(&chain)?; - // Ignore the second `execution_optimistic` since the first one has more - // information about the original request. - let (block, _execution_optimistic, _finalized) = - BlockId::from_root(root).blinded_block(&chain)?; + .and_then( + |block_id: BlockId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (root, execution_optimistic, finalized) = block_id.root(&chain)?; + // Ignore the second `execution_optimistic` since the first one has more + // information about the original request. + let (block, _execution_optimistic, _finalized) = + BlockId::from_root(root).blinded_block(&chain)?; - let canonical = chain - .block_root_at_slot(block.slot(), WhenSlotSkipped::None) - .map_err(warp_utils::reject::beacon_chain_error)? - .map_or(false, |canonical| root == canonical); + let canonical = chain + .block_root_at_slot(block.slot(), WhenSlotSkipped::None) + .map_err(warp_utils::reject::beacon_chain_error)? 
+ .map_or(false, |canonical| root == canonical); - let data = api_types::BlockHeaderData { - root, - canonical, - header: api_types::BlockHeaderAndSignature { - message: block.message().block_header(), - signature: block.signature().clone().into(), - }, - }; + let data = api_types::BlockHeaderData { + root, + canonical, + header: api_types::BlockHeaderAndSignature { + message: block.message().block_header(), + signature: block.signature().clone().into(), + }, + }; - Ok(api_types::ExecutionOptimisticFinalizedResponse { - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - data, + Ok(api_types::ExecutionOptimisticFinalizedResponse { + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + data, + }) }) - }) - }); + }, + ); /* * beacon/blocks @@ -1216,24 +1272,28 @@ pub fn serve( .and(warp::path("blocks")) .and(warp::path::end()) .and(warp::body::json()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(log_filter.clone()) .and_then( |block: Arc>, + task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, - log: Logger| async move { - publish_blocks::publish_block( - None, - ProvenancedBlock::local(block), - chain, - &network_tx, - log, - BroadcastValidation::default(), - ) - .await - .map(|()| warp::reply().into_response()) + log: Logger| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + publish_blocks::publish_block( + None, + ProvenancedBlock::local(block), + chain, + &network_tx, + log, + BroadcastValidation::default(), + ) + .await + .map(|()| warp::reply().into_response()) + }) }, ); @@ -1278,35 +1338,39 @@ pub fn serve( .and(warp::query::()) .and(warp::path::end()) .and(warp::body::json()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( |validation_level: api_types::BroadcastValidationQuery, block: Arc>, + task_spawner: TaskSpawner, chain: 
Arc>, network_tx: UnboundedSender>, - log: Logger| async move { - match publish_blocks::publish_block( - None, - ProvenancedBlock::local(block), - chain, - &network_tx, - log, - validation_level.broadcast_validation, - ) - .await - { - Ok(()) => warp::reply().into_response(), - Err(e) => match warp_utils::reject::handle_rejection(e).await { - Ok(reply) => reply.into_response(), - Err(_) => warp::reply::with_status( - StatusCode::INTERNAL_SERVER_ERROR, - eth2::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response(), - }, - } + log: Logger| { + task_spawner.spawn_async(Priority::P1, async move { + match publish_blocks::publish_block( + None, + ProvenancedBlock::local(block), + chain, + &network_tx, + log, + validation_level.broadcast_validation, + ) + .await + { + Ok(()) => warp::reply().into_response(), + Err(e) => match warp_utils::reject::handle_rejection(e).await { + Ok(reply) => reply.into_response(), + Err(_) => warp::reply::with_status( + StatusCode::INTERNAL_SERVER_ERROR, + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response(), + }, + } + }) }, ); @@ -1371,23 +1435,27 @@ pub fn serve( .and(warp::path("blinded_blocks")) .and(warp::path::end()) .and(warp::body::json()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(log_filter.clone()) .and_then( |block: SignedBeaconBlock>, + task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, - log: Logger| async move { - publish_blocks::publish_blinded_block( - block, - chain, - &network_tx, - log, - BroadcastValidation::default(), - ) - .await - .map(|()| warp::reply().into_response()) + log: Logger| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + publish_blocks::publish_blinded_block( + block, + chain, + &network_tx, + log, + BroadcastValidation::default(), + ) + .await + .map(|()| warp::reply().into_response()) + }) }, ); @@ -1437,34 +1505,38 @@ pub fn serve( .and(warp::query::()) .and(warp::path::end()) 
.and(warp::body::json()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( |validation_level: api_types::BroadcastValidationQuery, block: SignedBeaconBlock>, + task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, - log: Logger| async move { - match publish_blocks::publish_blinded_block( - block, - chain, - &network_tx, - log, - validation_level.broadcast_validation, - ) - .await - { - Ok(()) => warp::reply().into_response(), - Err(e) => match warp_utils::reject::handle_rejection(e).await { - Ok(reply) => reply.into_response(), - Err(_) => warp::reply::with_status( - StatusCode::INTERNAL_SERVER_ERROR, - eth2::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response(), - }, - } + log: Logger| { + task_spawner.spawn_async(Priority::P0, async move { + match publish_blocks::publish_blinded_block( + block, + chain, + &network_tx, + log, + validation_level.broadcast_validation, + ) + .await + { + Ok(()) => warp::reply().into_response(), + Err(e) => match warp_utils::reject::handle_rejection(e).await { + Ok(reply) => reply.into_response(), + Err(_) => warp::reply::with_status( + StatusCode::INTERNAL_SERVER_ERROR, + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response(), + }, + } + }) }, ); @@ -1530,12 +1602,14 @@ pub fn serve( .and(warp::path("beacon")) .and(warp::path("blocks")) .and(block_id_or_err) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()); let beacon_blocks_path_any = any_version .and(warp::path("beacon")) .and(warp::path("blocks")) .and(block_id_or_err) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()); // GET beacon/blocks/{block_id} @@ -1546,9 +1620,10 @@ pub fn serve( .and_then( |endpoint_version: EndpointVersion, block_id: BlockId, + task_spawner: TaskSpawner, chain: Arc>, accept_header: Option| { - async move { + task_spawner.spawn_async_with_rejection(Priority::P1, async move { let (block, execution_optimistic, finalized) = 
block_id.full_block(&chain).await?; let fork_name = block @@ -1576,7 +1651,7 @@ pub fn serve( .map(|res| warp::reply::json(&res).into_response()), } .map(|resp| add_consensus_version_header(resp, fork_name)) - } + }) }, ); @@ -1585,44 +1660,56 @@ pub fn serve( .clone() .and(warp::path("root")) .and(warp::path::end()) - .and_then(|block_id: BlockId, chain: Arc>| { - blocking_json_task(move || { - let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; - Ok(api_types::GenericResponse::from(api_types::RootData::from( - block.canonical_root(), - )) - .add_execution_optimistic_finalized(execution_optimistic, finalized)) - }) - }); + .and_then( + |block_id: BlockId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (block, execution_optimistic, finalized) = + block_id.blinded_block(&chain)?; + Ok(api_types::GenericResponse::from(api_types::RootData::from( + block.canonical_root(), + )) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) + }) + }, + ); // GET beacon/blocks/{block_id}/attestations let get_beacon_block_attestations = beacon_blocks_path_v1 .clone() .and(warp::path("attestations")) .and(warp::path::end()) - .and_then(|block_id: BlockId, chain: Arc>| { - blocking_json_task(move || { - let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; - Ok( - api_types::GenericResponse::from(block.message().body().attestations().clone()) - .add_execution_optimistic_finalized(execution_optimistic, finalized), - ) - }) - }); + .and_then( + |block_id: BlockId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (block, execution_optimistic, finalized) = + block_id.blinded_block(&chain)?; + Ok(api_types::GenericResponse::from( + block.message().body().attestations().clone(), + ) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) + }) + }, + ); // GET 
beacon/blinded_blocks/{block_id} let get_beacon_blinded_block = eth_v1 .and(warp::path("beacon")) .and(warp::path("blinded_blocks")) .and(block_id_or_err) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(warp::path::end()) .and(warp::header::optional::("accept")) .and_then( |block_id: BlockId, + task_spawner: TaskSpawner, chain: Arc>, accept_header: Option| { - blocking_response_task(move || { + task_spawner.blocking_response_task(Priority::P1, move || { let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; let fork_name = block @@ -1664,6 +1751,7 @@ pub fn serve( let beacon_pool_path = eth_v1 .and(warp::path("beacon")) .and(warp::path("pool")) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()); // POST beacon/pool/attestations @@ -1675,11 +1763,12 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .and_then( - |chain: Arc>, + |task_spawner: TaskSpawner, + chain: Arc>, attestations: Vec>, network_tx: UnboundedSender>, log: Logger| { - blocking_json_task(move || { + task_spawner.blocking_json_task(Priority::P0, move || { let seen_timestamp = timestamp_now(); let mut failures = Vec::new(); let mut num_already_known = 0; @@ -1816,8 +1905,10 @@ pub fn serve( .and(warp::path::end()) .and(warp::query::()) .and_then( - |chain: Arc>, query: api_types::AttestationPoolQuery| { - blocking_json_task(move || { + |task_spawner: TaskSpawner, + chain: Arc>, + query: api_types::AttestationPoolQuery| { + task_spawner.blocking_json_task(Priority::P1, move || { let query_filter = |data: &AttestationData| { query.slot.map_or(true, |slot| slot == data.slot) && query @@ -1847,10 +1938,11 @@ pub fn serve( .and(warp::body::json()) .and(network_tx_filter.clone()) .and_then( - |chain: Arc>, + |task_spawner: TaskSpawner, + chain: Arc>, slashing: AttesterSlashing, network_tx: UnboundedSender>| { - blocking_json_task(move || { + task_spawner.blocking_json_task(Priority::P0, move || { let outcome = chain 
.verify_attester_slashing_for_gossip(slashing.clone()) .map_err(|e| { @@ -1887,12 +1979,14 @@ pub fn serve( .clone() .and(warp::path("attester_slashings")) .and(warp::path::end()) - .and_then(|chain: Arc>| { - blocking_json_task(move || { - let attestations = chain.op_pool.get_all_attester_slashings(); - Ok(api_types::GenericResponse::from(attestations)) - }) - }); + .and_then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let attestations = chain.op_pool.get_all_attester_slashings(); + Ok(api_types::GenericResponse::from(attestations)) + }) + }, + ); // POST beacon/pool/proposer_slashings let post_beacon_pool_proposer_slashings = beacon_pool_path @@ -1902,10 +1996,11 @@ pub fn serve( .and(warp::body::json()) .and(network_tx_filter.clone()) .and_then( - |chain: Arc>, + |task_spawner: TaskSpawner, + chain: Arc>, slashing: ProposerSlashing, network_tx: UnboundedSender>| { - blocking_json_task(move || { + task_spawner.blocking_json_task(Priority::P0, move || { let outcome = chain .verify_proposer_slashing_for_gossip(slashing.clone()) .map_err(|e| { @@ -1942,12 +2037,14 @@ pub fn serve( .clone() .and(warp::path("proposer_slashings")) .and(warp::path::end()) - .and_then(|chain: Arc>| { - blocking_json_task(move || { - let attestations = chain.op_pool.get_all_proposer_slashings(); - Ok(api_types::GenericResponse::from(attestations)) - }) - }); + .and_then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let attestations = chain.op_pool.get_all_proposer_slashings(); + Ok(api_types::GenericResponse::from(attestations)) + }) + }, + ); // POST beacon/pool/voluntary_exits let post_beacon_pool_voluntary_exits = beacon_pool_path @@ -1957,10 +2054,11 @@ pub fn serve( .and(warp::body::json()) .and(network_tx_filter.clone()) .and_then( - |chain: Arc>, + |task_spawner: TaskSpawner, + chain: Arc>, exit: SignedVoluntaryExit, network_tx: UnboundedSender>| { - 
blocking_json_task(move || { + task_spawner.blocking_json_task(Priority::P0, move || { let outcome = chain .verify_voluntary_exit_for_gossip(exit.clone()) .map_err(|e| { @@ -1995,12 +2093,14 @@ pub fn serve( .clone() .and(warp::path("voluntary_exits")) .and(warp::path::end()) - .and_then(|chain: Arc>| { - blocking_json_task(move || { - let attestations = chain.op_pool.get_all_voluntary_exits(); - Ok(api_types::GenericResponse::from(attestations)) - }) - }); + .and_then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let attestations = chain.op_pool.get_all_voluntary_exits(); + Ok(api_types::GenericResponse::from(attestations)) + }) + }, + ); // POST beacon/pool/sync_committees let post_beacon_pool_sync_committees = beacon_pool_path @@ -2011,11 +2111,12 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .and_then( - |chain: Arc>, + |task_spawner: TaskSpawner, + chain: Arc>, signatures: Vec, network_tx: UnboundedSender>, log: Logger| { - blocking_json_task(move || { + task_spawner.blocking_json_task(Priority::P0, move || { sync_committees::process_sync_committee_signatures( signatures, network_tx, &chain, log, )?; @@ -2029,12 +2130,14 @@ pub fn serve( .clone() .and(warp::path("bls_to_execution_changes")) .and(warp::path::end()) - .and_then(|chain: Arc>| { - blocking_json_task(move || { - let address_changes = chain.op_pool.get_all_bls_to_execution_changes(); - Ok(api_types::GenericResponse::from(address_changes)) - }) - }); + .and_then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let address_changes = chain.op_pool.get_all_bls_to_execution_changes(); + Ok(api_types::GenericResponse::from(address_changes)) + }) + }, + ); // POST beacon/pool/bls_to_execution_changes let post_beacon_pool_bls_to_execution_changes = beacon_pool_path @@ -2045,11 +2148,12 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) 
.and_then( - |chain: Arc>, + |task_spawner: TaskSpawner, + chain: Arc>, address_changes: Vec, network_tx: UnboundedSender>, log: Logger| { - blocking_json_task(move || { + task_spawner.blocking_json_task(Priority::P0, move || { let mut failures = vec![]; for (index, address_change) in address_changes.into_iter().enumerate() { @@ -2133,10 +2237,13 @@ pub fn serve( .and(warp::path("deposit_snapshot")) .and(warp::path::end()) .and(warp::header::optional::("accept")) + .and(task_spawner_filter.clone()) .and(eth1_service_filter.clone()) .and_then( - |accept_header: Option, eth1_service: eth1::Service| { - blocking_response_task(move || match accept_header { + |accept_header: Option, + task_spawner: TaskSpawner, + eth1_service: eth1::Service| { + task_spawner.blocking_response_task(Priority::P1, move || match accept_header { Some(api_types::Accept::Json) | None => { let snapshot = eth1_service.get_deposit_snapshot(); Ok( @@ -2177,6 +2284,7 @@ pub fn serve( let beacon_rewards_path = eth_v1 .and(warp::path("beacon")) .and(warp::path("rewards")) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()); // GET beacon/rewards/blocks/{block_id} @@ -2185,17 +2293,21 @@ pub fn serve( .and(warp::path("blocks")) .and(block_id_or_err) .and(warp::path::end()) - .and_then(|chain: Arc>, block_id: BlockId| { - blocking_json_task(move || { - let (rewards, execution_optimistic, finalized) = - standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?; - Ok(rewards) - .map(api_types::GenericResponse::from) - .map(|resp| { - resp.add_execution_optimistic_finalized(execution_optimistic, finalized) - }) - }) - }); + .and_then( + |task_spawner: TaskSpawner, + chain: Arc>, + block_id: BlockId| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (rewards, execution_optimistic, finalized) = + standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?; + Ok(rewards) + .map(api_types::GenericResponse::from) + .map(|resp| { + 
resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) + }) + }, + ); /* * beacon/rewards @@ -2204,6 +2316,7 @@ pub fn serve( let beacon_rewards_path = eth_v1 .and(warp::path("beacon")) .and(warp::path("rewards")) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()); // POST beacon/rewards/attestations/{epoch} @@ -2214,8 +2327,11 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and_then( - |chain: Arc>, epoch: Epoch, validators: Vec| { - blocking_json_task(move || { + |task_spawner: TaskSpawner, + chain: Arc>, + epoch: Epoch, + validators: Vec| { + task_spawner.blocking_json_task(Priority::P1, move || { let attestation_rewards = chain .compute_attestation_rewards(epoch, validators) .map_err(|e| match e { @@ -2263,11 +2379,12 @@ pub fn serve( .and(warp::body::json()) .and(log_filter.clone()) .and_then( - |chain: Arc>, + |task_spawner: TaskSpawner, + chain: Arc>, block_id: BlockId, validators: Vec, log: Logger| { - blocking_json_task(move || { + task_spawner.blocking_json_task(Priority::P1, move || { let (rewards, execution_optimistic, finalized) = sync_committee_rewards::compute_sync_committee_rewards( chain, block_id, validators, log, @@ -2292,46 +2409,55 @@ pub fn serve( let get_config_fork_schedule = config_path .and(warp::path("fork_schedule")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then(|chain: Arc>| { - blocking_json_task(move || { - let forks = ForkName::list_all() - .into_iter() - .filter_map(|fork_name| chain.spec.fork_for_name(fork_name)) - .collect::>(); - Ok(api_types::GenericResponse::from(forks)) - }) - }); + .and_then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let forks = ForkName::list_all() + .into_iter() + .filter_map(|fork_name| chain.spec.fork_for_name(fork_name)) + .collect::>(); + Ok(api_types::GenericResponse::from(forks)) + }) + }, + ); // GET config/spec let spec_fork_name = 
ctx.config.spec_fork_name; let get_config_spec = config_path .and(warp::path("spec")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then(move |chain: Arc>| { - blocking_json_task(move || { - let config_and_preset = - ConfigAndPreset::from_chain_spec::(&chain.spec, spec_fork_name); - Ok(api_types::GenericResponse::from(config_and_preset)) - }) - }); + .and_then( + move |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let config_and_preset = + ConfigAndPreset::from_chain_spec::(&chain.spec, spec_fork_name); + Ok(api_types::GenericResponse::from(config_and_preset)) + }) + }, + ); // GET config/deposit_contract let get_config_deposit_contract = config_path .and(warp::path("deposit_contract")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then(|chain: Arc>| { - blocking_json_task(move || { - Ok(api_types::GenericResponse::from( - api_types::DepositContractData { - address: chain.spec.deposit_contract_address, - chain_id: chain.spec.deposit_chain_id, - }, - )) - }) - }); + .and_then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + Ok(api_types::GenericResponse::from( + api_types::DepositContractData { + address: chain.spec.deposit_contract_address, + chain_id: chain.spec.deposit_chain_id, + }, + )) + }) + }, + ); /* * debug @@ -2349,13 +2475,15 @@ pub fn serve( })) .and(warp::path::end()) .and(warp::header::optional::("accept")) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and_then( |endpoint_version: EndpointVersion, state_id: StateId, accept_header: Option, + task_spawner: TaskSpawner, chain: Arc>| { - blocking_response_task(move || match accept_header { + task_spawner.blocking_response_task(Priority::P1, move || match accept_header { Some(api_types::Accept::Ssz) => { // We can ignore the optimistic status for the "fork" since it's a // 
specification constant that doesn't change across competing heads of the @@ -2407,10 +2535,13 @@ pub fn serve( .and(warp::path("beacon")) .and(warp::path("heads")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and_then( - |endpoint_version: EndpointVersion, chain: Arc>| { - blocking_json_task(move || { + |endpoint_version: EndpointVersion, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { let heads = chain .heads() .into_iter() @@ -2443,48 +2574,51 @@ pub fn serve( .and(warp::path("debug")) .and(warp::path("fork_choice")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then(|chain: Arc>| { - blocking_json_task(move || { - let beacon_fork_choice = chain.canonical_head.fork_choice_read_lock(); + .and_then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let beacon_fork_choice = chain.canonical_head.fork_choice_read_lock(); - let proto_array = beacon_fork_choice.proto_array().core_proto_array(); + let proto_array = beacon_fork_choice.proto_array().core_proto_array(); - let fork_choice_nodes = proto_array - .nodes - .iter() - .map(|node| { - let execution_status = if node.execution_status.is_execution_enabled() { - Some(node.execution_status.to_string()) - } else { - None - }; + let fork_choice_nodes = proto_array + .nodes + .iter() + .map(|node| { + let execution_status = if node.execution_status.is_execution_enabled() { + Some(node.execution_status.to_string()) + } else { + None + }; - ForkChoiceNode { - slot: node.slot, - block_root: node.root, - parent_root: node - .parent - .and_then(|index| proto_array.nodes.get(index)) - .map(|parent| parent.root), - justified_epoch: node.justified_checkpoint.epoch, - finalized_epoch: node.finalized_checkpoint.epoch, - weight: node.weight, - validity: execution_status, - execution_block_hash: node - .execution_status - .block_hash() 
- .map(|block_hash| block_hash.into_root()), - } + ForkChoiceNode { + slot: node.slot, + block_root: node.root, + parent_root: node + .parent + .and_then(|index| proto_array.nodes.get(index)) + .map(|parent| parent.root), + justified_epoch: node.justified_checkpoint.epoch, + finalized_epoch: node.finalized_checkpoint.epoch, + weight: node.weight, + validity: execution_status, + execution_block_hash: node + .execution_status + .block_hash() + .map(|block_hash| block_hash.into_root()), + } + }) + .collect::>(); + Ok(ForkChoice { + justified_checkpoint: proto_array.justified_checkpoint, + finalized_checkpoint: proto_array.finalized_checkpoint, + fork_choice_nodes, }) - .collect::>(); - Ok(ForkChoice { - justified_checkpoint: proto_array.justified_checkpoint, - finalized_checkpoint: proto_array.finalized_checkpoint, - fork_choice_nodes, }) - }) - }); + }, + ); /* * node @@ -2495,50 +2629,54 @@ pub fn serve( .and(warp::path("node")) .and(warp::path("identity")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(network_globals.clone()) - .and_then(|network_globals: Arc>| { - blocking_json_task(move || { - let enr = network_globals.local_enr(); - let p2p_addresses = enr.multiaddr_p2p_tcp(); - let discovery_addresses = enr.multiaddr_p2p_udp(); - let meta_data = network_globals.local_metadata.read(); - Ok(api_types::GenericResponse::from(api_types::IdentityData { - peer_id: network_globals.local_peer_id().to_base58(), - enr, - p2p_addresses, - discovery_addresses, - metadata: api_types::MetaData { - seq_number: *meta_data.seq_number(), - attnets: format!( - "0x{}", - hex::encode(meta_data.attnets().clone().into_bytes()), - ), - syncnets: format!( - "0x{}", - hex::encode( - meta_data - .syncnets() - .map(|x| x.clone()) - .unwrap_or_default() - .into_bytes() - ) - ), - }, - })) - }) - }); + .and_then( + |task_spawner: TaskSpawner, + network_globals: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let enr = network_globals.local_enr(); + let 
p2p_addresses = enr.multiaddr_p2p_tcp(); + let discovery_addresses = enr.multiaddr_p2p_udp(); + let meta_data = network_globals.local_metadata.read(); + Ok(api_types::GenericResponse::from(api_types::IdentityData { + peer_id: network_globals.local_peer_id().to_base58(), + enr, + p2p_addresses, + discovery_addresses, + metadata: api_types::MetaData { + seq_number: *meta_data.seq_number(), + attnets: format!( + "0x{}", + hex::encode(meta_data.attnets().clone().into_bytes()), + ), + syncnets: format!( + "0x{}", + hex::encode( + meta_data + .syncnets() + .map(|x| x.clone()) + .unwrap_or_default() + .into_bytes() + ) + ), + }, + })) + }) + }, + ); // GET node/version let get_node_version = eth_v1 .and(warp::path("node")) .and(warp::path("version")) .and(warp::path::end()) - .and_then(|| { - blocking_json_task(move || { - Ok(api_types::GenericResponse::from(api_types::VersionData { - version: version_with_platform(), - })) - }) + // Bypass the `task_spawner` since this method returns a static string. 
+ .then(|| async { + warp::reply::json(&api_types::GenericResponse::from(api_types::VersionData { + version: version_with_platform(), + })) + .into_response() }); // GET node/syncing @@ -2546,10 +2684,13 @@ pub fn serve( .and(warp::path("node")) .and(warp::path("syncing")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(network_globals.clone()) .and(chain_filter.clone()) .and_then( - |network_globals: Arc>, chain: Arc>| { + |task_spawner: TaskSpawner, + network_globals: Arc>, + chain: Arc>| { async move { let el_offline = if let Some(el) = &chain.execution_layer { el.is_offline_or_erroring().await @@ -2557,32 +2698,34 @@ pub fn serve( true }; - blocking_json_task(move || { - let head_slot = chain.canonical_head.cached_head().head_slot(); - let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { - warp_utils::reject::custom_server_error( - "Unable to read slot clock".into(), - ) - })?; + task_spawner + .blocking_json_task(Priority::P0, move || { + let head_slot = chain.canonical_head.cached_head().head_slot(); + let current_slot = + chain.slot_clock.now_or_genesis().ok_or_else(|| { + warp_utils::reject::custom_server_error( + "Unable to read slot clock".into(), + ) + })?; - // Taking advantage of saturating subtraction on slot. - let sync_distance = current_slot - head_slot; + // Taking advantage of saturating subtraction on slot. 
+ let sync_distance = current_slot - head_slot; - let is_optimistic = chain - .is_optimistic_or_invalid_head() - .map_err(warp_utils::reject::beacon_chain_error)?; + let is_optimistic = chain + .is_optimistic_or_invalid_head() + .map_err(warp_utils::reject::beacon_chain_error)?; - let syncing_data = api_types::SyncingData { - is_syncing: network_globals.sync_state.read().is_syncing(), - is_optimistic: Some(is_optimistic), - el_offline: Some(el_offline), - head_slot, - sync_distance, - }; + let syncing_data = api_types::SyncingData { + is_syncing: network_globals.sync_state.read().is_syncing(), + is_optimistic: Some(is_optimistic), + el_offline: Some(el_offline), + head_slot, + sync_distance, + }; - Ok(api_types::GenericResponse::from(syncing_data)) - }) - .await + Ok(api_types::GenericResponse::from(syncing_data)) + }) + .await } }, ); @@ -2592,10 +2735,13 @@ pub fn serve( .and(warp::path("node")) .and(warp::path("health")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(network_globals.clone()) .and(chain_filter.clone()) .and_then( - |network_globals: Arc>, chain: Arc>| { + |task_spawner: TaskSpawner, + network_globals: Arc>, + chain: Arc>| { async move { let el_offline = if let Some(el) = &chain.execution_layer { el.is_offline_or_erroring().await @@ -2603,28 +2749,31 @@ pub fn serve( true }; - blocking_response_task(move || { - let is_optimistic = chain - .is_optimistic_or_invalid_head() - .map_err(warp_utils::reject::beacon_chain_error)?; + task_spawner + .blocking_response_task(Priority::P0, move || { + let is_optimistic = chain + .is_optimistic_or_invalid_head() + .map_err(warp_utils::reject::beacon_chain_error)?; - let is_syncing = !network_globals.sync_state.read().is_synced(); + let is_syncing = !network_globals.sync_state.read().is_synced(); - if el_offline { - Err(warp_utils::reject::not_synced("execution layer is offline".to_string())) - } else if is_syncing || is_optimistic { - Ok(warp::reply::with_status( - warp::reply(), - 
warp::http::StatusCode::PARTIAL_CONTENT, - )) - } else { - Ok(warp::reply::with_status( - warp::reply(), - warp::http::StatusCode::OK, - )) - } - }) - .await + if el_offline { + Err(warp_utils::reject::not_synced( + "execution layer is offline".to_string(), + )) + } else if is_syncing || is_optimistic { + Ok(warp::reply::with_status( + warp::reply(), + warp::http::StatusCode::PARTIAL_CONTENT, + )) + } else { + Ok(warp::reply::with_status( + warp::reply(), + warp::http::StatusCode::OK, + )) + } + }) + .await } }, ); @@ -2635,10 +2784,13 @@ pub fn serve( .and(warp::path("peers")) .and(warp::path::param::()) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(network_globals.clone()) .and_then( - |requested_peer_id: String, network_globals: Arc>| { - blocking_json_task(move || { + |requested_peer_id: String, + task_spawner: TaskSpawner, + network_globals: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { let peer_id = PeerId::from_bytes( &bs58::decode(requested_peer_id.as_str()) .into_vec() @@ -2692,11 +2844,13 @@ pub fn serve( .and(warp::path("peers")) .and(warp::path::end()) .and(multi_key_query::()) + .and(task_spawner_filter.clone()) .and(network_globals.clone()) .and_then( |query_res: Result, + task_spawner: TaskSpawner, network_globals: Arc>| { - blocking_json_task(move || { + task_spawner.blocking_json_task(Priority::P1, move || { let query = query_res?; let mut peers: Vec = Vec::new(); network_globals @@ -2760,38 +2914,42 @@ pub fn serve( .and(warp::path("node")) .and(warp::path("peer_count")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(network_globals.clone()) - .and_then(|network_globals: Arc>| { - blocking_json_task(move || { - let mut connected: u64 = 0; - let mut connecting: u64 = 0; - let mut disconnected: u64 = 0; - let mut disconnecting: u64 = 0; + .and_then( + |task_spawner: TaskSpawner, + network_globals: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let mut connected: u64 = 0; + 
let mut connecting: u64 = 0; + let mut disconnected: u64 = 0; + let mut disconnecting: u64 = 0; - network_globals - .peers - .read() - .peers() - .for_each(|(_, peer_info)| { - let state = api_types::PeerState::from_peer_connection_status( - peer_info.connection_status(), - ); - match state { - api_types::PeerState::Connected => connected += 1, - api_types::PeerState::Connecting => connecting += 1, - api_types::PeerState::Disconnected => disconnected += 1, - api_types::PeerState::Disconnecting => disconnecting += 1, - } - }); + network_globals + .peers + .read() + .peers() + .for_each(|(_, peer_info)| { + let state = api_types::PeerState::from_peer_connection_status( + peer_info.connection_status(), + ); + match state { + api_types::PeerState::Connected => connected += 1, + api_types::PeerState::Connecting => connecting += 1, + api_types::PeerState::Disconnected => disconnected += 1, + api_types::PeerState::Disconnecting => disconnecting += 1, + } + }); - Ok(api_types::GenericResponse::from(api_types::PeerCount { - connected, - connecting, - disconnected, - disconnecting, - })) - }) - }); + Ok(api_types::GenericResponse::from(api_types::PeerCount { + connected, + connecting, + disconnected, + disconnecting, + })) + }) + }, + ); /* * validator */ @@ -2808,11 +2966,19 @@ pub fn serve( })) .and(warp::path::end()) .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) - .and_then(|epoch: Epoch, chain: Arc>, log: Logger| { - blocking_json_task(move || proposer_duties::proposer_duties(epoch, &chain, &log)) - }); + .and_then( + |epoch: Epoch, + task_spawner: TaskSpawner, + chain: Arc>, + log: Logger| { + task_spawner.blocking_json_task(Priority::P0, move || { + proposer_duties::proposer_duties(epoch, &chain, &log) + }) + }, + ); // GET validator/blocks/{slot} let get_validator_blocks = any_version @@ -2826,57 +2992,61 @@ pub fn serve( .and(warp::path::end()) .and(not_while_syncing_filter.clone()) 
.and(warp::query::()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) .and_then( |endpoint_version: EndpointVersion, slot: Slot, query: api_types::ValidatorBlocksQuery, + task_spawner: TaskSpawner, chain: Arc>, - log: Logger| async move { - debug!( - log, - "Block production request from HTTP API"; - "slot" => slot - ); + log: Logger| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + debug!( + log, + "Block production request from HTTP API"; + "slot" => slot + ); - let randao_reveal = query.randao_reveal.decompress().map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "randao reveal is not a valid BLS signature: {:?}", - e - )) - })?; + let randao_reveal = query.randao_reveal.decompress().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not a valid BLS signature: {:?}", + e + )) + })?; - let randao_verification = - if query.skip_randao_verification == SkipRandaoVerification::Yes { - if !randao_reveal.is_infinity() { - return Err(warp_utils::reject::custom_bad_request( + let randao_verification = + if query.skip_randao_verification == SkipRandaoVerification::Yes { + if !randao_reveal.is_infinity() { + return Err(warp_utils::reject::custom_bad_request( "randao_reveal must be point-at-infinity if verification is skipped" .into(), )); - } - ProduceBlockVerification::NoVerification - } else { - ProduceBlockVerification::VerifyRandao - }; + } + ProduceBlockVerification::NoVerification + } else { + ProduceBlockVerification::VerifyRandao + }; - let (block, _) = chain - .produce_block_with_verification::>( - randao_reveal, - slot, - query.graffiti.map(Into::into), - randao_verification, - ) - .await - .map_err(warp_utils::reject::block_production_error)?; - let fork_name = block - .to_ref() - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; + let (block, _) = chain + .produce_block_with_verification::>( + randao_reveal, + slot, + 
query.graffiti.map(Into::into), + randao_verification, + ) + .await + .map_err(warp_utils::reject::block_production_error)?; + let fork_name = block + .to_ref() + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; - fork_versioned_response(endpoint_version, fork_name, block) - .map(|response| warp::reply::json(&response).into_response()) - .map(|res| add_consensus_version_header(res, fork_name)) + fork_versioned_response(endpoint_version, fork_name, block) + .map(|response| warp::reply::json(&response).into_response()) + .map(|res| add_consensus_version_header(res, fork_name)) + }) }, ); @@ -2892,49 +3062,53 @@ pub fn serve( .and(warp::path::end()) .and(not_while_syncing_filter.clone()) .and(warp::query::()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and_then( |slot: Slot, query: api_types::ValidatorBlocksQuery, - chain: Arc>| async move { - let randao_reveal = query.randao_reveal.decompress().map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "randao reveal is not a valid BLS signature: {:?}", - e - )) - })?; + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let randao_reveal = query.randao_reveal.decompress().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not a valid BLS signature: {:?}", + e + )) + })?; - let randao_verification = - if query.skip_randao_verification == SkipRandaoVerification::Yes { - if !randao_reveal.is_infinity() { - return Err(warp_utils::reject::custom_bad_request( + let randao_verification = + if query.skip_randao_verification == SkipRandaoVerification::Yes { + if !randao_reveal.is_infinity() { + return Err(warp_utils::reject::custom_bad_request( "randao_reveal must be point-at-infinity if verification is skipped" .into() )); - } - ProduceBlockVerification::NoVerification - } else { - ProduceBlockVerification::VerifyRandao - }; + } + ProduceBlockVerification::NoVerification + } else { + 
ProduceBlockVerification::VerifyRandao + }; - let (block, _) = chain - .produce_block_with_verification::>( - randao_reveal, - slot, - query.graffiti.map(Into::into), - randao_verification, - ) - .await - .map_err(warp_utils::reject::block_production_error)?; - let fork_name = block - .to_ref() - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; + let (block, _) = chain + .produce_block_with_verification::>( + randao_reveal, + slot, + query.graffiti.map(Into::into), + randao_verification, + ) + .await + .map_err(warp_utils::reject::block_production_error)?; + let fork_name = block + .to_ref() + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; - // Pose as a V2 endpoint so we return the fork `version`. - fork_versioned_response(V2, fork_name, block) - .map(|response| warp::reply::json(&response).into_response()) - .map(|res| add_consensus_version_header(res, fork_name)) + // Pose as a V2 endpoint so we return the fork `version`. + fork_versioned_response(V2, fork_name, block) + .map(|response| warp::reply::json(&response).into_response()) + .map(|res| add_consensus_version_header(res, fork_name)) + }) }, ); @@ -2945,10 +3119,13 @@ pub fn serve( .and(warp::path::end()) .and(warp::query::()) .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and_then( - |query: api_types::ValidatorAttestationDataQuery, chain: Arc>| { - blocking_json_task(move || { + |query: api_types::ValidatorAttestationDataQuery, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { let current_slot = chain .slot() .map_err(warp_utils::reject::beacon_chain_error)?; @@ -2977,10 +3154,13 @@ pub fn serve( .and(warp::path::end()) .and(warp::query::()) .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and_then( - |query: api_types::ValidatorAggregateAttestationQuery, chain: Arc>| { - blocking_json_task(move || { + 
|query: api_types::ValidatorAggregateAttestationQuery, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { chain .get_aggregated_attestation_by_slot_and_root( query.slot, @@ -3015,10 +3195,14 @@ pub fn serve( .and(warp::path::end()) .and(not_while_syncing_filter.clone()) .and(warp::body::json()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and_then( - |epoch: Epoch, indices: api_types::ValidatorIndexData, chain: Arc>| { - blocking_json_task(move || { + |epoch: Epoch, + indices: api_types::ValidatorIndexData, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { attester_duties::attester_duties(epoch, &indices.0, &chain) }) }, @@ -3037,10 +3221,14 @@ pub fn serve( .and(warp::path::end()) .and(not_while_syncing_filter.clone()) .and(warp::body::json()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and_then( - |epoch: Epoch, indices: api_types::ValidatorIndexData, chain: Arc>| { - blocking_json_task(move || { + |epoch: Epoch, + indices: api_types::ValidatorIndexData, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { sync_committees::sync_committee_duties(epoch, &indices.0, &chain) }) }, @@ -3053,10 +3241,13 @@ pub fn serve( .and(warp::path::end()) .and(warp::query::()) .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and_then( - |sync_committee_data: SyncContributionData, chain: Arc>| { - blocking_json_task(move || { + |sync_committee_data: SyncContributionData, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { chain .get_aggregated_sync_committee_contribution(&sync_committee_data) .map_err(|e| { @@ -3081,15 +3272,17 @@ pub fn serve( .and(warp::path("aggregate_and_proofs")) .and(warp::path::end()) .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) 
.and(chain_filter.clone()) .and(warp::body::json()) .and(network_tx_filter.clone()) .and(log_filter.clone()) .and_then( - |chain: Arc>, + |task_spawner: TaskSpawner, + chain: Arc>, aggregates: Vec>, network_tx: UnboundedSender>, log: Logger| { - blocking_json_task(move || { + task_spawner.blocking_json_task(Priority::P0, move || { let seen_timestamp = timestamp_now(); let mut verified_aggregates = Vec::with_capacity(aggregates.len()); let mut messages = Vec::with_capacity(aggregates.len()); @@ -3192,16 +3385,18 @@ pub fn serve( .and(warp::path("contribution_and_proofs")) .and(warp::path::end()) .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(warp::body::json()) .and(network_tx_filter) .and(log_filter.clone()) .and_then( - |chain: Arc>, + |task_spawner: TaskSpawner, + chain: Arc>, contributions: Vec>, network_tx: UnboundedSender>, log: Logger| { - blocking_json_task(move || { + task_spawner.blocking_json_task(Priority::P0, move || { sync_committees::process_signed_contribution_and_proofs( contributions, network_tx, @@ -3220,14 +3415,16 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(validator_subscription_tx_filter.clone()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) .and_then( |subscriptions: Vec, validator_subscription_tx: Sender, + task_spawner: TaskSpawner, chain: Arc>, log: Logger| { - blocking_json_task(move || { + task_spawner.blocking_json_task(Priority::P0, move || { for subscription in &subscriptions { chain .validator_monitor @@ -3269,45 +3466,49 @@ pub fn serve( .and(warp::path("prepare_beacon_proposer")) .and(warp::path::end()) .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) .and(warp::body::json()) .and_then( - |chain: Arc>, + |task_spawner: TaskSpawner, + chain: Arc>, log: Logger, - preparation_data: Vec| async move { - let execution_layer = chain - 
.execution_layer - .as_ref() - .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::beacon_chain_error)?; + preparation_data: Vec| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::beacon_chain_error)?; - let current_slot = chain - .slot() - .map_err(warp_utils::reject::beacon_chain_error)?; - let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + let current_slot = chain + .slot() + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - debug!( - log, - "Received proposer preparation data"; - "count" => preparation_data.len(), - ); + debug!( + log, + "Received proposer preparation data"; + "count" => preparation_data.len(), + ); - execution_layer - .update_proposer_preparation(current_epoch, &preparation_data) - .await; + execution_layer + .update_proposer_preparation(current_epoch, &preparation_data) + .await; - chain - .prepare_beacon_proposer(current_slot) - .await - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "error updating proposer preparations: {:?}", - e - )) - })?; + chain + .prepare_beacon_proposer(current_slot) + .await + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "error updating proposer preparations: {:?}", + e + )) + })?; - Ok::<_, warp::reject::Rejection>(warp::reply::json(&()).into_response()) + Ok::<_, warp::reject::Rejection>(warp::reply::json(&()).into_response()) + }) }, ); @@ -3316,127 +3517,171 @@ pub fn serve( .and(warp::path("validator")) .and(warp::path("register_validator")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) .and(warp::body::json()) .and_then( - |chain: Arc>, + |task_spawner: TaskSpawner, + chain: Arc>, log: Logger, - register_val_data: Vec| 
async move { - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::beacon_chain_error)?; - let current_slot = chain - .slot_clock - .now_or_genesis() - .ok_or(BeaconChainError::UnableToReadSlot) - .map_err(warp_utils::reject::beacon_chain_error)?; - let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + register_val_data: Vec| async { + let (tx, rx) = oneshot::channel(); - debug!( - log, - "Received register validator request"; - "count" => register_val_data.len(), - ); + task_spawner + .spawn_async_with_rejection(Priority::P0, async move { + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_slot = chain + .slot_clock + .now_or_genesis() + .ok_or(BeaconChainError::UnableToReadSlot) + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - let head_snapshot = chain.head_snapshot(); - let spec = &chain.spec; - - let (preparation_data, filtered_registration_data): ( - Vec, - Vec, - ) = register_val_data - .into_iter() - .filter_map(|register_data| { - chain - .validator_index(®ister_data.message.pubkey) - .ok() - .flatten() - .and_then(|validator_index| { - let validator = head_snapshot - .beacon_state - .get_validator(validator_index) - .ok()?; - let validator_status = ValidatorStatus::from_validator( - validator, - current_epoch, - spec.far_future_epoch, - ) - .superstatus(); - let is_active_or_pending = - matches!(validator_status, ValidatorStatus::Pending) - || matches!(validator_status, ValidatorStatus::Active); - - // Filter out validators who are not 'active' or 'pending'. 
- is_active_or_pending.then_some({ - ( - ProposerPreparationData { - validator_index: validator_index as u64, - fee_recipient: register_data.message.fee_recipient, - }, - register_data, - ) - }) - }) - }) - .unzip(); - - // Update the prepare beacon proposer cache based on this request. - execution_layer - .update_proposer_preparation(current_epoch, &preparation_data) - .await; - - // Call prepare beacon proposer blocking with the latest update in order to make - // sure we have a local payload to fall back to in the event of the blinded block - // flow failing. - chain - .prepare_beacon_proposer(current_slot) - .await - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "error updating proposer preparations: {:?}", - e - )) - })?; - - let builder = execution_layer - .builder() - .as_ref() - .ok_or(BeaconChainError::BuilderMissing) - .map_err(warp_utils::reject::beacon_chain_error)?; - - info!( - log, - "Forwarding register validator request to connected builder"; - "count" => filtered_registration_data.len(), - ); - - builder - .post_builder_validators(&filtered_registration_data) - .await - .map(|resp| warp::reply::json(&resp).into_response()) - .map_err(|e| { - warn!( + debug!( log, - "Relay error when registering validator(s)"; - "num_registrations" => filtered_registration_data.len(), - "error" => ?e + "Received register validator request"; + "count" => register_val_data.len(), ); - // Forward the HTTP status code if we are able to, otherwise fall back - // to a server error. - if let eth2::Error::ServerMessage(message) = e { - if message.code == StatusCode::BAD_REQUEST.as_u16() { - return warp_utils::reject::custom_bad_request(message.message); - } else { - // According to the spec this response should only be a 400 or 500, - // so we fall back to a 500 here. 
- return warp_utils::reject::custom_server_error(message.message); - } - } - warp_utils::reject::custom_server_error(format!("{e:?}")) + + let head_snapshot = chain.head_snapshot(); + let spec = &chain.spec; + + let (preparation_data, filtered_registration_data): ( + Vec, + Vec, + ) = register_val_data + .into_iter() + .filter_map(|register_data| { + chain + .validator_index(®ister_data.message.pubkey) + .ok() + .flatten() + .and_then(|validator_index| { + let validator = head_snapshot + .beacon_state + .get_validator(validator_index) + .ok()?; + let validator_status = ValidatorStatus::from_validator( + validator, + current_epoch, + spec.far_future_epoch, + ) + .superstatus(); + let is_active_or_pending = + matches!(validator_status, ValidatorStatus::Pending) + || matches!( + validator_status, + ValidatorStatus::Active + ); + + // Filter out validators who are not 'active' or 'pending'. + is_active_or_pending.then_some({ + ( + ProposerPreparationData { + validator_index: validator_index as u64, + fee_recipient: register_data + .message + .fee_recipient, + }, + register_data, + ) + }) + }) + }) + .unzip(); + + // Update the prepare beacon proposer cache based on this request. + execution_layer + .update_proposer_preparation(current_epoch, &preparation_data) + .await; + + // Call prepare beacon proposer blocking with the latest update in order to make + // sure we have a local payload to fall back to in the event of the blinded block + // flow failing. + chain + .prepare_beacon_proposer(current_slot) + .await + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "error updating proposer preparations: {:?}", + e + )) + })?; + + info!( + log, + "Forwarding register validator request to connected builder"; + "count" => filtered_registration_data.len(), + ); + + // It's a waste of a `BeaconProcessor` worker to just + // wait on a response from the builder (especially since + // they have frequent timeouts). 
Spawn a new task and + // send the response back to our original HTTP request + // task via a channel. + let builder_future = async move { + let builder = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::beacon_chain_error)? + .builder() + .as_ref() + .ok_or(BeaconChainError::BuilderMissing) + .map_err(warp_utils::reject::beacon_chain_error)?; + + builder + .post_builder_validators(&filtered_registration_data) + .await + .map(|resp| warp::reply::json(&resp).into_response()) + .map_err(|e| { + warn!( + log, + "Relay error when registering validator(s)"; + "num_registrations" => filtered_registration_data.len(), + "error" => ?e + ); + // Forward the HTTP status code if we are able to, otherwise fall back + // to a server error. + if let eth2::Error::ServerMessage(message) = e { + if message.code == StatusCode::BAD_REQUEST.as_u16() { + return warp_utils::reject::custom_bad_request( + message.message, + ); + } else { + // According to the spec this response should only be a 400 or 500, + // so we fall back to a 500 here. + return warp_utils::reject::custom_server_error( + message.message, + ); + } + } + warp_utils::reject::custom_server_error(format!("{e:?}")) + }) + }; + tokio::task::spawn(async move { tx.send(builder_future.await) }); + + // Just send a generic 200 OK from this closure. We'll + // ignore the `Ok` variant and form a proper response + // from what is sent back down the channel. + Ok(warp::reply::reply().into_response()) }) + .await?; + + // Await a response from the builder without blocking a + // `BeaconProcessor` worker. 
+ rx.await.unwrap_or_else(|_| { + Ok(warp::reply::with_status( + warp::reply::json(&"No response from channel"), + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response()) + }) }, ); // POST validator/sync_committee_subscriptions @@ -3446,15 +3691,17 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(validator_subscription_tx_filter) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) .and_then( |subscriptions: Vec, validator_subscription_tx: Sender, + task_spawner: TaskSpawner, chain: Arc>, log: Logger | { - blocking_json_task(move || { + task_spawner.blocking_json_task(Priority::P0, move || { for subscription in subscriptions { chain .validator_monitor @@ -3489,10 +3736,14 @@ pub fn serve( .and(warp::path::param::()) .and(warp::path::end()) .and(warp::body::json()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and_then( - |epoch: Epoch, indices: Vec, chain: Arc>| { - blocking_json_task(move || { + |epoch: Epoch, + indices: Vec, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { // Ensure the request is for either the current, previous or next epoch. let current_epoch = chain .epoch() @@ -3526,10 +3777,13 @@ pub fn serve( .and(warp::path("liveness")) .and(warp::path::end()) .and(warp::body::json()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and_then( - |request_data: api_types::LivenessRequestData, chain: Arc>| { - blocking_json_task(move || { + |request_data: api_types::LivenessRequestData, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { // Ensure the request is for either the current, previous or next epoch. 
let current_epoch = chain .epoch() @@ -3568,8 +3822,9 @@ pub fn serve( let get_lighthouse_health = warp::path("lighthouse") .and(warp::path("health")) .and(warp::path::end()) - .and_then(|| { - blocking_json_task(move || { + .and(task_spawner_filter.clone()) + .and_then(|task_spawner: TaskSpawner| { + task_spawner.blocking_json_task(Priority::P0, move || { eth2::lighthouse::Health::observe() .map(api_types::GenericResponse::from) .map_err(warp_utils::reject::custom_bad_request) @@ -3581,13 +3836,18 @@ pub fn serve( .and(warp::path("ui")) .and(warp::path("health")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(system_info_filter) .and(app_start_filter) .and(data_dir_filter) .and(network_globals.clone()) .and_then( - |sysinfo, app_start: std::time::Instant, data_dir, network_globals| { - blocking_json_task(move || { + |task_spawner: TaskSpawner, + sysinfo, + app_start: std::time::Instant, + data_dir, + network_globals| { + task_spawner.blocking_json_task(Priority::P0, move || { let app_uptime = app_start.elapsed().as_secs(); Ok(api_types::GenericResponse::from(observe_system_health_bn( sysinfo, @@ -3604,12 +3864,15 @@ pub fn serve( .and(warp::path("ui")) .and(warp::path("validator_count")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then(|chain: Arc>| { - blocking_json_task(move || { - ui::get_validator_count(chain).map(api_types::GenericResponse::from) - }) - }); + .and_then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + ui::get_validator_count(chain).map(api_types::GenericResponse::from) + }) + }, + ); // POST lighthouse/ui/validator_metrics let post_lighthouse_ui_validator_metrics = warp::path("lighthouse") @@ -3617,10 +3880,13 @@ pub fn serve( .and(warp::path("validator_metrics")) .and(warp::path::end()) .and(warp::body::json()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and_then( - |request_data: 
ui::ValidatorMetricsRequestData, chain: Arc>| { - blocking_json_task(move || { + |request_data: ui::ValidatorMetricsRequestData, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { ui::post_validator_monitor_metrics(request_data, chain) .map(api_types::GenericResponse::from) }) @@ -3633,10 +3899,13 @@ pub fn serve( .and(warp::path("validator_info")) .and(warp::path::end()) .and(warp::body::json()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and_then( - |request_data: ui::ValidatorInfoRequestData, chain: Arc>| { - blocking_json_task(move || { + |request_data: ui::ValidatorInfoRequestData, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { ui::get_validator_info(request_data, chain) .map(api_types::GenericResponse::from) }) @@ -3647,21 +3916,26 @@ pub fn serve( let get_lighthouse_syncing = warp::path("lighthouse") .and(warp::path("syncing")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(network_globals.clone()) - .and_then(|network_globals: Arc>| { - blocking_json_task(move || { - Ok(api_types::GenericResponse::from( - network_globals.sync_state(), - )) - }) - }); + .and_then( + |task_spawner: TaskSpawner, + network_globals: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + Ok(api_types::GenericResponse::from( + network_globals.sync_state(), + )) + }) + }, + ); // GET lighthouse/nat let get_lighthouse_nat = warp::path("lighthouse") .and(warp::path("nat")) + .and(task_spawner_filter.clone()) .and(warp::path::end()) - .and_then(|| { - blocking_json_task(move || { + .and_then(|task_spawner: TaskSpawner| { + task_spawner.blocking_json_task(Priority::P1, move || { Ok(api_types::GenericResponse::from( lighthouse_network::metrics::NAT_OPEN .as_ref() @@ -3676,57 +3950,70 @@ pub fn serve( let get_lighthouse_peers = warp::path("lighthouse") .and(warp::path("peers")) .and(warp::path::end()) + 
.and(task_spawner_filter.clone()) .and(network_globals.clone()) - .and_then(|network_globals: Arc>| { - blocking_json_task(move || { - Ok(network_globals - .peers - .read() - .peers() - .map(|(peer_id, peer_info)| eth2::lighthouse::Peer { - peer_id: peer_id.to_string(), - peer_info: peer_info.clone(), - }) - .collect::>()) - }) - }); + .and_then( + |task_spawner: TaskSpawner, + network_globals: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + Ok(network_globals + .peers + .read() + .peers() + .map(|(peer_id, peer_info)| eth2::lighthouse::Peer { + peer_id: peer_id.to_string(), + peer_info: peer_info.clone(), + }) + .collect::>()) + }) + }, + ); // GET lighthouse/peers/connected let get_lighthouse_peers_connected = warp::path("lighthouse") .and(warp::path("peers")) .and(warp::path("connected")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(network_globals) - .and_then(|network_globals: Arc>| { - blocking_json_task(move || { - Ok(network_globals - .peers - .read() - .connected_peers() - .map(|(peer_id, peer_info)| eth2::lighthouse::Peer { - peer_id: peer_id.to_string(), - peer_info: peer_info.clone(), - }) - .collect::>()) - }) - }); + .and_then( + |task_spawner: TaskSpawner, + network_globals: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + Ok(network_globals + .peers + .read() + .connected_peers() + .map(|(peer_id, peer_info)| eth2::lighthouse::Peer { + peer_id: peer_id.to_string(), + peer_info: peer_info.clone(), + }) + .collect::>()) + }) + }, + ); // GET lighthouse/proto_array let get_lighthouse_proto_array = warp::path("lighthouse") .and(warp::path("proto_array")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then(|chain: Arc>| { - blocking_response_task(move || { - Ok::<_, warp::Rejection>(warp::reply::json(&api_types::GenericResponseRef::from( - chain - .canonical_head - .fork_choice_read_lock() - .proto_array() - .core_proto_array(), - ))) - }) - }); + 
.and_then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + Ok::<_, warp::Rejection>(warp::reply::json( + &api_types::GenericResponseRef::from( + chain + .canonical_head + .fork_choice_read_lock() + .proto_array() + .core_proto_array(), + ), + )) + }) + }, + ); // GET lighthouse/validator_inclusion/{epoch}/{validator_id} let get_lighthouse_validator_inclusion_global = warp::path("lighthouse") @@ -3734,10 +4021,14 @@ pub fn serve( .and(warp::path::param::()) .and(warp::path::param::()) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and_then( - |epoch: Epoch, validator_id: ValidatorId, chain: Arc>| { - blocking_json_task(move || { + |epoch: Epoch, + validator_id: ValidatorId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { validator_inclusion::validator_inclusion_data(epoch, &validator_id, &chain) .map(api_types::GenericResponse::from) }) @@ -3750,82 +4041,94 @@ pub fn serve( .and(warp::path::param::()) .and(warp::path("global")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then(|epoch: Epoch, chain: Arc>| { - blocking_json_task(move || { - validator_inclusion::global_validator_inclusion_data(epoch, &chain) - .map(api_types::GenericResponse::from) - }) - }); + .and_then( + |epoch: Epoch, task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + validator_inclusion::global_validator_inclusion_data(epoch, &chain) + .map(api_types::GenericResponse::from) + }) + }, + ); // GET lighthouse/eth1/syncing let get_lighthouse_eth1_syncing = warp::path("lighthouse") .and(warp::path("eth1")) .and(warp::path("syncing")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then(|chain: Arc>| { - blocking_json_task(move || { - let current_slot_opt = chain.slot().ok(); + .and_then( + |task_spawner: 
TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let current_slot_opt = chain.slot().ok(); - chain - .eth1_chain - .as_ref() - .ok_or_else(|| { - warp_utils::reject::custom_not_found( - "Eth1 sync is disabled. See the --eth1 CLI flag.".to_string(), - ) - }) - .and_then(|eth1| { - eth1.sync_status(chain.genesis_time, current_slot_opt, &chain.spec) - .ok_or_else(|| { - warp_utils::reject::custom_server_error( - "Unable to determine Eth1 sync status".to_string(), - ) - }) - }) - .map(api_types::GenericResponse::from) - }) - }); + chain + .eth1_chain + .as_ref() + .ok_or_else(|| { + warp_utils::reject::custom_not_found( + "Eth1 sync is disabled. See the --eth1 CLI flag.".to_string(), + ) + }) + .and_then(|eth1| { + eth1.sync_status(chain.genesis_time, current_slot_opt, &chain.spec) + .ok_or_else(|| { + warp_utils::reject::custom_server_error( + "Unable to determine Eth1 sync status".to_string(), + ) + }) + }) + .map(api_types::GenericResponse::from) + }) + }, + ); // GET lighthouse/eth1/block_cache let get_lighthouse_eth1_block_cache = warp::path("lighthouse") .and(warp::path("eth1")) .and(warp::path("block_cache")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(eth1_service_filter.clone()) - .and_then(|eth1_service: eth1::Service| { - blocking_json_task(move || { - Ok(api_types::GenericResponse::from( - eth1_service - .blocks() - .read() - .iter() - .cloned() - .collect::>(), - )) - }) - }); + .and_then( + |task_spawner: TaskSpawner, eth1_service: eth1::Service| { + task_spawner.blocking_json_task(Priority::P1, move || { + Ok(api_types::GenericResponse::from( + eth1_service + .blocks() + .read() + .iter() + .cloned() + .collect::>(), + )) + }) + }, + ); // GET lighthouse/eth1/deposit_cache let get_lighthouse_eth1_deposit_cache = warp::path("lighthouse") .and(warp::path("eth1")) .and(warp::path("deposit_cache")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(eth1_service_filter) - 
.and_then(|eth1_service: eth1::Service| { - blocking_json_task(move || { - Ok(api_types::GenericResponse::from( - eth1_service - .deposits() - .read() - .cache - .iter() - .cloned() - .collect::>(), - )) - }) - }); + .and_then( + |task_spawner: TaskSpawner, eth1_service: eth1::Service| { + task_spawner.blocking_json_task(Priority::P1, move || { + Ok(api_types::GenericResponse::from( + eth1_service + .deposits() + .read() + .cache + .iter() + .cloned() + .collect::>(), + )) + }) + }, + ); // GET lighthouse/beacon/states/{state_id}/ssz let get_lighthouse_beacon_states_ssz = warp::path("lighthouse") @@ -3834,42 +4137,50 @@ pub fn serve( .and(warp::path::param::()) .and(warp::path("ssz")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then(|state_id: StateId, chain: Arc>| { - blocking_response_task(move || { - // This debug endpoint provides no indication of optimistic status. - let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?; - Response::builder() - .status(200) - .header("Content-Type", "application/ssz") - .body(state.as_ssz_bytes()) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }) - }) - }); + .and_then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + // This debug endpoint provides no indication of optimistic status. 
+ let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?; + Response::builder() + .status(200) + .header("Content-Type", "application/ssz") + .body(state.as_ssz_bytes()) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }) + }) + }, + ); // GET lighthouse/staking let get_lighthouse_staking = warp::path("lighthouse") .and(warp::path("staking")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then(|chain: Arc>| { - blocking_json_task(move || { - if chain.eth1_chain.is_some() { - Ok(()) - } else { - Err(warp_utils::reject::custom_not_found( - "staking is not enabled, \ + .and_then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + if chain.eth1_chain.is_some() { + Ok(()) + } else { + Err(warp_utils::reject::custom_not_found( + "staking is not enabled, \ see the --staking CLI flag" - .to_string(), - )) - } - }) - }); + .to_string(), + )) + } + }) + }, + ); let database_path = warp::path("lighthouse").and(warp::path("database")); @@ -3877,31 +4188,41 @@ pub fn serve( let get_lighthouse_database_info = database_path .and(warp::path("info")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then(|chain: Arc>| blocking_json_task(move || database::info(chain))); + .and_then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || database::info(chain)) + }, + ); // POST lighthouse/database/reconstruct let post_lighthouse_database_reconstruct = database_path .and(warp::path("reconstruct")) .and(warp::path::end()) .and(not_while_syncing_filter) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then(|chain: Arc>| { - blocking_json_task(move || { - chain.store_migrator.process_reconstruction(); - Ok("success") - }) - }); + .and_then( + |task_spawner: TaskSpawner, chain: Arc>| { + 
task_spawner.blocking_json_task(Priority::P1, move || { + chain.store_migrator.process_reconstruction(); + Ok("success") + }) + }, + ); // POST lighthouse/database/historical_blocks let post_lighthouse_database_historical_blocks = database_path .and(warp::path("historical_blocks")) .and(warp::path::end()) .and(warp::body::json()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) .and_then( |blocks: Vec>>, + task_spawner: TaskSpawner, chain: Arc>, log: Logger| { info!( @@ -3910,7 +4231,9 @@ pub fn serve( "count" => blocks.len(), "source" => "http_api" ); - blocking_json_task(move || database::historical_blocks(chain, blocks)) + task_spawner.blocking_json_task(Priority::P1, move || { + database::historical_blocks(chain, blocks) + }) }, ); @@ -3920,10 +4243,13 @@ pub fn serve( .and(warp::path("block_rewards")) .and(warp::query::()) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) - .and_then(|query, chain, log| { - blocking_json_task(move || block_rewards::get_block_rewards(query, chain, log)) + .and_then(|query, task_spawner: TaskSpawner, chain, log| { + task_spawner.blocking_json_task(Priority::P1, move || { + block_rewards::get_block_rewards(query, chain, log) + }) }); // POST lighthouse/analysis/block_rewards @@ -3932,11 +4258,16 @@ pub fn serve( .and(warp::path("block_rewards")) .and(warp::body::json()) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) - .and_then(|blocks, chain, log| { - blocking_json_task(move || block_rewards::compute_block_rewards(blocks, chain, log)) - }); + .and_then( + |blocks, task_spawner: TaskSpawner, chain, log| { + task_spawner.blocking_json_task(Priority::P1, move || { + block_rewards::compute_block_rewards(blocks, chain, log) + }) + }, + ); // GET lighthouse/analysis/attestation_performance/{index} let get_lighthouse_attestation_performance = warp::path("lighthouse") @@ 
-3945,12 +4276,15 @@ pub fn serve( .and(warp::path::param::()) .and(warp::query::()) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then(|target, query, chain: Arc>| { - blocking_json_task(move || { - attestation_performance::get_attestation_performance(target, query, chain) - }) - }); + .and_then( + |target, query, task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + attestation_performance::get_attestation_performance(target, query, chain) + }) + }, + ); // GET lighthouse/analysis/block_packing_efficiency let get_lighthouse_block_packing_efficiency = warp::path("lighthouse") @@ -3958,35 +4292,45 @@ pub fn serve( .and(warp::path("block_packing_efficiency")) .and(warp::query::()) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then(|query, chain: Arc>| { - blocking_json_task(move || { - block_packing_efficiency::get_block_packing_efficiency(query, chain) - }) - }); + .and_then( + |query, task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + block_packing_efficiency::get_block_packing_efficiency(query, chain) + }) + }, + ); // GET lighthouse/merge_readiness let get_lighthouse_merge_readiness = warp::path("lighthouse") .and(warp::path("merge_readiness")) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then(|chain: Arc>| async move { - let merge_readiness = chain.check_merge_readiness().await; - Ok::<_, warp::reject::Rejection>( - warp::reply::json(&api_types::GenericResponse::from(merge_readiness)) - .into_response(), - ) - }); + .and_then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.spawn_async_with_rejection(Priority::P1, async move { + let merge_readiness = chain.check_merge_readiness().await; + Ok::<_, warp::reject::Rejection>( + warp::reply::json(&api_types::GenericResponse::from(merge_readiness)) + 
.into_response(), + ) + }) + }, + ); let get_events = eth_v1 .and(warp::path("events")) .and(warp::path::end()) .and(multi_key_query::()) + .and(task_spawner_filter.clone()) .and(chain_filter) .and_then( |topics_res: Result, + task_spawner: TaskSpawner, chain: Arc>| { - blocking_response_task(move || { + task_spawner.blocking_response_task(Priority::P0, move || { let topics = topics_res?; // for each topic subscribed spawn a new subscription let mut receivers = Vec::with_capacity(topics.topics.len()); @@ -4057,38 +4401,46 @@ pub fn serve( let lighthouse_log_events = warp::path("lighthouse") .and(warp::path("logs")) .and(warp::path::end()) + .and(task_spawner_filter) .and(sse_component_filter) - .and_then(|sse_component: Option| { - blocking_response_task(move || { - if let Some(logging_components) = sse_component { - // Build a JSON stream - let s = - BroadcastStream::new(logging_components.sender.subscribe()).map(|msg| { - match msg { - Ok(data) => { - // Serialize to json - match data.to_json_string() { - // Send the json as a Server Side Event - Ok(json) => Ok(Event::default().data(json)), - Err(e) => Err(warp_utils::reject::server_sent_event_error( - format!("Unable to serialize to JSON {}", e), - )), + .and_then( + |task_spawner: TaskSpawner, sse_component: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + if let Some(logging_components) = sse_component { + // Build a JSON stream + let s = BroadcastStream::new(logging_components.sender.subscribe()).map( + |msg| { + match msg { + Ok(data) => { + // Serialize to json + match data.to_json_string() { + // Send the json as a Server Side Event + Ok(json) => Ok(Event::default().data(json)), + Err(e) => { + Err(warp_utils::reject::server_sent_event_error( + format!("Unable to serialize to JSON {}", e), + )) + } + } } + Err(e) => Err(warp_utils::reject::server_sent_event_error( + format!("Unable to receive event {}", e), + )), } - Err(e) => Err(warp_utils::reject::server_sent_event_error( - 
format!("Unable to receive event {}", e), - )), - } - }); + }, + ); - Ok::<_, warp::Rejection>(warp::sse::reply(warp::sse::keep_alive().stream(s))) - } else { - Err(warp_utils::reject::custom_server_error( - "SSE Logging is not enabled".to_string(), - )) - } - }) - }); + Ok::<_, warp::Rejection>(warp::sse::reply( + warp::sse::keep_alive().stream(s), + )) + } else { + Err(warp_utils::reject::custom_server_error( + "SSE Logging is not enabled".to_string(), + )) + } + }) + }, + ); // Define the ultimate set of routes that will be provided to the server. // Use `uor` rather than `or` in order to simplify types (see `UnifyingOrFilter`). diff --git a/beacon_node/http_api/src/task_spawner.rs b/beacon_node/http_api/src/task_spawner.rs new file mode 100644 index 0000000000..b4da67f77c --- /dev/null +++ b/beacon_node/http_api/src/task_spawner.rs @@ -0,0 +1,214 @@ +use beacon_processor::{BeaconProcessorSend, BlockingOrAsync, Work, WorkEvent}; +use serde::Serialize; +use std::future::Future; +use tokio::sync::{mpsc::error::TrySendError, oneshot}; +use types::EthSpec; +use warp::reply::{Reply, Response}; + +/// Maps a request to a queue in the `BeaconProcessor`. +#[derive(Clone, Copy)] +pub enum Priority { + /// The highest priority. + P0, + /// The lowest priority. + P1, +} + +impl Priority { + /// Wrap `self` in a `WorkEvent` with an appropriate priority. + fn work_event(&self, process_fn: BlockingOrAsync) -> WorkEvent { + let work = match self { + Priority::P0 => Work::ApiRequestP0(process_fn), + Priority::P1 => Work::ApiRequestP1(process_fn), + }; + WorkEvent { + drop_during_sync: false, + work, + } + } +} + +/// Spawns tasks on the `BeaconProcessor` or directly on the tokio executor. +pub struct TaskSpawner { + /// Used to send tasks to the `BeaconProcessor`. The tokio executor will be + /// used if this is `None`. 
+ beacon_processor_send: Option>, +} + +impl TaskSpawner { + pub fn new(beacon_processor_send: Option>) -> Self { + Self { + beacon_processor_send, + } + } + + /// Executes a "blocking" (non-async) task which returns a `Response`. + pub async fn blocking_response_task( + self, + priority: Priority, + func: F, + ) -> Result + where + F: FnOnce() -> Result + Send + Sync + 'static, + T: Reply + Send + 'static, + { + if let Some(beacon_processor_send) = &self.beacon_processor_send { + // Create a closure that will execute `func` and send the result to + // a channel held by this thread. + let (tx, rx) = oneshot::channel(); + let process_fn = move || { + // Execute the function, collect the return value. + let func_result = func(); + // Send the result down the channel. Ignore any failures; the + // send can only fail if the receiver is dropped. + let _ = tx.send(func_result); + }; + + // Send the function to the beacon processor for execution at some arbitrary time. + match send_to_beacon_processor( + beacon_processor_send, + priority, + BlockingOrAsync::Blocking(Box::new(process_fn)), + rx, + ) + .await + { + Ok(result) => result.map(Reply::into_response), + Err(error_response) => Ok(error_response), + } + } else { + // There is no beacon processor so spawn a task directly on the + // tokio executor. + warp_utils::task::blocking_response_task(func).await + } + } + + /// Executes a "blocking" (non-async) task which returns a JSON-serializable + /// object. + pub async fn blocking_json_task( + self, + priority: Priority, + func: F, + ) -> Result + where + F: FnOnce() -> Result + Send + Sync + 'static, + T: Serialize + Send + 'static, + { + let func = || func().map(|t| warp::reply::json(&t).into_response()); + self.blocking_response_task(priority, func).await + } + + /// Executes an async task which may return a `warp::Rejection`. 
+ pub async fn spawn_async_with_rejection( + self, + priority: Priority, + func: impl Future> + Send + Sync + 'static, + ) -> Result { + if let Some(beacon_processor_send) = &self.beacon_processor_send { + // Create a wrapper future that will execute `func` and send the + // result to a channel held by this thread. + let (tx, rx) = oneshot::channel(); + let process_fn = async move { + // Await the future, collect the return value. + let func_result = func.await; + // Send the result down the channel. Ignore any failures; the + // send can only fail if the receiver is dropped. + let _ = tx.send(func_result); + }; + + // Send the function to the beacon processor for execution at some arbitrary time. + send_to_beacon_processor( + beacon_processor_send, + priority, + BlockingOrAsync::Async(Box::pin(process_fn)), + rx, + ) + .await + .unwrap_or_else(Result::Ok) + } else { + // There is no beacon processor so spawn a task directly on the + // tokio executor. + tokio::task::spawn(func).await.unwrap_or_else(|e| { + let response = warp::reply::with_status( + warp::reply::json(&format!("Tokio did not execute task: {e:?}")), + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response(); + Ok(response) + }) + } + } + + /// Executes an async task which always returns a `Response`. + pub async fn spawn_async( + self, + priority: Priority, + func: impl Future + Send + Sync + 'static, + ) -> Response { + if let Some(beacon_processor_send) = &self.beacon_processor_send { + // Create a wrapper future that will execute `func` and send the + // result to a channel held by this thread. + let (tx, rx) = oneshot::channel(); + let process_fn = async move { + // Await the future, collect the return value. + let func_result = func.await; + // Send the result down the channel. Ignore any failures; the + // send can only fail if the receiver is dropped. + let _ = tx.send(func_result); + }; + + // Send the function to the beacon processor for execution at some arbitrary time. 
+ send_to_beacon_processor( + beacon_processor_send, + priority, + BlockingOrAsync::Async(Box::pin(process_fn)), + rx, + ) + .await + .unwrap_or_else(|error_response| error_response) + } else { + // There is no beacon processor so spawn a task directly on the + // tokio executor. + tokio::task::spawn(func).await.unwrap_or_else(|e| { + warp::reply::with_status( + warp::reply::json(&format!("Tokio did not execute task: {e:?}")), + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response() + }) + } + } +} + +/// Send a task to the beacon processor and await execution. +/// +/// If the task is not executed, return an `Err(response)` with an error message +/// for the API consumer. +async fn send_to_beacon_processor( + beacon_processor_send: &BeaconProcessorSend, + priority: Priority, + process_fn: BlockingOrAsync, + rx: oneshot::Receiver, +) -> Result { + let error_message = match beacon_processor_send.try_send(priority.work_event(process_fn)) { + Ok(()) => { + match rx.await { + // The beacon processor executed the task and sent a result. + Ok(func_result) => return Ok(func_result), + // The beacon processor dropped the channel without sending a + // result. The beacon processor dropped this task because its + // queues are full or it's shutting down. + Err(_) => "The task did not execute. The server is overloaded or shutting down.", + } + } + Err(TrySendError::Full(_)) => "The task was dropped. The server is overloaded.", + Err(TrySendError::Closed(_)) => "The task was dropped. 
The server is shutting down.", + }; + + let error_response = warp::reply::with_status( + warp::reply::json(&error_message), + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response(); + Err(error_response) +} diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index 694402a3d7..0367776f8d 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -5,6 +5,7 @@ use beacon_chain::{ }, BeaconChain, BeaconChainTypes, }; +use beacon_processor::{BeaconProcessor, BeaconProcessorChannels, BeaconProcessorConfig}; use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; use lighthouse_network::{ @@ -26,7 +27,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use std::time::Duration; use store::MemoryStore; -use tokio::sync::oneshot; +use task_executor::test_utils::TestRuntime; use types::{ChainSpec, EthSpec}; pub const TCP_PORT: u16 = 42; @@ -39,7 +40,6 @@ pub struct InteractiveTester { pub harness: BeaconChainHarness>, pub client: BeaconNodeHttpClient, pub network_rx: NetworkReceivers, - _server_shutdown: oneshot::Sender<()>, } /// The result of calling `create_api_server`. @@ -48,7 +48,6 @@ pub struct InteractiveTester { pub struct ApiServer> { pub server: SFut, pub listening_socket: SocketAddr, - pub shutdown_tx: oneshot::Sender<()>, pub network_rx: NetworkReceivers, pub local_enr: Enr, pub external_peer_id: PeerId, @@ -96,10 +95,14 @@ impl InteractiveTester { let ApiServer { server, listening_socket, - shutdown_tx: _server_shutdown, network_rx, .. 
- } = create_api_server(harness.chain.clone(), harness.logger().clone()).await; + } = create_api_server( + harness.chain.clone(), + &harness.runtime, + harness.logger().clone(), + ) + .await; tokio::spawn(server); @@ -117,22 +120,23 @@ impl InteractiveTester { harness, client, network_rx, - _server_shutdown, } } } pub async fn create_api_server( chain: Arc>, + test_runtime: &TestRuntime, log: Logger, ) -> ApiServer> { // Get a random unused port. let port = unused_port::unused_tcp4_port().unwrap(); - create_api_server_on_port(chain, log, port).await + create_api_server_on_port(chain, test_runtime, log, port).await } pub async fn create_api_server_on_port( chain: Arc>, + test_runtime: &TestRuntime, log: Logger, port: u16, ) -> ApiServer> { @@ -180,6 +184,37 @@ pub async fn create_api_server_on_port( let eth1_service = eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap(); + let beacon_processor_config = BeaconProcessorConfig::default(); + let BeaconProcessorChannels { + beacon_processor_tx, + beacon_processor_rx, + work_reprocessing_tx, + work_reprocessing_rx, + } = BeaconProcessorChannels::new(&beacon_processor_config); + + let beacon_processor_send = beacon_processor_tx; + BeaconProcessor { + network_globals: network_globals.clone(), + executor: test_runtime.task_executor.clone(), + // The number of workers must be greater than one. Tests which use the + // builder workflow sometimes require an internal HTTP request in order + // to fulfill an already in-flight HTTP request, therefore having only + // one worker will result in a deadlock. 
+ max_workers: 2, + current_workers: 0, + config: beacon_processor_config, + log: log.clone(), + } + .spawn_manager( + beacon_processor_rx, + work_reprocessing_tx, + work_reprocessing_rx, + None, + chain.slot_clock.clone(), + chain.spec.maximum_gossip_clock_disparity(), + ) + .unwrap(); + let ctx = Arc::new(Context { config: Config { enabled: true, @@ -190,26 +225,22 @@ pub async fn create_api_server_on_port( allow_sync_stalled: false, data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), spec_fork_name: None, + enable_beacon_processor: true, }, chain: Some(chain), network_senders: Some(network_senders), network_globals: Some(network_globals), + beacon_processor_send: Some(beacon_processor_send), eth1_service: Some(eth1_service), sse_logging_components: None, log, }); - let (shutdown_tx, shutdown_rx) = oneshot::channel(); - let server_shutdown = async { - // It's not really interesting why this triggered, just that it happened. - let _ = shutdown_rx.await; - }; - let (listening_socket, server) = crate::serve(ctx, server_shutdown).unwrap(); + let (listening_socket, server) = crate::serve(ctx, test_runtime.task_executor.exit()).unwrap(); ApiServer { server, listening_socket, - shutdown_tx, network_rx: network_receivers, local_enr: enr, external_peer_id: peer_id, diff --git a/beacon_node/http_api/src/validator.rs b/beacon_node/http_api/src/validator.rs new file mode 100644 index 0000000000..18e9dbf636 --- /dev/null +++ b/beacon_node/http_api/src/validator.rs @@ -0,0 +1,21 @@ +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use types::*; + +/// Uses the `chain.validator_pubkey_cache` to resolve a pubkey to a validator +/// index and then ensures that the validator exists in the given `state`. +pub fn pubkey_to_validator_index( + chain: &BeaconChain, + state: &BeaconState, + pubkey: &PublicKeyBytes, +) -> Result, BeaconChainError> { + chain + .validator_index(pubkey)? 
+ .filter(|&index| { + state + .validators() + .get(index) + .map_or(false, |v| v.pubkey == *pubkey) + }) + .map(Result::Ok) + .transpose() +} diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index efdf66747d..3ae495378e 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -30,7 +30,6 @@ use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_slot_processing; use std::convert::TryInto; use std::sync::Arc; -use tokio::sync::oneshot; use tokio::time::Duration; use tree_hash::TreeHash; use types::application_domain::ApplicationDomain; @@ -70,7 +69,6 @@ struct ApiTester { attester_slashing: AttesterSlashing, proposer_slashing: ProposerSlashing, voluntary_exit: SignedVoluntaryExit, - _server_shutdown: oneshot::Sender<()>, network_rx: NetworkReceivers, local_enr: Enr, external_peer_id: PeerId, @@ -234,11 +232,10 @@ impl ApiTester { let ApiServer { server, listening_socket: _, - shutdown_tx, network_rx, local_enr, external_peer_id, - } = create_api_server_on_port(chain.clone(), log, port).await; + } = create_api_server_on_port(chain.clone(), &harness.runtime, log, port).await; harness.runtime.task_executor.spawn(server, "api_server"); @@ -266,7 +263,6 @@ impl ApiTester { attester_slashing, proposer_slashing, voluntary_exit, - _server_shutdown: shutdown_tx, network_rx, local_enr, external_peer_id, @@ -320,11 +316,10 @@ impl ApiTester { let ApiServer { server, listening_socket, - shutdown_tx, network_rx, local_enr, external_peer_id, - } = create_api_server(chain.clone(), log).await; + } = create_api_server(chain.clone(), &harness.runtime, log).await; harness.runtime.task_executor.spawn(server, "api_server"); @@ -349,7 +344,6 @@ impl ApiTester { attester_slashing, proposer_slashing, voluntary_exit, - _server_shutdown: shutdown_tx, network_rx, local_enr, external_peer_id, diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs 
b/beacon_node/network/src/network_beacon_processor/mod.rs index db83bfc164..f8c4e37ffe 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -7,9 +7,9 @@ use beacon_chain::{ }; use beacon_chain::{BeaconChainTypes, NotifyExecutionLayer}; use beacon_processor::{ - work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend, DuplicateCache, - GossipAggregatePackage, GossipAttestationPackage, Work, WorkEvent as BeaconWorkEvent, - MAX_SCHEDULED_WORK_QUEUE_LEN, MAX_WORK_EVENT_QUEUE_LEN, + work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorChannels, BeaconProcessorSend, + DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, Work, + WorkEvent as BeaconWorkEvent, }; use environment::null_logger; use lighthouse_network::{ @@ -545,11 +545,15 @@ impl NetworkBeaconProcessor> { pub fn null_for_testing( network_globals: Arc>, ) -> (Self, mpsc::Receiver>) { - let (beacon_processor_send, beacon_processor_receive) = - mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); + let BeaconProcessorChannels { + beacon_processor_tx, + beacon_processor_rx, + work_reprocessing_tx, + work_reprocessing_rx: _work_reprocessing_rx, + } = <_>::default(); + let (network_tx, _network_rx) = mpsc::unbounded_channel(); let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); - let (reprocess_tx, _reprocess_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); let log = null_logger().unwrap(); let harness: BeaconChainHarness> = BeaconChainHarness::builder(E::default()) @@ -562,18 +566,18 @@ impl NetworkBeaconProcessor> { let runtime = TestRuntime::default(); let network_beacon_processor = Self { - beacon_processor_send: BeaconProcessorSend(beacon_processor_send), + beacon_processor_send: beacon_processor_tx, duplicate_cache: DuplicateCache::default(), chain: harness.chain, network_tx, sync_tx, - reprocess_tx, + reprocess_tx: work_reprocessing_tx, network_globals, invalid_block_storage: 
InvalidBlockStorage::Disabled, executor: runtime.task_executor.clone(), log, }; - (network_beacon_processor, beacon_processor_receive) + (network_beacon_processor, beacon_processor_rx) } } diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index dbe93de1ea..ce5b914117 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -11,7 +11,7 @@ use crate::{ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; -use beacon_chain::{BeaconChain, ChainConfig}; +use beacon_chain::BeaconChain; use beacon_processor::{work_reprocessing_queue::*, *}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, @@ -68,16 +68,21 @@ struct TestRig { impl Drop for TestRig { fn drop(&mut self) { // Causes the beacon processor to shutdown. - self.beacon_processor_tx = BeaconProcessorSend(mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN).0); + let len = BeaconProcessorConfig::default().max_work_event_queue_len; + self.beacon_processor_tx = BeaconProcessorSend(mpsc::channel(len).0); } } impl TestRig { pub async fn new(chain_length: u64) -> Self { - Self::new_with_chain_config(chain_length, ChainConfig::default()).await + Self::new_parametric( + chain_length, + BeaconProcessorConfig::default().enable_backfill_rate_limiting, + ) + .await } - pub async fn new_with_chain_config(chain_length: u64, chain_config: ChainConfig) -> Self { + pub async fn new_parametric(chain_length: u64, enable_backfill_rate_limiting: bool) -> Self { // This allows for testing voluntary exits without building out a massive chain. 
let mut spec = E::default_spec(); spec.shard_committee_period = 2; @@ -86,7 +91,7 @@ impl TestRig { .spec(spec) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() - .chain_config(chain_config) + .chain_config(<_>::default()) .build(); harness.advance_slot(); @@ -172,8 +177,15 @@ impl TestRig { let log = harness.logger().clone(); - let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); - let beacon_processor_tx = BeaconProcessorSend(beacon_processor_tx); + let mut beacon_processor_config = BeaconProcessorConfig::default(); + beacon_processor_config.enable_backfill_rate_limiting = enable_backfill_rate_limiting; + let BeaconProcessorChannels { + beacon_processor_tx, + beacon_processor_rx, + work_reprocessing_tx, + work_reprocessing_rx, + } = BeaconProcessorChannels::new(&beacon_processor_config); + let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); // Default metadata @@ -196,8 +208,6 @@ impl TestRig { let executor = harness.runtime.task_executor.clone(); - let (work_reprocessing_tx, work_reprocessing_rx) = - mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); let (work_journal_tx, work_journal_rx) = mpsc::channel(16_364); let duplicate_cache = DuplicateCache::default(); @@ -220,7 +230,7 @@ impl TestRig { executor, max_workers: cmp::max(1, num_cpus::get()), current_workers: 0, - enable_backfill_rate_limiting: harness.chain.config.enable_backfill_rate_limiting, + config: beacon_processor_config, log: log.clone(), } .spawn_manager( @@ -943,11 +953,8 @@ async fn test_backfill_sync_processing() { /// Ensure that backfill batches get processed as fast as they can when rate-limiting is disabled. 
#[tokio::test] async fn test_backfill_sync_processing_rate_limiting_disabled() { - let chain_config = ChainConfig { - enable_backfill_rate_limiting: false, - ..Default::default() - }; - let mut rig = TestRig::new_with_chain_config(SMALL_CHAIN, chain_config).await; + let enable_backfill_rate_limiting = false; + let mut rig = TestRig::new_parametric(SMALL_CHAIN, enable_backfill_rate_limiting).await; for _ in 0..3 { rig.enqueue_backfill_batch(); diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 9943da15a0..67f62ff90d 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -4,15 +4,13 @@ mod tests { use crate::persisted_dht::load_dht; use crate::{NetworkConfig, NetworkService}; use beacon_chain::test_utils::BeaconChainHarness; - use beacon_processor::{ - BeaconProcessorSend, MAX_SCHEDULED_WORK_QUEUE_LEN, MAX_WORK_EVENT_QUEUE_LEN, - }; + use beacon_processor::BeaconProcessorChannels; use lighthouse_network::Enr; use slog::{o, Drain, Level, Logger}; use sloggers::{null::NullLoggerBuilder, Build}; use std::str::FromStr; use std::sync::Arc; - use tokio::{runtime::Runtime, sync::mpsc}; + use tokio::runtime::Runtime; use types::MinimalEthSpec; fn get_logger(actual_log: bool) -> Logger { @@ -70,17 +68,20 @@ mod tests { // Create a new network service which implicitly gets dropped at the // end of the block. 
- let (beacon_processor_send, _beacon_processor_receive) = - mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); - let (beacon_processor_reprocess_tx, _beacon_processor_reprocess_rx) = - mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); + let BeaconProcessorChannels { + beacon_processor_tx, + beacon_processor_rx: _beacon_processor_rx, + work_reprocessing_tx, + work_reprocessing_rx: _work_reprocessing_rx, + } = <_>::default(); + let _network_service = NetworkService::start( beacon_chain.clone(), &config, executor, None, - BeaconProcessorSend(beacon_processor_send), - beacon_processor_reprocess_tx, + beacon_processor_tx, + work_reprocessing_tx, ) .await .unwrap(); diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index e46c3d8ca1..0330bd3f7c 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -382,6 +382,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { stalled. This is useful for very small testnets. TESTING ONLY. DO NOT USE ON \ MAINNET.") ) + .arg( + Arg::with_name("http-enable-beacon-processor") + .long("http-enable-beacon-processor") + .value_name("BOOLEAN") + .help("The beacon processor is a scheduler which provides quality-of-service and \ + DoS protection. When set to \"true\", HTTP API requests will be queued and scheduled \ + alongside other tasks. When set to \"false\", HTTP API responses will be executed \ + immediately.") + .takes_value(true) + .default_value("true") + ) /* Prometheus metrics HTTP server related arguments */ .arg( Arg::with_name("metrics") @@ -1141,4 +1152,55 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .possible_values(ProgressiveBalancesMode::VARIANTS) ) + .arg( + Arg::with_name("beacon-processor-max-workers") + .long("beacon-processor-max-workers") + .value_name("INTEGER") + .help("Specifies the maximum concurrent tasks for the task scheduler. Increasing \ + this value may increase resource consumption. Reducing the value \ + may result in decreased resource usage and diminished performance. 
The \ + default value is the number of logical CPU cores on the host.") + .takes_value(true) + ) + .arg( + Arg::with_name("beacon-processor-work-queue-len") + .long("beacon-processor-work-queue-len") + .value_name("INTEGER") + .help("Specifies the length of the inbound event queue. \ + Higher values may prevent messages from being dropped while lower values \ + may help protect the node from becoming overwhelmed.") + .default_value("16384") + .takes_value(true) + ) + .arg( + Arg::with_name("beacon-processor-reprocess-queue-len") + .long("beacon-processor-reprocess-queue-len") + .value_name("INTEGER") + .help("Specifies the length of the queue for messages requiring delayed processing. \ + Higher values may prevent messages from being dropped while lower values \ + may help protect the node from becoming overwhelmed.") + .default_value("12288") + .takes_value(true) + ) + .arg( + Arg::with_name("beacon-processor-attestation-batch-size") + .long("beacon-processor-attestation-batch-size") + .value_name("INTEGER") + .help("Specifies the number of gossip attestations in a signature verification batch. \ + Higher values may reduce CPU usage in a healthy network whilst lower values may \ + increase CPU usage in an unhealthy or hostile network.") + .default_value("64") + .takes_value(true) + ) + .arg( + Arg::with_name("beacon-processor-aggregate-batch-size") + .long("beacon-processor-aggregate-batch-size") + .value_name("INTEGER") + .help("Specifies the number of gossip aggregate attestations in a signature \ + verification batch. 
\ + Higher values may reduce CPU usage in a healthy network while lower values may \ + increase CPU usage in an unhealthy or hostile network.") + .default_value("64") + .takes_value(true) + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c16b1675a9..21df86620d 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -4,6 +4,7 @@ use beacon_chain::chain_config::{ }; use clap::ArgMatches; use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; +use clap_utils::parse_required; use client::{ClientConfig, ClientGenesis}; use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR}; use environment::RuntimeContext; @@ -148,6 +149,9 @@ pub fn get_config( client_config.http_api.allow_sync_stalled = true; } + client_config.http_api.enable_beacon_processor = + parse_required(cli_args, "http-enable-beacon-processor")?; + if let Some(cache_size) = clap_utils::parse_optional(cli_args, "shuffling-cache-size")? { client_config.chain.shuffling_cache_size = cache_size; } @@ -800,7 +804,7 @@ pub fn get_config( } // Backfill sync rate-limiting - client_config.chain.enable_backfill_rate_limiting = + client_config.beacon_processor.enable_backfill_rate_limiting = !cli_args.is_present("disable-backfill-rate-limiting"); if let Some(path) = clap_utils::parse_optional(cli_args, "invalid-gossip-verified-blocks-path")? @@ -814,6 +818,28 @@ pub fn get_config( client_config.chain.progressive_balances_mode = progressive_balances_mode; } + if let Some(max_workers) = clap_utils::parse_optional(cli_args, "beacon-processor-max-workers")? 
+ { + client_config.beacon_processor.max_workers = max_workers; + } + + if client_config.beacon_processor.max_workers == 0 { + return Err("--beacon-processor-max-workers must be a non-zero value".to_string()); + } + + client_config.beacon_processor.max_work_event_queue_len = + clap_utils::parse_required(cli_args, "beacon-processor-work-queue-len")?; + client_config.beacon_processor.max_scheduled_work_queue_len = + clap_utils::parse_required(cli_args, "beacon-processor-reprocess-queue-len")?; + client_config + .beacon_processor + .max_gossip_attestation_batch_size = + clap_utils::parse_required(cli_args, "beacon-processor-attestation-batch-size")?; + client_config + .beacon_processor + .max_gossip_aggregate_batch_size = + clap_utils::parse_required(cli_args, "beacon-processor-aggregate-batch-size")?; + Ok(client_config) } diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 47694825ca..3bef69ce83 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -83,6 +83,7 @@ impl ProductionBeaconNode { let builder = ClientBuilder::new(context.eth_spec_instance.clone()) .runtime_context(context) .chain_spec(spec) + .beacon_processor(client_config.beacon_processor.clone()) .http_api_config(client_config.http_api.clone()) .disk_store(&db_path, &freezer_db_path, store_config, log.clone())?; diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 169aa67fdd..8003236f2d 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -66,6 +66,7 @@ lighthouse_network = { path = "../beacon_node/lighthouse_network" } sensitive_url = { path = "../common/sensitive_url" } eth1 = { path = "../beacon_node/eth1" } eth2 = { path = "../common/eth2" } +beacon_processor = { path = "../beacon_node/beacon_processor" } [[test]] name = "lighthouse_tests" diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index bc5f610881..ecc936cbfb 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -5,6 +5,7 @@ use 
beacon_node::beacon_chain::chain_config::{ DisallowedReOrgOffsets, DEFAULT_RE_ORG_CUTOFF_DENOMINATOR, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, }; +use beacon_processor::BeaconProcessorConfig; use eth1::Eth1Endpoint; use lighthouse_network::PeerId; use std::fs::File; @@ -1118,13 +1119,13 @@ fn disable_backfill_rate_limiting_flag() { CommandLineTest::new() .flag("disable-backfill-rate-limiting", None) .run_with_zero_port() - .with_config(|config| assert!(!config.chain.enable_backfill_rate_limiting)); + .with_config(|config| assert!(!config.beacon_processor.enable_backfill_rate_limiting)); } #[test] fn default_backfill_rate_limiting_flag() { CommandLineTest::new() .run_with_zero_port() - .with_config(|config| assert!(config.chain.enable_backfill_rate_limiting)); + .with_config(|config| assert!(config.beacon_processor.enable_backfill_rate_limiting)); } #[test] fn default_boot_nodes() { @@ -1463,6 +1464,22 @@ fn http_allow_sync_stalled_flag() { .with_config(|config| assert_eq!(config.http_api.allow_sync_stalled, true)); } #[test] +fn http_enable_beacon_processor() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true)); + + CommandLineTest::new() + .flag("http-enable-beacon-processor", Some("true")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true)); + + CommandLineTest::new() + .flag("http-enable-beacon-processor", Some("false")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, false)); +} +#[test] fn http_tls_flags() { let dir = TempDir::new().expect("Unable to create temporary directory"); CommandLineTest::new() @@ -2295,3 +2312,40 @@ fn progressive_balances_fast() { ) }); } + +#[test] +fn beacon_processor() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.beacon_processor, <_>::default())); + + 
CommandLineTest::new() + .flag("beacon-processor-max-workers", Some("1")) + .flag("beacon-processor-work-queue-len", Some("2")) + .flag("beacon-processor-reprocess-queue-len", Some("3")) + .flag("beacon-processor-attestation-batch-size", Some("4")) + .flag("beacon-processor-aggregate-batch-size", Some("5")) + .flag("disable-backfill-rate-limiting", None) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.beacon_processor, + BeaconProcessorConfig { + max_workers: 1, + max_work_event_queue_len: 2, + max_scheduled_work_queue_len: 3, + max_gossip_attestation_batch_size: 4, + max_gossip_aggregate_batch_size: 5, + enable_backfill_rate_limiting: false + } + ) + }); +} + +#[test] +#[should_panic] +fn beacon_processor_zero_workers() { + CommandLineTest::new() + .flag("beacon-processor-max-workers", Some("0")) + .run_with_zero_port(); +} diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index 62db67b8c5..394f8558fa 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -24,7 +24,7 @@ pub use execution_layer::test_utils::{ pub use validator_client::Config as ValidatorConfig; /// The global timeout for HTTP requests to the beacon node. -const HTTP_TIMEOUT: Duration = Duration::from_secs(4); +const HTTP_TIMEOUT: Duration = Duration::from_secs(8); /// The timeout for a beacon node to start up. const STARTUP_TIMEOUT: Duration = Duration::from_secs(60); @@ -115,6 +115,11 @@ pub fn testing_client_config() -> ClientConfig { genesis_time: now, }; + // Specify a constant count of beacon processor workers. Having this number + // too low can cause annoying HTTP timeouts, especially on Github runners + // with 2 logical CPUs. 
+ client_config.beacon_processor.max_workers = 4; + client_config } diff --git a/watch/Cargo.toml b/watch/Cargo.toml index 1a003167dc..23e2c566dc 100644 --- a/watch/Cargo.toml +++ b/watch/Cargo.toml @@ -43,3 +43,4 @@ beacon_chain = { path = "../beacon_node/beacon_chain" } network = { path = "../beacon_node/network" } testcontainers = "0.14.0" unused_port = { path = "../common/unused_port" } +task_executor = { path = "../common/task_executor" } diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs index 28700ccdce..af1cde26b7 100644 --- a/watch/tests/tests.rs +++ b/watch/tests/tests.rs @@ -85,7 +85,6 @@ struct TesterBuilder { pub harness: BeaconChainHarness>, pub config: Config, _bn_network_rx: NetworkReceivers, - _bn_api_shutdown_tx: oneshot::Sender<()>, } impl TesterBuilder { @@ -102,10 +101,14 @@ impl TesterBuilder { let ApiServer { server, listening_socket: bn_api_listening_socket, - shutdown_tx: _bn_api_shutdown_tx, network_rx: _bn_network_rx, .. - } = create_api_server(harness.chain.clone(), harness.logger().clone()).await; + } = create_api_server( + harness.chain.clone(), + &harness.runtime, + harness.logger().clone(), + ) + .await; tokio::spawn(server); /* @@ -139,7 +142,6 @@ impl TesterBuilder { harness, config, _bn_network_rx, - _bn_api_shutdown_tx, } } pub async fn build(self, pool: PgPool) -> Tester { @@ -186,7 +188,6 @@ impl TesterBuilder { config: self.config, updater, _bn_network_rx: self._bn_network_rx, - _bn_api_shutdown_tx: self._bn_api_shutdown_tx, _watch_shutdown_tx, } } @@ -204,7 +205,6 @@ struct Tester { pub config: Config, pub updater: UpdateHandler, _bn_network_rx: NetworkReceivers, - _bn_api_shutdown_tx: oneshot::Sender<()>, _watch_shutdown_tx: oneshot::Sender<()>, } From 1fcada8a328be02987e8135bde0bd37f0eae59b0 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 10 Aug 2023 00:10:09 +0000 Subject: [PATCH 38/75] Improve transport connection errors (#4540) ## Issue Addressed #4538 ## Proposed Changes add newtype wrapper around 
DialError that extracts error messages and logs them in a more readable format ## Additional Info I was able to test Transport Dial Errors in the situation where a libp2p instance attempts to ping a nonexistent peer. That error message should look something like `A transport level error has ocurred: Connection refused (os error 61)` AgeManning mentioned we should try fetching only the most inner error (in situations where there's a nested error). I took a stab at implementing that. For non transport DialErrors, I wrote out the error messages explicitly (as per the docs). Could potentially clean things up here if that's not necessary. Co-authored-by: Age Manning --- .../lighthouse_network/src/discovery/mod.rs | 4 +- beacon_node/lighthouse_network/src/lib.rs | 41 +++++++++++++++++++ .../src/peer_manager/network_behaviour.rs | 4 +- 3 files changed, 45 insertions(+), 4 deletions(-) diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 0f8ddc53c1..82a371d8a2 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -7,9 +7,9 @@ pub(crate) mod enr; pub mod enr_ext; // Allow external use of the lighthouse ENR builder -use crate::metrics; use crate::service::TARGET_SUBNET_PEERS; use crate::{error, Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; +use crate::{metrics, ClearDialError}; use discv5::{enr::NodeId, Discv5, Discv5Event}; pub use enr::{ build_enr, create_enr_builder_from_config, load_enr_from_disk, use_or_load_enr, CombinedKey, @@ -1111,7 +1111,7 @@ impl Discovery { | DialError::Transport(_) | DialError::WrongPeerId { .. 
} => { // set peer as disconnected in discovery DHT - debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id); + debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id, "error" => %ClearDialError(error)); self.disconnect_peer(&peer_id); } DialError::DialPeerConditionFalse(_) | DialError::Aborted => {} diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 3d539af3b2..7467fb7f06 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -17,6 +17,7 @@ pub mod rpc; pub mod types; pub use config::gossip_max_size; +use libp2p::swarm::DialError; pub use listen_addr::*; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; @@ -63,6 +64,46 @@ impl<'de> Deserialize<'de> for PeerIdSerialized { } } +// A wrapper struct that prints a dial error nicely. +struct ClearDialError<'a>(&'a DialError); + +impl<'a> ClearDialError<'a> { + fn most_inner_error(err: &(dyn std::error::Error)) -> &(dyn std::error::Error) { + let mut current = err; + while let Some(source) = current.source() { + current = source; + } + current + } +} + +impl<'a> std::fmt::Display for ClearDialError<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { + match &self.0 { + DialError::Transport(errors) => { + for (_, transport_error) in errors { + match transport_error { + libp2p::TransportError::MultiaddrNotSupported(multiaddr_error) => { + write!(f, "Multiaddr not supported: {multiaddr_error}")?; + } + libp2p::TransportError::Other(other_error) => { + let inner_error = ClearDialError::most_inner_error(other_error); + write!(f, "Transport error: {inner_error}")?; + } + } + } + Ok(()) + } + DialError::LocalPeerId { .. 
} => write!(f, "The peer being dialed is the local peer."), + DialError::NoAddresses => write!(f, "No addresses for the peer to dial."), + DialError::DialPeerConditionFalse(_) => write!(f, "PeerCondition evaluation failed."), + DialError::Aborted => write!(f, "Connection aborted."), + DialError::WrongPeerId { .. } => write!(f, "Wrong peer id."), + DialError::Denied { cause } => write!(f, "Connection denied: {:?}", cause), + } + } +} + pub use crate::types::{ error, Enr, EnrSyncCommitteeBitfield, GossipTopic, NetworkGlobals, PubsubMessage, Subnet, SubnetDiscovery, diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index ce374bb9ab..70f421681a 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -12,9 +12,9 @@ use libp2p::swarm::{ConnectionId, NetworkBehaviour, PollParameters, ToSwarm}; use slog::{debug, error}; use types::EthSpec; -use crate::metrics; use crate::rpc::GoodbyeReason; use crate::types::SyncState; +use crate::{metrics, ClearDialError}; use super::peerdb::BanResult; use super::{ConnectingType, PeerManager, PeerManagerEvent, ReportSource}; @@ -132,7 +132,7 @@ impl NetworkBehaviour for PeerManager { error, connection_id: _, }) => { - debug!(self.log, "Failed to dial peer"; "peer_id"=> ?peer_id, "error" => %error); + debug!(self.log, "Failed to dial peer"; "peer_id"=> ?peer_id, "error" => %ClearDialError(error)); self.on_dial_failure(peer_id); } FromSwarm::ExternalAddrConfirmed(_) => { From f1ac12f23ae581ace17f1cabcff36a6fb3a3e13f Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Mon, 14 Aug 2023 00:29:43 +0000 Subject: [PATCH 39/75] Fix some typos (#4565) --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- beacon_node/beacon_chain/src/beacon_proposer_cache.rs | 4 ++-- beacon_node/execution_layer/src/lib.rs | 2 +- 
beacon_node/http_api/src/lib.rs | 6 +++--- beacon_node/operation_pool/src/reward_cache.rs | 2 +- consensus/fork_choice/src/fork_choice.rs | 7 ++----- consensus/proto_array/src/proto_array.rs | 2 +- validator_client/src/duties_service.rs | 2 +- validator_client/src/lib.rs | 2 +- 9 files changed, 13 insertions(+), 16 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 25964ed216..987ea9e7c3 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -164,7 +164,7 @@ pub enum WhenSlotSkipped { /// /// This is how the HTTP API behaves. None, - /// If the slot it a skip slot, return the previous non-skipped block. + /// If the slot is a skip slot, return the previous non-skipped block. /// /// This is generally how the specification behaves. Prev, diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index e76a5a8058..eae71bd63e 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -135,7 +135,7 @@ impl BeaconProposerCache { /// Compute the proposer duties using the head state without cache. pub fn compute_proposer_duties_from_head( - current_epoch: Epoch, + request_epoch: Epoch, chain: &BeaconChain, ) -> Result<(Vec, Hash256, ExecutionStatus, Fork), BeaconChainError> { // Atomically collect information about the head whilst holding the canonical head `Arc` as @@ -159,7 +159,7 @@ pub fn compute_proposer_duties_from_head( .ok_or(BeaconChainError::HeadMissingFromForkChoice(head_block_root))?; // Advance the state into the requested epoch. 
- ensure_state_is_in_epoch(&mut state, head_state_root, current_epoch, &chain.spec)?; + ensure_state_is_in_epoch(&mut state, head_state_root, request_epoch, &chain.spec)?; let indices = state .get_beacon_proposer_indices(&chain.spec) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 579bebdacb..b57bba7518 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -76,7 +76,7 @@ const DEFAULT_SUGGESTED_FEE_RECIPIENT: [u8; 20] = /// A payload alongside some information about where it came from. pub enum ProvenancedPayload

{ - /// A good ol' fashioned farm-to-table payload from your local EE. + /// A good old fashioned farm-to-table payload from your local EE. Local(P), /// A payload from a builder (e.g. mev-boost). Builder(P), diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 9512d18aba..4d28326d18 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1426,10 +1426,10 @@ pub fn serve( ); /* - * beacon/blocks + * beacon/blinded_blocks */ - // POST beacon/blocks + // POST beacon/blinded_blocks let post_beacon_blinded_blocks = eth_v1 .and(warp::path("beacon")) .and(warp::path("blinded_blocks")) @@ -3208,7 +3208,7 @@ pub fn serve( }, ); - // POST validator/duties/sync + // POST validator/duties/sync/{epoch} let post_validator_duties_sync = eth_v1 .and(warp::path("validator")) .and(warp::path("duties")) diff --git a/beacon_node/operation_pool/src/reward_cache.rs b/beacon_node/operation_pool/src/reward_cache.rs index 5b9d4258e9..9e4c424bd7 100644 --- a/beacon_node/operation_pool/src/reward_cache.rs +++ b/beacon_node/operation_pool/src/reward_cache.rs @@ -12,7 +12,7 @@ struct Initialization { #[derive(Debug, Clone, Default)] pub struct RewardCache { initialization: Option, - /// `BitVec` of validator indices which don't have default participation flags for the prev. epoch. + /// `BitVec` of validator indices which don't have default participation flags for the prev epoch. /// /// We choose to only track whether validators have *any* participation flag set because /// it's impossible to include a new attestation which is better than the existing participation diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index e60774fc86..059494453c 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -466,13 +466,10 @@ where // for lower slots to account for skip slots. 
.find(|(_, slot)| *slot <= ancestor_slot) .map(|(root, _)| root)), - Ordering::Less => Ok(Some(block_root)), - Ordering::Equal => // Root is older than queried slot, thus a skip slot. Return most recent root prior // to slot. - { - Ok(Some(block_root)) - } + Ordering::Less => Ok(Some(block_root)), + Ordering::Equal => Ok(Some(block_root)), } } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 88111b461d..6cb10e3d20 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -910,7 +910,7 @@ impl ProtoArray { Ok(()) } - /// Indicates if the node itself is viable for the head, or if it's best descendant is viable + /// Indicates if the node itself is viable for the head, or if its best descendant is viable /// for the head. fn node_leads_to_viable_head( &self, diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 535f6aeb0a..a3b3cabccc 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -1021,7 +1021,7 @@ async fn fill_in_selection_proofs( /// 2. We won't miss a block if the duties for the current slot happen to change with this poll. /// /// This sounds great, but is it safe? Firstly, the additional notification will only contain block -/// producers that were not included in the first notification. This should be safety enough. +/// producers that were not included in the first notification. This should be safe enough. /// However, we also have the slashing protection as a second line of defence. These two factors /// provide an acceptable level of safety. 
/// diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index f7a80f0a8e..6ca8db87d5 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -524,7 +524,7 @@ impl ProductionValidatorClient { pub fn start_service(&mut self) -> Result<(), String> { // We use `SLOTS_PER_EPOCH` as the capacity of the block notification channel, because - // we don't except notifications to be delayed by more than a single slot, let alone a + // we don't expect notifications to be delayed by more than a single slot, let alone a // whole epoch! let channel_capacity = T::slots_per_epoch() as usize; let (block_service_tx, block_service_rx) = mpsc::channel(channel_capacity); From 9d8b2764ef894cb6075722669e28dcc68243228c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Mon, 14 Aug 2023 00:29:44 +0000 Subject: [PATCH 40/75] align editorconfig with rustfmt (#4600) ## Issue Addressed There seems to be a conflict between `editorconfig` and `rustfmt`. `editorconfig` is configured with [`insert_final_newline=false`](https://github.com/sigp/lighthouse/blob/stable/.editorconfig#L9C1-L9C21) which [removes the newline](https://github.com/editorconfig/editorconfig/wiki/EditorConfig-Properties#insert_final_newline), whereas `rustfmt` [adds a newline](https://rust-lang.github.io/rustfmt/?version=v1.6.0&search=#newline_style). 
## Proposed Changes Align `.editorconfig` with `rustfmt` --- .editorconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.editorconfig b/.editorconfig index a14dd7a516..149415d127 100644 --- a/.editorconfig +++ b/.editorconfig @@ -6,4 +6,4 @@ end_of_line=lf charset=utf-8 trim_trailing_whitespace=true max_line_length=100 -insert_final_newline=false +insert_final_newline=true \ No newline at end of file From 501ce62d7c9acd5be23b885fcd86e01331df903d Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Mon, 14 Aug 2023 00:29:45 +0000 Subject: [PATCH 41/75] minor optimize process_active_validator: avoid a call to `state.get_validator` (#4608) --- .../src/per_epoch_processing/altair/participation_cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs index a5caddd045..d67e7874cb 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs @@ -111,8 +111,8 @@ impl SingleEpochParticipationCache { current_epoch: Epoch, relative_epoch: RelativeEpoch, ) -> Result<(), BeaconStateError> { - let val_balance = state.get_effective_balance(val_index)?; let validator = state.get_validator(val_index)?; + let val_balance = validator.effective_balance; // Sanity check to ensure the validator is active. let epoch = relative_epoch.into_epoch(current_epoch); From fa93b58257c34c313f52debba184b3fa871c51df Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Mon, 14 Aug 2023 00:29:46 +0000 Subject: [PATCH 42/75] remove `optional_eth2_network_config` (#4611) It seems the passed [`optional_config`](https://github.com/sigp/lighthouse/blob/dfcb3363c757671eb19d5f8e519b4b94ac74677a/lighthouse/src/main.rs#L515) is always `Some` instead of `None`. 
--- lighthouse/environment/src/lib.rs | 12 ------------ lighthouse/src/main.rs | 2 +- 2 files changed, 1 insertion(+), 13 deletions(-) diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 53915b52d9..fc7ab8d52c 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -344,18 +344,6 @@ impl EnvironmentBuilder { Ok(self) } - /// Optionally adds a network configuration to the environment. - pub fn optional_eth2_network_config( - self, - optional_config: Option, - ) -> Result { - if let Some(config) = optional_config { - self.eth2_network_config(config) - } else { - Ok(self) - } - } - /// Consumes the builder, returning an `Environment`. pub fn build(self) -> Result, String> { let (signal, exit) = exit_future::signal(); diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 73e042342a..8b3271f43b 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -513,7 +513,7 @@ fn run( let mut environment = builder .multi_threaded_tokio_runtime()? - .optional_eth2_network_config(Some(eth2_network_config))? + .eth2_network_config(eth2_network_config)? .build()?; let log = environment.core_context().log().clone(); From e92359b756124c948a0a9167844a2a9faa36449d Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Mon, 14 Aug 2023 00:29:47 +0000 Subject: [PATCH 43/75] use account_manager::CMD instead of magic string (#4612) Make the code style a bit more consistent with following lines. 
--- lighthouse/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 8b3271f43b..d8b522307c 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -559,7 +559,7 @@ fn run( (Some(_), Some(_)) => panic!("CLI prevents both --network and --testnet-dir"), }; - if let Some(sub_matches) = matches.subcommand_matches("account_manager") { + if let Some(sub_matches) = matches.subcommand_matches(account_manager::CMD) { eprintln!("Running account manager for {} network", network_name); // Pass the entire `environment` to the account manager so it can run blocking operations. account_manager::run(sub_matches, environment)?; From 842b42297b9bd44cb78a8348b7ed3096af6ffdf6 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Mon, 14 Aug 2023 00:29:47 +0000 Subject: [PATCH 44/75] Fix bug of `init_from_beacon_node` (#4613) --- validator_client/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 6ca8db87d5..6f071055a4 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -627,8 +627,8 @@ async fn init_from_beacon_node( let num_available = beacon_nodes.num_available().await; let num_total = beacon_nodes.num_total(); - let proposer_available = beacon_nodes.num_available().await; - let proposer_total = beacon_nodes.num_total(); + let proposer_available = proposer_nodes.num_available().await; + let proposer_total = proposer_nodes.num_total(); if proposer_total > 0 && proposer_available == 0 { warn!( From ca050053bf2758467f4f17d601d939c89fb24c9d Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 14 Aug 2023 03:16:03 +0000 Subject: [PATCH 45/75] Use the native `concurrency` property to cancel workflows (#4572) I noticed that some of our workflows aren't getting cancelled when a new one has been triggered, so we ended up having a long queue in our CI when multiple changes 
are triggered in a short period. Looking at the comment here, I noticed the list of workflow IDs are outdated and no longer exist, and some new ones are missing: https://github.com/sigp/lighthouse/blob/dfcb3363c757671eb19d5f8e519b4b94ac74677a/.github/workflows/cancel-previous-runs.yml#L12-L13 I attempted to update these, and came across this comment on the [`cancel-workflow-action`](https://github.com/styfle/cancel-workflow-action) repo: > You probably don't need to install this custom action. > > Instead, use the native [concurrency](https://github.blog/changelog/2021-04-19-github-actions-limit-workflow-run-or-job-concurrency/) property to cancel workflows, for example: So I thought instead of updating the workflow and maintaining the workflow IDs, perhaps we can try experimenting the [native `concurrency` property](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency). --- .github/workflows/book.yml | 4 ++++ .github/workflows/cancel-previous-runs.yml | 14 -------------- .github/workflows/docker-antithesis.yml | 4 ++++ .github/workflows/docker.yml | 4 ++++ .github/workflows/linkcheck.yml | 4 ++++ .github/workflows/local-testnet.yml | 4 ++++ .github/workflows/release.yml | 4 ++++ .github/workflows/test-suite.yml | 5 +++++ 8 files changed, 29 insertions(+), 14 deletions(-) delete mode 100644 .github/workflows/cancel-previous-runs.yml diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index 598754368e..db458a3dbf 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -5,6 +5,10 @@ on: branches: - unstable +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: build-and-upload-to-s3: runs-on: ubuntu-20.04 diff --git a/.github/workflows/cancel-previous-runs.yml b/.github/workflows/cancel-previous-runs.yml deleted file mode 100644 index 2eaefa40ca..0000000000 --- a/.github/workflows/cancel-previous-runs.yml +++ /dev/null @@ -1,14 +0,0 @@ -name: 
cancel previous runs -on: [push] -jobs: - cancel: - name: 'Cancel Previous Runs' - runs-on: ubuntu-latest - timeout-minutes: 3 - steps: - # https://github.com/styfle/cancel-workflow-action/releases - - uses: styfle/cancel-workflow-action@514c783324374c6940d1b92bfb962d0763d22de3 # 0.7.0 - with: - # https://api.github.com/repos/sigp/lighthouse/actions/workflows - workflow_id: 697364,2434944,4462424,308241,2883401,316 - access_token: ${{ github.token }} diff --git a/.github/workflows/docker-antithesis.yml b/.github/workflows/docker-antithesis.yml index 84f5541a3c..a96431fafb 100644 --- a/.github/workflows/docker-antithesis.yml +++ b/.github/workflows/docker-antithesis.yml @@ -5,6 +5,10 @@ on: branches: - unstable +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: ANTITHESIS_PASSWORD: ${{ secrets.ANTITHESIS_PASSWORD }} ANTITHESIS_USERNAME: ${{ secrets.ANTITHESIS_USERNAME }} diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index c3119db378..21ca4940d9 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -8,6 +8,10 @@ on: tags: - v* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml index 8428c0a3b0..19236691f6 100644 --- a/.github/workflows/linkcheck.yml +++ b/.github/workflows/linkcheck.yml @@ -9,6 +9,10 @@ on: - 'book/**' merge_group: +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: linkcheck: name: Check broken links diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index ea4c1e2488..1269aee627 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -8,6 +8,10 @@ on: pull_request: merge_group: +concurrency: + group: ${{ 
github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: run-local-testnet: strategy: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 30e4211b88..e38b03daf7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -5,6 +5,10 @@ on: tags: - v* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index ab31b3a92b..91a0b73453 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -9,6 +9,11 @@ on: - 'pr/*' pull_request: merge_group: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: # Deny warnings in CI # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005) From dfab24bf92fad9bdb10311851a63963ed620ea55 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Mon, 14 Aug 2023 03:16:04 +0000 Subject: [PATCH 46/75] opt `maybe_update_best_child_and_descendant`: remove an impossible case (#4583) Here `child.weight == best_child.weight` is impossible since it's already checked [above](https://github.com/sigp/lighthouse/blob/dfcb3363c757671eb19d5f8e519b4b94ac74677a/consensus/proto_array/src/proto_array.rs#L878). --- consensus/proto_array/src/proto_array.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 6cb10e3d20..7b6afb94f5 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -884,7 +884,7 @@ impl ProtoArray { } } else { // Choose the winner by weight. 
- if child.weight >= best_child.weight { + if child.weight > best_child.weight { change_to_child } else { no_change From 912f869829689dbf7881e3f2098d5d864a59d041 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Mon, 14 Aug 2023 04:06:34 +0000 Subject: [PATCH 47/75] ForkChoice: remove `head_block_root` field (#4590) This field is redundant with `ForkchoiceUpdateParameters.head_root`, the same `head_root` is assigned to both fields in [`get_head`](https://github.com/sigp/lighthouse/blob/dfcb3363c757671eb19d5f8e519b4b94ac74677a/consensus/fork_choice/src/fork_choice.rs#L508-L523). --- consensus/fork_choice/src/fork_choice.rs | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 059494453c..4f563f8639 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -289,9 +289,10 @@ pub enum AttestationFromBlock { False, } -/// Parameters which are cached between calls to `Self::get_head`. +/// Parameters which are cached between calls to `ForkChoice::get_head`. #[derive(Clone, Copy)] pub struct ForkchoiceUpdateParameters { + /// The most recent result of running `ForkChoice::get_head`. pub head_root: Hash256, pub head_hash: Option, pub justified_hash: Option, @@ -324,8 +325,6 @@ pub struct ForkChoice { queued_attestations: Vec, /// Stores a cache of the values required to be sent to the execution layer. forkchoice_update_parameters: ForkchoiceUpdateParameters, - /// The most recent result of running `Self::get_head`. - head_block_root: Hash256, _phantom: PhantomData, } @@ -410,14 +409,13 @@ where head_hash: None, justified_hash: None, finalized_hash: None, + // This will be updated during the next call to `Self::get_head`. head_root: Hash256::zero(), }, - // This will be updated during the next call to `Self::get_head`. 
- head_block_root: Hash256::zero(), _phantom: PhantomData, }; - // Ensure that `fork_choice.head_block_root` is updated. + // Ensure that `fork_choice.forkchoice_update_parameters.head_root` is updated. fork_choice.get_head(current_slot, spec)?; Ok(fork_choice) @@ -502,8 +500,6 @@ where spec, )?; - self.head_block_root = head_root; - // Cache some values for the next forkchoiceUpdate call to the execution layer. let head_hash = self .get_block(&head_root) @@ -607,7 +603,7 @@ where /// have *differing* finalized and justified information. pub fn cached_fork_choice_view(&self) -> ForkChoiceView { ForkChoiceView { - head_block_root: self.head_block_root, + head_block_root: self.forkchoice_update_parameters.head_root, justified_checkpoint: self.justified_checkpoint(), finalized_checkpoint: self.finalized_checkpoint(), } @@ -1518,10 +1514,9 @@ where head_hash: None, justified_hash: None, finalized_hash: None, + // Will be updated in the following call to `Self::get_head`. head_root: Hash256::zero(), }, - // Will be updated in the following call to `Self::get_head`. - head_block_root: Hash256::zero(), _phantom: PhantomData, }; From 249f85f1d9c643a0ec4f952966909bb4f7d4c64f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 14 Aug 2023 04:06:37 +0000 Subject: [PATCH 48/75] Improve HTTP API error messages + tweaks (#4595) ## Issue Addressed Closes #3404 (mostly) ## Proposed Changes - Remove all uses of Warp's `and_then` (which backtracks) in favour of `then` (which doesn't). - Bump the priority of the `POST` method for `v2/blocks` to `P0`. Publishing a block needs to happen quickly. - Run the new SSZ POST endpoints on the beacon processor. I think this was missed in between merging #4462 and #4504/#4479. - Fix a minor issue in the validator registrations endpoint whereby an error from spawning the task on the beacon processor would be dropped. 
## Additional Info I've tested this manually and can confirm that we no longer get the dreaded `Unsupported endpoint version` errors for queries like: ``` $ curl -X POST -H "Content-Type: application/json" --data @block.json "http://localhost:5052/eth/v2/beacon/blocks" | jq { "code": 400, "message": "BAD_REQUEST: WeakSubjectivityConflict", "stacktraces": [] } ``` ``` $ curl -X POST -H "Content-Type: application/octet-stream" --data @block.json "http://localhost:5052/eth/v2/beacon/blocks" | jq { "code": 400, "message": "BAD_REQUEST: invalid SSZ: OffsetOutOfBounds(572530811)", "stacktraces": [] } ``` ``` $ curl "http://localhost:5052/eth/v2/validator/blocks/7067595" {"code":400,"message":"BAD_REQUEST: invalid query: Invalid query string","stacktraces":[]} ``` However, I can still trigger it by leaving off the `Content-Type`. We can re-test this aspect with #4575. --- beacon_node/http_api/src/lib.rs | 362 +++++++++++------------ beacon_node/http_api/src/task_spawner.rs | 92 +++--- 2 files changed, 227 insertions(+), 227 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 4d28326d18..4d5b98a823 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -516,7 +516,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { let genesis_data = api_types::GenesisData { @@ -549,7 +549,7 @@ pub fn serve( .clone() .and(warp::path("root")) .and(warp::path::end()) - .and_then( + .then( |state_id: StateId, task_spawner: TaskSpawner, chain: Arc>| { @@ -570,7 +570,7 @@ pub fn serve( .clone() .and(warp::path("fork")) .and(warp::path::end()) - .and_then( + .then( |state_id: StateId, task_spawner: TaskSpawner, chain: Arc>| { @@ -591,7 +591,7 @@ pub fn serve( .clone() .and(warp::path("finality_checkpoints")) .and(warp::path::end()) - .and_then( + 
.then( |state_id: StateId, task_spawner: TaskSpawner, chain: Arc>| { @@ -627,7 +627,7 @@ pub fn serve( .and(warp::path("validator_balances")) .and(warp::path::end()) .and(multi_key_query::()) - .and_then( + .then( |state_id: StateId, task_spawner: TaskSpawner, chain: Arc>, @@ -685,7 +685,7 @@ pub fn serve( .and(warp::path("validators")) .and(warp::path::end()) .and(multi_key_query::()) - .and_then( + .then( |state_id: StateId, task_spawner: TaskSpawner, chain: Arc>, @@ -769,7 +769,7 @@ pub fn serve( )) })) .and(warp::path::end()) - .and_then( + .then( |state_id: StateId, task_spawner: TaskSpawner, chain: Arc>, @@ -837,7 +837,7 @@ pub fn serve( .and(warp::path("committees")) .and(warp::query::()) .and(warp::path::end()) - .and_then( + .then( |state_id: StateId, task_spawner: TaskSpawner, chain: Arc>, @@ -1020,7 +1020,7 @@ pub fn serve( .and(warp::path("sync_committees")) .and(warp::query::()) .and(warp::path::end()) - .and_then( + .then( |state_id: StateId, task_spawner: TaskSpawner, chain: Arc>, @@ -1086,7 +1086,7 @@ pub fn serve( .and(warp::path("randao")) .and(warp::query::()) .and(warp::path::end()) - .and_then( + .then( |state_id: StateId, task_spawner: TaskSpawner, chain: Arc>, @@ -1128,7 +1128,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |query: api_types::HeadersQuery, task_spawner: TaskSpawner, chain: Arc>| { @@ -1228,7 +1228,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |block_id: BlockId, task_spawner: TaskSpawner, chain: Arc>| { @@ -1276,7 +1276,7 @@ pub fn serve( .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(log_filter.clone()) - .and_then( + .then( |block: Arc>, task_spawner: TaskSpawner, chain: Arc>, @@ -1302,33 +1302,35 @@ pub fn serve( .and(warp::path("blocks")) .and(warp::path::end()) .and(warp::body::bytes()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) 
.and(network_tx_filter.clone()) .and(log_filter.clone()) - .and_then( + .then( |block_bytes: Bytes, + task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, - log: Logger| async move { - let block = match SignedBeaconBlock::::from_ssz_bytes( - &block_bytes, - &chain.spec, - ) { - Ok(data) => data, - Err(e) => { - return Err(warp_utils::reject::custom_bad_request(format!("{:?}", e))) - } - }; - publish_blocks::publish_block( - None, - ProvenancedBlock::local(Arc::new(block)), - chain, - &network_tx, - log, - BroadcastValidation::default(), - ) - .await - .map(|()| warp::reply().into_response()) + log: Logger| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let block = + SignedBeaconBlock::::from_ssz_bytes(&block_bytes, &chain.spec) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "invalid SSZ: {e:?}" + )) + })?; + publish_blocks::publish_block( + None, + ProvenancedBlock::local(Arc::new(block)), + chain, + &network_tx, + log, + BroadcastValidation::default(), + ) + .await + .map(|()| warp::reply().into_response()) + }) }, ); @@ -1349,8 +1351,8 @@ pub fn serve( chain: Arc>, network_tx: UnboundedSender>, log: Logger| { - task_spawner.spawn_async(Priority::P1, async move { - match publish_blocks::publish_block( + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + publish_blocks::publish_block( None, ProvenancedBlock::local(block), chain, @@ -1359,17 +1361,7 @@ pub fn serve( validation_level.broadcast_validation, ) .await - { - Ok(()) => warp::reply().into_response(), - Err(e) => match warp_utils::reject::handle_rejection(e).await { - Ok(reply) => reply.into_response(), - Err(_) => warp::reply::with_status( - StatusCode::INTERNAL_SERVER_ERROR, - eth2::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response(), - }, - } + .map(|()| warp::reply().into_response()) }) }, ); @@ -1380,48 +1372,36 @@ pub fn serve( .and(warp::query::()) .and(warp::path::end()) .and(warp::body::bytes()) + 
.and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( |validation_level: api_types::BroadcastValidationQuery, block_bytes: Bytes, + task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, - log: Logger| async move { - let block = match SignedBeaconBlock::::from_ssz_bytes( - &block_bytes, - &chain.spec, - ) { - Ok(data) => data, - Err(_) => { - return warp::reply::with_status( - StatusCode::BAD_REQUEST, - eth2::StatusCode::BAD_REQUEST, - ) - .into_response(); - } - }; - match publish_blocks::publish_block( - None, - ProvenancedBlock::local(Arc::new(block)), - chain, - &network_tx, - log, - validation_level.broadcast_validation, - ) - .await - { - Ok(()) => warp::reply().into_response(), - Err(e) => match warp_utils::reject::handle_rejection(e).await { - Ok(reply) => reply.into_response(), - Err(_) => warp::reply::with_status( - StatusCode::INTERNAL_SERVER_ERROR, - eth2::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response(), - }, - } + log: Logger| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let block = + SignedBeaconBlock::::from_ssz_bytes(&block_bytes, &chain.spec) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "invalid SSZ: {e:?}" + )) + })?; + publish_blocks::publish_block( + None, + ProvenancedBlock::local(Arc::new(block)), + chain, + &network_tx, + log, + validation_level.broadcast_validation, + ) + .await + .map(|()| warp::reply().into_response()) + }) }, ); @@ -1439,7 +1419,7 @@ pub fn serve( .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(log_filter.clone()) - .and_then( + .then( |block: SignedBeaconBlock>, task_spawner: TaskSpawner, chain: Arc>, @@ -1460,33 +1440,29 @@ pub fn serve( ); // POST beacon/blocks - let post_beacon_blinded_blocks_ssz = - eth_v1 - .and(warp::path("beacon")) - .and(warp::path("blinded_blocks")) - .and(warp::path::end()) - .and(warp::body::bytes()) - .and(chain_filter.clone()) - 
.and(network_tx_filter.clone()) - .and(log_filter.clone()) - .and_then( - |block_bytes: Bytes, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| async move { - let block = - match SignedBeaconBlock::>::from_ssz_bytes( - &block_bytes, - &chain.spec, - ) { - Ok(data) => data, - Err(e) => { - return Err(warp_utils::reject::custom_bad_request(format!( - "{:?}", - e - ))) - } - }; + let post_beacon_blinded_blocks_ssz = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(warp::path::end()) + .and(warp::body::bytes()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .then( + |block_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let block = SignedBeaconBlock::>::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; publish_blocks::publish_blinded_block( block, chain, @@ -1496,8 +1472,9 @@ pub fn serve( ) .await .map(|()| warp::reply().into_response()) - }, - ); + }) + }, + ); let post_beacon_blinded_blocks_v2 = eth_v2 .and(warp::path("beacon")) @@ -1617,7 +1594,7 @@ pub fn serve( .clone() .and(warp::path::end()) .and(warp::header::optional::("accept")) - .and_then( + .then( |endpoint_version: EndpointVersion, block_id: BlockId, task_spawner: TaskSpawner, @@ -1660,7 +1637,7 @@ pub fn serve( .clone() .and(warp::path("root")) .and(warp::path::end()) - .and_then( + .then( |block_id: BlockId, task_spawner: TaskSpawner, chain: Arc>| { @@ -1680,7 +1657,7 @@ pub fn serve( .clone() .and(warp::path("attestations")) .and(warp::path::end()) - .and_then( + .then( |block_id: BlockId, task_spawner: TaskSpawner, chain: Arc>| { @@ -1704,7 +1681,7 @@ pub fn serve( .and(chain_filter.clone()) .and(warp::path::end()) .and(warp::header::optional::("accept")) 
- .and_then( + .then( |block_id: BlockId, task_spawner: TaskSpawner, chain: Arc>, @@ -1762,7 +1739,7 @@ pub fn serve( .and(warp::body::json()) .and(network_tx_filter.clone()) .and(log_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>, attestations: Vec>, @@ -1904,7 +1881,7 @@ pub fn serve( .and(warp::path("attestations")) .and(warp::path::end()) .and(warp::query::()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>, query: api_types::AttestationPoolQuery| { @@ -1937,7 +1914,7 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(network_tx_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>, slashing: AttesterSlashing, @@ -1979,7 +1956,7 @@ pub fn serve( .clone() .and(warp::path("attester_slashings")) .and(warp::path::end()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { let attestations = chain.op_pool.get_all_attester_slashings(); @@ -1995,7 +1972,7 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(network_tx_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>, slashing: ProposerSlashing, @@ -2037,7 +2014,7 @@ pub fn serve( .clone() .and(warp::path("proposer_slashings")) .and(warp::path::end()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { let attestations = chain.op_pool.get_all_proposer_slashings(); @@ -2053,7 +2030,7 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(network_tx_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>, exit: SignedVoluntaryExit, @@ -2093,7 +2070,7 @@ pub fn serve( .clone() .and(warp::path("voluntary_exits")) .and(warp::path::end()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { let attestations = chain.op_pool.get_all_voluntary_exits(); @@ 
-2110,7 +2087,7 @@ pub fn serve( .and(warp::body::json()) .and(network_tx_filter.clone()) .and(log_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>, signatures: Vec, @@ -2130,7 +2107,7 @@ pub fn serve( .clone() .and(warp::path("bls_to_execution_changes")) .and(warp::path::end()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { let address_changes = chain.op_pool.get_all_bls_to_execution_changes(); @@ -2147,7 +2124,7 @@ pub fn serve( .and(warp::body::json()) .and(network_tx_filter.clone()) .and(log_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>, address_changes: Vec, @@ -2239,7 +2216,7 @@ pub fn serve( .and(warp::header::optional::("accept")) .and(task_spawner_filter.clone()) .and(eth1_service_filter.clone()) - .and_then( + .then( |accept_header: Option, task_spawner: TaskSpawner, eth1_service: eth1::Service| { @@ -2293,7 +2270,7 @@ pub fn serve( .and(warp::path("blocks")) .and(block_id_or_err) .and(warp::path::end()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>, block_id: BlockId| { @@ -2326,7 +2303,7 @@ pub fn serve( .and(warp::path::param::()) .and(warp::path::end()) .and(warp::body::json()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>, epoch: Epoch, @@ -2378,7 +2355,7 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(log_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>, block_id: BlockId, @@ -2411,7 +2388,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { let forks = ForkName::list_all() @@ -2430,7 +2407,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( move |task_spawner: TaskSpawner, chain: Arc>| { 
task_spawner.blocking_json_task(Priority::P0, move || { let config_and_preset = @@ -2446,7 +2423,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { Ok(api_types::GenericResponse::from( @@ -2477,7 +2454,7 @@ pub fn serve( .and(warp::header::optional::("accept")) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |endpoint_version: EndpointVersion, state_id: StateId, accept_header: Option, @@ -2537,7 +2514,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |endpoint_version: EndpointVersion, task_spawner: TaskSpawner, chain: Arc>| { @@ -2576,7 +2553,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { let beacon_fork_choice = chain.canonical_head.fork_choice_read_lock(); @@ -2631,7 +2608,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(network_globals.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, network_globals: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { @@ -2687,7 +2664,7 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(network_globals.clone()) .and(chain_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, network_globals: Arc>, chain: Arc>| { @@ -2738,7 +2715,7 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(network_globals.clone()) .and(chain_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, network_globals: Arc>, chain: Arc>| { @@ -2786,7 +2763,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(network_globals.clone()) - .and_then( + .then( |requested_peer_id: String, 
task_spawner: TaskSpawner, network_globals: Arc>| { @@ -2846,7 +2823,7 @@ pub fn serve( .and(multi_key_query::()) .and(task_spawner_filter.clone()) .and(network_globals.clone()) - .and_then( + .then( |query_res: Result, task_spawner: TaskSpawner, network_globals: Arc>| { @@ -2916,7 +2893,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(network_globals.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, network_globals: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { @@ -2969,7 +2946,7 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) - .and_then( + .then( |epoch: Epoch, task_spawner: TaskSpawner, chain: Arc>, @@ -2995,7 +2972,7 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) - .and_then( + .then( |endpoint_version: EndpointVersion, slot: Slot, query: api_types::ValidatorBlocksQuery, @@ -3064,7 +3041,7 @@ pub fn serve( .and(warp::query::()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |slot: Slot, query: api_types::ValidatorBlocksQuery, task_spawner: TaskSpawner, @@ -3121,7 +3098,7 @@ pub fn serve( .and(not_while_syncing_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |query: api_types::ValidatorAttestationDataQuery, task_spawner: TaskSpawner, chain: Arc>| { @@ -3156,7 +3133,7 @@ pub fn serve( .and(not_while_syncing_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |query: api_types::ValidatorAggregateAttestationQuery, task_spawner: TaskSpawner, chain: Arc>| { @@ -3197,7 +3174,7 @@ pub fn serve( .and(warp::body::json()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |epoch: Epoch, indices: api_types::ValidatorIndexData, task_spawner: TaskSpawner, @@ -3223,7 +3200,7 @@ pub fn serve( .and(warp::body::json()) 
.and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |epoch: Epoch, indices: api_types::ValidatorIndexData, task_spawner: TaskSpawner, @@ -3243,7 +3220,7 @@ pub fn serve( .and(not_while_syncing_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |sync_committee_data: SyncContributionData, task_spawner: TaskSpawner, chain: Arc>| { @@ -3277,7 +3254,7 @@ pub fn serve( .and(warp::body::json()) .and(network_tx_filter.clone()) .and(log_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>, aggregates: Vec>, @@ -3390,7 +3367,7 @@ pub fn serve( .and(warp::body::json()) .and(network_tx_filter) .and(log_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>, contributions: Vec>, @@ -3418,7 +3395,7 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) - .and_then( + .then( |subscriptions: Vec, validator_subscription_tx: Sender, task_spawner: TaskSpawner, @@ -3470,7 +3447,7 @@ pub fn serve( .and(chain_filter.clone()) .and(log_filter.clone()) .and(warp::body::json()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>, log: Logger, @@ -3521,15 +3498,15 @@ pub fn serve( .and(chain_filter.clone()) .and(log_filter.clone()) .and(warp::body::json()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>, log: Logger, register_val_data: Vec| async { let (tx, rx) = oneshot::channel(); - task_spawner - .spawn_async_with_rejection(Priority::P0, async move { + let initial_result = task_spawner + .spawn_async_with_rejection_no_conversion(Priority::P0, async move { let execution_layer = chain .execution_layer .as_ref() @@ -3671,17 +3648,22 @@ pub fn serve( // from what is sent back down the channel. 
Ok(warp::reply::reply().into_response()) }) - .await?; + .await; + + if initial_result.is_err() { + return task_spawner::convert_rejection(initial_result).await; + } // Await a response from the builder without blocking a // `BeaconProcessor` worker. - rx.await.unwrap_or_else(|_| { + task_spawner::convert_rejection(rx.await.unwrap_or_else(|_| { Ok(warp::reply::with_status( warp::reply::json(&"No response from channel"), eth2::StatusCode::INTERNAL_SERVER_ERROR, ) .into_response()) - }) + })) + .await }, ); // POST validator/sync_committee_subscriptions @@ -3694,7 +3676,7 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) - .and_then( + .then( |subscriptions: Vec, validator_subscription_tx: Sender, task_spawner: TaskSpawner, @@ -3738,7 +3720,7 @@ pub fn serve( .and(warp::body::json()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |epoch: Epoch, indices: Vec, task_spawner: TaskSpawner, @@ -3779,7 +3761,7 @@ pub fn serve( .and(warp::body::json()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |request_data: api_types::LivenessRequestData, task_spawner: TaskSpawner, chain: Arc>| { @@ -3823,7 +3805,7 @@ pub fn serve( .and(warp::path("health")) .and(warp::path::end()) .and(task_spawner_filter.clone()) - .and_then(|task_spawner: TaskSpawner| { + .then(|task_spawner: TaskSpawner| { task_spawner.blocking_json_task(Priority::P0, move || { eth2::lighthouse::Health::observe() .map(api_types::GenericResponse::from) @@ -3841,7 +3823,7 @@ pub fn serve( .and(app_start_filter) .and(data_dir_filter) .and(network_globals.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, sysinfo, app_start: std::time::Instant, @@ -3866,7 +3848,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { 
ui::get_validator_count(chain).map(api_types::GenericResponse::from) @@ -3882,7 +3864,7 @@ pub fn serve( .and(warp::body::json()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |request_data: ui::ValidatorMetricsRequestData, task_spawner: TaskSpawner, chain: Arc>| { @@ -3901,7 +3883,7 @@ pub fn serve( .and(warp::body::json()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |request_data: ui::ValidatorInfoRequestData, task_spawner: TaskSpawner, chain: Arc>| { @@ -3918,7 +3900,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(network_globals.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, network_globals: Arc>| { task_spawner.blocking_json_task(Priority::P0, move || { @@ -3934,7 +3916,7 @@ pub fn serve( .and(warp::path("nat")) .and(task_spawner_filter.clone()) .and(warp::path::end()) - .and_then(|task_spawner: TaskSpawner| { + .then(|task_spawner: TaskSpawner| { task_spawner.blocking_json_task(Priority::P1, move || { Ok(api_types::GenericResponse::from( lighthouse_network::metrics::NAT_OPEN @@ -3952,7 +3934,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(network_globals.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, network_globals: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { @@ -3976,7 +3958,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(network_globals) - .and_then( + .then( |task_spawner: TaskSpawner, network_globals: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { @@ -3999,7 +3981,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_response_task(Priority::P1, move || { Ok::<_, warp::Rejection>(warp::reply::json( @@ -4023,7 +4005,7 @@ pub fn serve( .and(warp::path::end()) 
.and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |epoch: Epoch, validator_id: ValidatorId, task_spawner: TaskSpawner, @@ -4043,7 +4025,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |epoch: Epoch, task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { validator_inclusion::global_validator_inclusion_data(epoch, &chain) @@ -4059,7 +4041,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { let current_slot_opt = chain.slot().ok(); @@ -4092,7 +4074,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(eth1_service_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, eth1_service: eth1::Service| { task_spawner.blocking_json_task(Priority::P1, move || { Ok(api_types::GenericResponse::from( @@ -4114,7 +4096,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(eth1_service_filter) - .and_then( + .then( |task_spawner: TaskSpawner, eth1_service: eth1::Service| { task_spawner.blocking_json_task(Priority::P1, move || { Ok(api_types::GenericResponse::from( @@ -4139,7 +4121,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |state_id: StateId, task_spawner: TaskSpawner, chain: Arc>| { @@ -4166,7 +4148,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { if chain.eth1_chain.is_some() { @@ -4190,7 +4172,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |task_spawner: 
TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || database::info(chain)) }, @@ -4203,7 +4185,7 @@ pub fn serve( .and(not_while_syncing_filter) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { chain.store_migrator.process_reconstruction(); @@ -4220,7 +4202,7 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) - .and_then( + .then( |blocks: Vec>>, task_spawner: TaskSpawner, chain: Arc>, @@ -4246,7 +4228,7 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) - .and_then(|query, task_spawner: TaskSpawner, chain, log| { + .then(|query, task_spawner: TaskSpawner, chain, log| { task_spawner.blocking_json_task(Priority::P1, move || { block_rewards::get_block_rewards(query, chain, log) }) @@ -4261,7 +4243,7 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) - .and_then( + .then( |blocks, task_spawner: TaskSpawner, chain, log| { task_spawner.blocking_json_task(Priority::P1, move || { block_rewards::compute_block_rewards(blocks, chain, log) @@ -4278,7 +4260,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |target, query, task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { attestation_performance::get_attestation_performance(target, query, chain) @@ -4294,7 +4276,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |query, task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { block_packing_efficiency::get_block_packing_efficiency(query, chain) @@ -4308,7 +4290,7 @@ pub fn serve( .and(warp::path::end()) 
.and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and_then( + .then( |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.spawn_async_with_rejection(Priority::P1, async move { let merge_readiness = chain.check_merge_readiness().await; @@ -4326,7 +4308,7 @@ pub fn serve( .and(multi_key_query::()) .and(task_spawner_filter.clone()) .and(chain_filter) - .and_then( + .then( |topics_res: Result, task_spawner: TaskSpawner, chain: Arc>| { @@ -4403,7 +4385,7 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter) .and(sse_component_filter) - .and_then( + .then( |task_spawner: TaskSpawner, sse_component: Option| { task_spawner.blocking_response_task(Priority::P1, move || { if let Some(logging_components) = sse_component { diff --git a/beacon_node/http_api/src/task_spawner.rs b/beacon_node/http_api/src/task_spawner.rs index b4da67f77c..503faff717 100644 --- a/beacon_node/http_api/src/task_spawner.rs +++ b/beacon_node/http_api/src/task_spawner.rs @@ -35,6 +35,24 @@ pub struct TaskSpawner { beacon_processor_send: Option>, } +/// Convert a warp `Rejection` into a `Response`. +/// +/// This function should *always* be used to convert rejections into responses. This prevents warp +/// from trying to backtrack in strange ways. See: https://github.com/sigp/lighthouse/issues/3404 +pub async fn convert_rejection(res: Result) -> Response { + match res { + Ok(response) => response.into_response(), + Err(e) => match warp_utils::reject::handle_rejection(e).await { + Ok(reply) => reply.into_response(), + Err(_) => warp::reply::with_status( + warp::reply::json(&"unhandled error"), + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response(), + }, + } +} + impl TaskSpawner { pub fn new(beacon_processor_send: Option>) -> Self { Self { @@ -43,11 +61,7 @@ impl TaskSpawner { } /// Executes a "blocking" (non-async) task which returns a `Response`. 
- pub async fn blocking_response_task( - self, - priority: Priority, - func: F, - ) -> Result + pub async fn blocking_response_task(self, priority: Priority, func: F) -> Response where F: FnOnce() -> Result + Send + Sync + 'static, T: Reply + Send + 'static, @@ -65,31 +79,25 @@ impl TaskSpawner { }; // Send the function to the beacon processor for execution at some arbitrary time. - match send_to_beacon_processor( + let result = send_to_beacon_processor( beacon_processor_send, priority, BlockingOrAsync::Blocking(Box::new(process_fn)), rx, ) .await - { - Ok(result) => result.map(Reply::into_response), - Err(error_response) => Ok(error_response), - } + .and_then(|x| x); + convert_rejection(result).await } else { // There is no beacon processor so spawn a task directly on the // tokio executor. - warp_utils::task::blocking_response_task(func).await + convert_rejection(warp_utils::task::blocking_response_task(func).await).await } } /// Executes a "blocking" (non-async) task which returns a JSON-serializable /// object. - pub async fn blocking_json_task( - self, - priority: Priority, - func: F, - ) -> Result + pub async fn blocking_json_task(self, priority: Priority, func: F) -> Response where F: FnOnce() -> Result + Send + Sync + 'static, T: Serialize + Send + 'static, @@ -98,11 +106,26 @@ impl TaskSpawner { self.blocking_response_task(priority, func).await } - /// Executes an async task which may return a `warp::Rejection`. + /// Executes an async task which may return a `Rejection`, which will be converted to a response. pub async fn spawn_async_with_rejection( self, priority: Priority, func: impl Future> + Send + Sync + 'static, + ) -> Response { + let result = self + .spawn_async_with_rejection_no_conversion(priority, func) + .await; + convert_rejection(result).await + } + + /// Same as `spawn_async_with_rejection` but returning a result with the unhandled rejection. 
+ /// + /// If you call this function you MUST convert the rejection to a response and not let it + /// propagate into Warp's filters. See `convert_rejection`. + pub async fn spawn_async_with_rejection_no_conversion( + self, + priority: Priority, + func: impl Future> + Send + Sync + 'static, ) -> Result { if let Some(beacon_processor_send) = &self.beacon_processor_send { // Create a wrapper future that will execute `func` and send the @@ -124,18 +147,16 @@ impl TaskSpawner { rx, ) .await - .unwrap_or_else(Result::Ok) + .and_then(|x| x) } else { // There is no beacon processor so spawn a task directly on the // tokio executor. - tokio::task::spawn(func).await.unwrap_or_else(|e| { - let response = warp::reply::with_status( - warp::reply::json(&format!("Tokio did not execute task: {e:?}")), - eth2::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response(); - Ok(response) - }) + tokio::task::spawn(func) + .await + .map_err(|_| { + warp_utils::reject::custom_server_error("Tokio failed to spawn task".into()) + }) + .and_then(|x| x) } } @@ -158,14 +179,14 @@ impl TaskSpawner { }; // Send the function to the beacon processor for execution at some arbitrary time. - send_to_beacon_processor( + let result = send_to_beacon_processor( beacon_processor_send, priority, BlockingOrAsync::Async(Box::pin(process_fn)), rx, ) - .await - .unwrap_or_else(|error_response| error_response) + .await; + convert_rejection(result).await } else { // There is no beacon processor so spawn a task directly on the // tokio executor. @@ -182,14 +203,14 @@ impl TaskSpawner { /// Send a task to the beacon processor and await execution. /// -/// If the task is not executed, return an `Err(response)` with an error message +/// If the task is not executed, return an `Err` with an error message /// for the API consumer. 
async fn send_to_beacon_processor( beacon_processor_send: &BeaconProcessorSend, priority: Priority, process_fn: BlockingOrAsync, rx: oneshot::Receiver, -) -> Result { +) -> Result { let error_message = match beacon_processor_send.try_send(priority.work_event(process_fn)) { Ok(()) => { match rx.await { @@ -205,10 +226,7 @@ async fn send_to_beacon_processor( Err(TrySendError::Closed(_)) => "The task was dropped. The server is shutting down.", }; - let error_response = warp::reply::with_status( - warp::reply::json(&error_message), - eth2::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response(); - Err(error_response) + Err(warp_utils::reject::custom_server_error( + error_message.to_string(), + )) } From 59c24bcd2d83f1fcafc6ee4db9cf7b6f88ed7459 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 14 Aug 2023 06:08:34 +0000 Subject: [PATCH 49/75] Fix disable backfill flag not working correctly (#4615) ## Issue Addressed The feature flag used to control this feature is `disable_backfill` instead of `disable-backfill`. kudos to @michaelsproul for discovering this bug! --- beacon_node/network/src/sync/manager.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 670e88eac5..b910f7b33c 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -395,7 +395,7 @@ impl SyncManager { // If we would otherwise be synced, first check if we need to perform or // complete a backfill sync. - #[cfg(not(feature = "disable_backfill"))] + #[cfg(not(feature = "disable-backfill"))] if matches!(sync_state, SyncState::Synced) { // Determine if we need to start/resume/restart a backfill sync. match self.backfill_sync.start(&mut self.network) { @@ -420,7 +420,7 @@ impl SyncManager { } Some((RangeSyncType::Finalized, start_slot, target_slot)) => { // If there is a backfill sync in progress pause it. 
- #[cfg(not(feature = "disable_backfill"))] + #[cfg(not(feature = "disable-backfill"))] self.backfill_sync.pause(); SyncState::SyncingFinalized { @@ -430,7 +430,7 @@ impl SyncManager { } Some((RangeSyncType::Head, start_slot, target_slot)) => { // If there is a backfill sync in progress pause it. - #[cfg(not(feature = "disable_backfill"))] + #[cfg(not(feature = "disable-backfill"))] self.backfill_sync.pause(); SyncState::SyncingHead { From 7251a93c5edf0b5014e6cac499e643c37e630eab Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 17 Aug 2023 02:37:29 +0000 Subject: [PATCH 50/75] Don't kill SSE stream if channel fills up (#4500) ## Issue Addressed Closes #4245 ## Proposed Changes - If an SSE channel fills up, send a comment instead of terminating the stream. - Add a CLI flag for scaling up the SSE buffer: `--http-sse-capacity-multiplier N`. ## Additional Info ~~Blocked on #4462. I haven't rebased on that PR yet for initial testing, because it still needs some more work to handle long-running HTTP threads.~~ - [x] Add CLI flag tests. 
--- beacon_node/beacon_chain/src/events.rs | 7 ++-- beacon_node/client/src/builder.rs | 5 ++- beacon_node/http_api/src/lib.rs | 48 ++++++++++++++++---------- beacon_node/http_api/src/test_utils.rs | 1 + beacon_node/src/cli.rs | 9 +++++ beacon_node/src/config.rs | 3 ++ lighthouse/tests/beacon_node.rs | 15 ++++++++ 7 files changed, 67 insertions(+), 21 deletions(-) diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index fed0503237..b267cc853f 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -21,8 +21,11 @@ pub struct ServerSentEventHandler { } impl ServerSentEventHandler { - pub fn new(log: Logger) -> Self { - Self::new_with_capacity(log, DEFAULT_CHANNEL_CAPACITY) + pub fn new(log: Logger, capacity_multiplier: usize) -> Self { + Self::new_with_capacity( + log, + capacity_multiplier.saturating_mul(DEFAULT_CHANNEL_CAPACITY), + ) } pub fn new_with_capacity(log: Logger, capacity: usize) -> Self { diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 8383963b7c..bd2946bb7a 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -157,7 +157,10 @@ where let context = runtime_context.service_context("beacon".into()); let spec = chain_spec.ok_or("beacon_chain_start_method requires a chain spec")?; let event_handler = if self.http_api_config.enabled { - Some(ServerSentEventHandler::new(context.log().clone())) + Some(ServerSentEventHandler::new( + context.log().clone(), + self.http_api_config.sse_capacity_multiplier, + )) } else { None }; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 4d5b98a823..8e316834d0 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -65,7 +65,10 @@ use tokio::sync::{ mpsc::{Sender, UnboundedSender}, oneshot, }; -use tokio_stream::{wrappers::BroadcastStream, StreamExt}; +use tokio_stream::{ + 
wrappers::{errors::BroadcastStreamRecvError, BroadcastStream}, + StreamExt, +}; use types::{ Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, @@ -132,6 +135,7 @@ pub struct Config { pub allow_sync_stalled: bool, pub spec_fork_name: Option, pub data_dir: PathBuf, + pub sse_capacity_multiplier: usize, pub enable_beacon_processor: bool, } @@ -146,6 +150,7 @@ impl Default for Config { allow_sync_stalled: false, spec_fork_name: None, data_dir: PathBuf::from(DEFAULT_ROOT_DIR), + sse_capacity_multiplier: 1, enable_beacon_processor: true, } } @@ -4348,22 +4353,29 @@ pub fn serve( } }; - receivers.push(BroadcastStream::new(receiver).map(|msg| { - match msg { - Ok(data) => Event::default() - .event(data.topic_name()) - .json_data(data) - .map_err(|e| { - warp_utils::reject::server_sent_event_error(format!( - "{:?}", - e - )) - }), - Err(e) => Err(warp_utils::reject::server_sent_event_error( - format!("{:?}", e), - )), - } - })); + receivers.push( + BroadcastStream::new(receiver) + .map(|msg| { + match msg { + Ok(data) => Event::default() + .event(data.topic_name()) + .json_data(data) + .unwrap_or_else(|e| { + Event::default() + .comment(format!("error - bad json: {e:?}")) + }), + // Do not terminate the stream if the channel fills + // up. Just drop some messages and send a comment to + // the client. 
+ Err(BroadcastStreamRecvError::Lagged(n)) => { + Event::default().comment(format!( + "error - dropped {n} messages" + )) + } + } + }) + .map(Ok::<_, std::convert::Infallible>), + ); } } else { return Err(warp_utils::reject::custom_server_error( @@ -4373,7 +4385,7 @@ pub fn serve( let s = futures::stream::select_all(receivers); - Ok::<_, warp::Rejection>(warp::sse::reply(warp::sse::keep_alive().stream(s))) + Ok(warp::sse::reply(warp::sse::keep_alive().stream(s))) }) }, ); diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index 0367776f8d..dcc2532229 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -225,6 +225,7 @@ pub async fn create_api_server_on_port( allow_sync_stalled: false, data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), spec_fork_name: None, + sse_capacity_multiplier: 1, enable_beacon_processor: true, }, chain: Some(chain), diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 0330bd3f7c..974dabbf0c 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -382,6 +382,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { stalled. This is useful for very small testnets. TESTING ONLY. DO NOT USE ON \ MAINNET.") ) + .arg( + Arg::with_name("http-sse-capacity-multiplier") + .long("http-sse-capacity-multiplier") + .takes_value(true) + .default_value("1") + .value_name("N") + .help("Multiplier to apply to the length of HTTP server-sent-event (SSE) channels. 
\ + Increasing this value can prevent messages from being dropped.") + ) .arg( Arg::with_name("http-enable-beacon-processor") .long("http-enable-beacon-processor") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 21df86620d..0d2a5d812b 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -149,6 +149,9 @@ pub fn get_config( client_config.http_api.allow_sync_stalled = true; } + client_config.http_api.sse_capacity_multiplier = + parse_required(cli_args, "http-sse-capacity-multiplier")?; + client_config.http_api.enable_beacon_processor = parse_required(cli_args, "http-enable-beacon-processor")?; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index ecc936cbfb..02b9512012 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -2349,3 +2349,18 @@ fn beacon_processor_zero_workers() { .flag("beacon-processor-max-workers", Some("0")) .run_with_zero_port(); } + +#[test] +fn http_sse_capacity_multiplier_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.http_api.sse_capacity_multiplier, 1)); +} + +#[test] +fn http_sse_capacity_multiplier_override() { + CommandLineTest::new() + .flag("http-sse-capacity-multiplier", Some("10")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.http_api.sse_capacity_multiplier, 10)); +} From 609819bb4d08faef914a26b681b6f121cb55ab56 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Thu, 17 Aug 2023 02:37:30 +0000 Subject: [PATCH 51/75] `attester_duties`: remove unnecessary case (#4614) Since `tolerant_current_epoch` is expected to be either `current_epoch` or `current_epoch+1`, we can eliminate a case here. And added a comment about `compute_historic_attester_duties` , since `RelativeEpoch::from_epoch` will only allow `request_epoch == current_epoch-1` when `request_epoch < current_epoch`. 
--- beacon_node/http_api/src/attester_duties.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index aad405d56b..f3242a2b37 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -35,7 +35,6 @@ pub fn attester_duties( .epoch(T::EthSpec::slots_per_epoch()); if request_epoch == current_epoch - || request_epoch == tolerant_current_epoch || request_epoch == current_epoch + 1 || request_epoch == tolerant_current_epoch + 1 { @@ -46,7 +45,7 @@ pub fn attester_duties( request_epoch, current_epoch ))) } else { - // request_epoch < current_epoch + // request_epoch < current_epoch, in fact we only allow `request_epoch == current_epoch-1` in this case compute_historic_attester_duties(request_epoch, request_indices, chain) } } From b33bfe25b7bc6ea82a97a270a3d4acd72f4e9ee0 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Thu, 17 Aug 2023 02:37:31 +0000 Subject: [PATCH 52/75] add `metrics::VALIDATOR_DUTIES_SYNC_HTTP_POST` for `post_validator_duties_sync` (#4617) It seems `post_validator_duties_sync` is the only api which doesn't have its own metric in `duties_service`, this PR adds `metrics::VALIDATOR_DUTIES_SYNC_HTTP_POST` for completeness. 
--- validator_client/src/duties_service/sync.rs | 6 ++++++ validator_client/src/http_metrics/metrics.rs | 1 + 2 files changed, 7 insertions(+) diff --git a/validator_client/src/duties_service/sync.rs b/validator_client/src/duties_service/sync.rs index 1e66d947a2..cf63d8ac62 100644 --- a/validator_client/src/duties_service/sync.rs +++ b/validator_client/src/duties_service/sync.rs @@ -2,8 +2,10 @@ use crate::beacon_node_fallback::{OfflineOnFailure, RequireSynced}; use crate::{ doppelganger_service::DoppelgangerStatus, duties_service::{DutiesService, Error}, + http_metrics::metrics, validator_store::Error as ValidatorStoreError, }; + use futures::future::join_all; use itertools::Itertools; use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; @@ -426,6 +428,10 @@ pub async fn poll_sync_committee_duties_for_period Date: Thu, 17 Aug 2023 02:37:32 +0000 Subject: [PATCH 53/75] Update blst to 0.3.11 (#4624) ## Proposed Changes This PR updates `blst` to 0.3.11, which gives us _runtime detection of CPU features_ :tada: Although [performance benchmarks](https://gist.github.com/michaelsproul/f759fa28dfa4003962507db34b439d6c) don't show a substantial detriment to running the `portable` build vs `modern`, in order to take things slowly I propose the following roll-out strategy: - Keep both `modern` and `portable` builds for releases/Docker images. - Run the `portable` build on half of SigP's infrastructure to monitor for performance deficits. - Listen out for user issues with the `portable` builds (e.g. SIGILLs from misdetected hardware). - Make the `portable` build the default and remove the `modern` build from our release binaries & Docker images. 
--- Cargo.lock | 16 ++-------------- Makefile | 2 +- 2 files changed, 3 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 233b3901eb..116f8081fa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -732,14 +732,13 @@ dependencies = [ [[package]] name = "blst" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a30d0edd9dd1c60ddb42b80341c7852f6f985279a5c1a83659dcb65899dec99" +checksum = "c94087b935a822949d3291a9989ad2b2051ea141eda0fd4e478a75f6aa3e604b" dependencies = [ "cc", "glob", "threadpool", - "which", "zeroize", ] @@ -8981,17 +8980,6 @@ dependencies = [ "rustls-webpki 0.100.1", ] -[[package]] -name = "which" -version = "4.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" -dependencies = [ - "either", - "libc", - "once_cell", -] - [[package]] name = "widestring" version = "0.4.3" diff --git a/Makefile b/Makefile index b833686e1b..cb447e26a5 100644 --- a/Makefile +++ b/Makefile @@ -207,7 +207,7 @@ arbitrary-fuzz: # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: cargo install --force cargo-audit - cargo audit --ignore RUSTSEC-2020-0071 + cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2022-0093 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. 
vendor: From 687c58fde02b406441e8e8f2b46449adf6458cb8 Mon Sep 17 00:00:00 2001 From: ethDreamer Date: Fri, 18 Aug 2023 03:22:27 +0000 Subject: [PATCH 54/75] Fix Prefer Builder Flag (#4622) --- beacon_node/client/src/config.rs | 2 -- beacon_node/src/config.rs | 7 ++--- lighthouse/tests/beacon_node.rs | 47 ++++++++++++++++++++++---------- 3 files changed, 35 insertions(+), 21 deletions(-) diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index b4deb52fc3..283efe9c3f 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -80,7 +80,6 @@ pub struct Config { pub monitoring_api: Option, pub slasher: Option, pub logger_config: LoggerConfig, - pub always_prefer_builder_payload: bool, pub beacon_processor: BeaconProcessorConfig, } @@ -108,7 +107,6 @@ impl Default for Config { validator_monitor_pubkeys: vec![], validator_monitor_individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, logger_config: LoggerConfig::default(), - always_prefer_builder_payload: false, beacon_processor: <_>::default(), } } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 0d2a5d812b..d2e92b48f3 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -351,6 +351,9 @@ pub fn get_config( el_config.default_datadir = client_config.data_dir().clone(); el_config.builder_profit_threshold = clap_utils::parse_required(cli_args, "builder-profit-threshold")?; + el_config.always_prefer_builder_payload = + cli_args.is_present("always-prefer-builder-payload"); + let execution_timeout_multiplier = clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?; el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier); @@ -801,10 +804,6 @@ pub fn get_config( if cli_args.is_present("genesis-backfill") { client_config.chain.genesis_backfill = true; } - // Payload selection configs - if cli_args.is_present("always-prefer-builder-payload") { - 
client_config.always_prefer_builder_payload = true; - } // Backfill sync rate-limiting client_config.beacon_processor.enable_backfill_rate_limiting = diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 02b9512012..37d224dbc3 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -366,21 +366,6 @@ fn genesis_backfill_with_historic_flag() { .with_config(|config| assert_eq!(config.chain.genesis_backfill, true)); } -#[test] -fn always_prefer_builder_payload_flag() { - CommandLineTest::new() - .flag("always-prefer-builder-payload", None) - .run_with_zero_port() - .with_config(|config| assert!(config.always_prefer_builder_payload)); -} - -#[test] -fn no_flag_sets_always_prefer_builder_payload_to_false() { - CommandLineTest::new() - .run_with_zero_port() - .with_config(|config| assert!(!config.always_prefer_builder_payload)); -} - // Tests for Eth1 flags. #[test] fn dummy_eth1_flag() { @@ -735,6 +720,38 @@ fn builder_fallback_flags() { ); }, ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("always-prefer-builder-payload"), + None, + |config| { + assert_eq!( + config + .execution_layer + .as_ref() + .unwrap() + .always_prefer_builder_payload, + true + ); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + None, + None, + |config| { + assert_eq!( + config + .execution_layer + .as_ref() + .unwrap() + .always_prefer_builder_payload, + false + ); + }, + ); } #[test] From 20067b94655c9964c62b536831b1d093b3dc57f8 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 21 Aug 2023 05:02:32 +0000 Subject: [PATCH 55/75] Remove checkpoint alignment requirements and enable historic state pruning (#4610) ## Issue Addressed Closes #3210 Closes #3211 ## Proposed Changes - Checkpoint sync from the latest finalized state regardless of its alignment. - Add the `block_root` to the database's split point. 
This is _only_ added to the in-memory split in order to avoid a schema migration. See `load_split`. - Add a new method to the DB called `get_advanced_state`, which looks up a state _by block root_, with a `state_root` as fallback. Using this method prevents accidental accesses of the split's unadvanced state, which does not exist in the hot DB and is not guaranteed to exist in the freezer DB at all. Previously Lighthouse would look up this state _from the freezer DB_, even if it was required for block/attestation processing, which was suboptimal. - Replace several state look-ups in block and attestation processing with `get_advanced_state` so that they can't hit the split block's unadvanced state. - Do not store any states in the freezer database by default. All states will be deleted upon being evicted from the hot database unless `--reconstruct-historic-states` is set. The anchor info which was previously used for checkpoint sync is used to implement this, including when syncing from genesis. ## Additional Info Needs further testing. I want to stress-test the pruned database under Hydra. The `get_advanced_state` method is intended to become more relevant over time: `tree-states` includes an identically named method that returns advanced states from its in-memory cache. 
Co-authored-by: realbigsean --- beacon_node/beacon_chain/src/beacon_chain.rs | 13 +- .../src/beacon_fork_choice_store.rs | 12 +- .../beacon_chain/src/block_verification.rs | 17 +- beacon_node/beacon_chain/src/builder.rs | 86 +++-- .../beacon_chain/src/canonical_head.rs | 23 +- beacon_node/beacon_chain/src/migrate.rs | 8 +- .../tests/attestation_verification.rs | 10 +- .../beacon_chain/tests/block_verification.rs | 7 +- .../tests/payload_invalidation.rs | 6 +- beacon_node/beacon_chain/tests/store_tests.rs | 360 +++++++++++++----- beacon_node/beacon_chain/tests/tests.rs | 6 +- beacon_node/client/Cargo.toml | 2 +- beacon_node/client/src/builder.rs | 76 +--- beacon_node/http_api/tests/tests.rs | 21 +- .../gossip_methods.rs | 2 +- beacon_node/store/src/hot_cold_store.rs | 230 ++++++++--- beacon_node/store/src/metadata.rs | 3 + book/src/checkpoint-sync.md | 23 +- consensus/fork_choice/src/fork_choice.rs | 3 +- .../src/fork_choice_test_definition.rs | 1 + .../src/proto_array_fork_choice.rs | 5 +- .../src/per_slot_processing.rs | 2 +- testing/ef_tests/src/cases/fork_choice.rs | 6 +- testing/node_test_rig/src/lib.rs | 3 + watch/tests/tests.rs | 9 +- 25 files changed, 633 insertions(+), 301 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 987ea9e7c3..d5b86d63ff 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4656,6 +4656,7 @@ impl BeaconChain { self.log, "Produced block on state"; "block_size" => block_size, + "slot" => block.slot(), ); metrics::observe(&metrics::BLOCK_SIZE, block_size as f64); @@ -5571,14 +5572,16 @@ impl BeaconChain { let (mut state, state_root) = if let Some((state, state_root)) = head_state_opt { (state, state_root) } else { - let state_root = head_block.state_root; - let state = self + let block_state_root = head_block.state_root; + let max_slot = shuffling_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let 
(state_root, state) = self .store .get_inconsistent_state_for_attestation_verification_only( - &state_root, - Some(head_block.slot), + &head_block_root, + max_slot, + block_state_root, )? - .ok_or(Error::MissingBeaconState(head_block.state_root))?; + .ok_or(Error::MissingBeaconState(block_state_root))?; (state, state_root) }; diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 9b2edbd8b5..2a42b49b42 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -321,9 +321,17 @@ where .deconstruct() .0; - let state = self + let max_slot = self + .justified_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + let (_, state) = self .store - .get_state(&justified_block.state_root(), Some(justified_block.slot())) + .get_advanced_hot_state( + self.justified_checkpoint.root, + max_slot, + justified_block.state_root(), + ) .map_err(Error::FailedToReadState)? .ok_or_else(|| Error::MissingState(justified_block.state_root()))?; diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 0a82eae371..3654484e1f 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1261,7 +1261,7 @@ impl ExecutionPendingBlock { // Perform a sanity check on the pre-state. let parent_slot = parent.beacon_block.slot(); - if state.slot() < parent_slot || state.slot() > parent_slot + 1 { + if state.slot() < parent_slot || state.slot() > block.slot() { return Err(BeaconChainError::BadPreState { parent_root: parent.beacon_block_root, parent_slot, @@ -1760,13 +1760,18 @@ fn load_parent( BlockError::from(BeaconChainError::MissingBeaconBlock(block.parent_root())) })?; - // Load the parent blocks state from the database, returning an error if it is not found. 
+ // Load the parent block's state from the database, returning an error if it is not found. // It is an error because if we know the parent block we should also know the parent state. - let parent_state_root = parent_block.state_root(); - let parent_state = chain - .get_state(&parent_state_root, Some(parent_block.slot()))? + // Retrieve any state that is advanced through to at most `block.slot()`: this is + // particularly important if `block` descends from the finalized/split block, but at a slot + // prior to the finalized slot (which is invalid and inaccessible in our DB schema). + let (parent_state_root, parent_state) = chain + .store + .get_advanced_hot_state(root, block.slot(), parent_block.state_root())? .ok_or_else(|| { - BeaconChainError::DBInconsistent(format!("Missing state {:?}", parent_state_root)) + BeaconChainError::DBInconsistent( + format!("Missing state for parent block {root:?}",), + ) })?; metrics::inc_counter(&metrics::BLOCK_PROCESSING_SNAPSHOT_CACHE_MISSES); diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 044391c415..bb629d6624 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -24,8 +24,9 @@ use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::RwLock; use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use slasher::Slasher; -use slog::{crit, error, info, Logger}; +use slog::{crit, debug, error, info, Logger}; use slot_clock::{SlotClock, TestingSlotClock}; +use state_processing::per_slot_processing; use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; @@ -287,7 +288,7 @@ where let genesis_state = store .get_state(&genesis_block.state_root(), Some(genesis_block.slot())) .map_err(|e| descriptive_db_error("genesis state", &e))? 
- .ok_or("Genesis block not found in store")?; + .ok_or("Genesis state not found in store")?; self.genesis_time = Some(genesis_state.genesis_time()); @@ -382,6 +383,16 @@ where let (genesis, updated_builder) = self.set_genesis_state(beacon_state)?; self = updated_builder; + // Stage the database's metadata fields for atomic storage when `build` is called. + // Since v4.4.0 we will set the anchor with a dummy state upper limit in order to prevent + // historic states from being retained (unless `--reconstruct-historic-states` is set). + let retain_historic_states = self.chain_config.reconstruct_historic_states; + self.pending_io_batch.push( + store + .init_anchor_info(genesis.beacon_block.message(), retain_historic_states) + .map_err(|e| format!("Failed to initialize genesis anchor: {:?}", e))?, + ); + let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis) .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; let current_slot = None; @@ -408,30 +419,28 @@ where weak_subj_block: SignedBeaconBlock, genesis_state: BeaconState, ) -> Result { - let store = self.store.clone().ok_or("genesis_state requires a store")?; + let store = self + .store + .clone() + .ok_or("weak_subjectivity_state requires a store")?; + let log = self + .log + .as_ref() + .ok_or("weak_subjectivity_state requires a log")?; - let weak_subj_slot = weak_subj_state.slot(); - let weak_subj_block_root = weak_subj_block.canonical_root(); - let weak_subj_state_root = weak_subj_block.state_root(); - - // Check that the given block lies on an epoch boundary. Due to the database only storing - // full states on epoch boundaries and at restore points it would be difficult to support - // starting from a mid-epoch state. - if weak_subj_slot % TEthSpec::slots_per_epoch() != 0 { - return Err(format!( - "Checkpoint block at slot {} is not aligned to epoch start. 
\ - Please supply an aligned checkpoint with block.slot % 32 == 0", - weak_subj_block.slot(), - )); - } - - // Check that the block and state have consistent slots and state roots. - if weak_subj_state.slot() != weak_subj_block.slot() { - return Err(format!( - "Slot of snapshot block ({}) does not match snapshot state ({})", - weak_subj_block.slot(), - weak_subj_state.slot(), - )); + // Ensure the state is advanced to an epoch boundary. + let slots_per_epoch = TEthSpec::slots_per_epoch(); + if weak_subj_state.slot() % slots_per_epoch != 0 { + debug!( + log, + "Advancing checkpoint state to boundary"; + "state_slot" => weak_subj_state.slot(), + "block_slot" => weak_subj_block.slot(), + ); + while weak_subj_state.slot() % slots_per_epoch != 0 { + per_slot_processing(&mut weak_subj_state, None, &self.spec) + .map_err(|e| format!("Error advancing state: {e:?}"))?; + } } // Prime all caches before storing the state in the database and computing the tree hash @@ -439,15 +448,19 @@ where weak_subj_state .build_caches(&self.spec) .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?; - - let computed_state_root = weak_subj_state + let weak_subj_state_root = weak_subj_state .update_tree_hash_cache() .map_err(|e| format!("Error computing checkpoint state root: {:?}", e))?; - if weak_subj_state_root != computed_state_root { + let weak_subj_slot = weak_subj_state.slot(); + let weak_subj_block_root = weak_subj_block.canonical_root(); + + // Validate the state's `latest_block_header` against the checkpoint block. 
+ let state_latest_block_root = weak_subj_state.get_latest_block_root(weak_subj_state_root); + if weak_subj_block_root != state_latest_block_root { return Err(format!( - "Snapshot state root does not match block, expected: {:?}, got: {:?}", - weak_subj_state_root, computed_state_root + "Snapshot state's most recent block root does not match block, expected: {:?}, got: {:?}", + weak_subj_block_root, state_latest_block_root )); } @@ -464,7 +477,7 @@ where // Set the store's split point *before* storing genesis so that genesis is stored // immediately in the freezer DB. - store.set_split(weak_subj_slot, weak_subj_state_root); + store.set_split(weak_subj_slot, weak_subj_state_root, weak_subj_block_root); let (_, updated_builder) = self.set_genesis_state(genesis_state)?; self = updated_builder; @@ -480,10 +493,11 @@ where // Stage the database's metadata fields for atomic storage when `build` is called. // This prevents the database from restarting in an inconsistent state if the anchor // info or split point is written before the `PersistedBeaconChain`. 
+ let retain_historic_states = self.chain_config.reconstruct_historic_states; self.pending_io_batch.push(store.store_split_in_batch()); self.pending_io_batch.push( store - .init_anchor_info(weak_subj_block.message()) + .init_anchor_info(weak_subj_block.message(), retain_historic_states) .map_err(|e| format!("Failed to initialize anchor info: {:?}", e))?, ); @@ -503,13 +517,12 @@ where let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot) .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; - let current_slot = Some(snapshot.beacon_block.slot()); let fork_choice = ForkChoice::from_anchor( fc_store, snapshot.beacon_block_root, &snapshot.beacon_block, &snapshot.beacon_state, - current_slot, + Some(weak_subj_slot), &self.spec, ) .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; @@ -672,9 +685,8 @@ where Err(e) => return Err(descriptive_db_error("head block", &e)), }; - let head_state_root = head_block.state_root(); - let head_state = store - .get_state(&head_state_root, Some(head_block.slot())) + let (_head_state_root, head_state) = store + .get_advanced_hot_state(head_block_root, current_slot, head_block.state_root()) .map_err(|e| descriptive_db_error("head state", &e))? 
.ok_or("Head state not found in store")?; diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 2b1f714362..7fa5b01521 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -47,7 +47,8 @@ use crate::{ }; use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead}; use fork_choice::{ - ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, ResetPayloadStatuses, + ExecutionStatus, ForkChoiceStore, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, + ResetPayloadStatuses, }; use itertools::process_results; use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; @@ -298,10 +299,10 @@ impl CanonicalHead { let beacon_block = store .get_full_block(&beacon_block_root)? .ok_or(Error::MissingBeaconBlock(beacon_block_root))?; - let beacon_state_root = beacon_block.state_root(); - let beacon_state = store - .get_state(&beacon_state_root, Some(beacon_block.slot()))? - .ok_or(Error::MissingBeaconState(beacon_state_root))?; + let current_slot = fork_choice.fc_store().get_current_slot(); + let (_, beacon_state) = store + .get_advanced_hot_state(beacon_block_root, current_slot, beacon_block.state_root())? + .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; let snapshot = BeaconSnapshot { beacon_block_root, @@ -669,10 +670,14 @@ impl BeaconChain { .get_full_block(&new_view.head_block_root)? .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; - let beacon_state_root = beacon_block.state_root(); - let beacon_state: BeaconState = self - .get_state(&beacon_state_root, Some(beacon_block.slot()))? - .ok_or(Error::MissingBeaconState(beacon_state_root))?; + let (_, beacon_state) = self + .store + .get_advanced_hot_state( + new_view.head_block_root, + current_slot, + beacon_block.state_root(), + )? 
+ .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; Ok(BeaconSnapshot { beacon_block: Arc::new(beacon_block), diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 8306b66d7b..6353a64e00 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -266,6 +266,7 @@ impl, Cold: ItemStore> BackgroundMigrator state, @@ -319,7 +320,12 @@ impl, Cold: ItemStore> BackgroundMigrator {} Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { debug!( diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 5cea51090b..7878fd14aa 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -9,7 +9,7 @@ use beacon_chain::{ test_utils::{ test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }, - BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped, + BeaconChain, BeaconChainError, BeaconChainTypes, ChainConfig, WhenSlotSkipped, }; use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; @@ -47,6 +47,10 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness Vec> { fn get_harness(validator_count: usize) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(MainnetEthSpec) .default_spec() + .chain_config(ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_ephemeral_store() .mock_execution_layer() diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 9a8c324d09..cd4351297b 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -7,7 +7,7 @@ use 
beacon_chain::otb_verification_service::{ use beacon_chain::{ canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer, + BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, NotifyExecutionLayer, OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, @@ -59,6 +59,10 @@ impl InvalidPayloadRig { let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec) + .chain_config(ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }) .logger(test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .mock_execution_layer() diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 2902774825..bf68657b4f 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -9,14 +9,15 @@ use beacon_chain::test_utils::{ use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; use beacon_chain::{ historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain, - BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, NotifyExecutionLayer, - ServerSentEventHandler, WhenSlotSkipped, + BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig, + NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped, }; use lazy_static::lazy_static; use logging::test_logger; use maplit::hashset; use rand::Rng; -use state_processing::BlockReplayer; +use slot_clock::{SlotClock, TestingSlotClock}; +use state_processing::{state_advance::complete_state_advance, BlockReplayer}; use std::collections::HashMap; use std::collections::HashSet; use std::convert::TryInto; @@ -65,6 +66,19 @@ fn get_store_with_spec( fn get_harness( store: Arc, LevelDB>>, validator_count: usize, +) 
-> TestHarness { + // Most tests expect to retain historic states, so we use this as the default. + let chain_config = ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }; + get_harness_generic(store, validator_count, chain_config) +} + +fn get_harness_generic( + store: Arc, LevelDB>>, + validator_count: usize, + chain_config: ChainConfig, ) -> TestHarness { let harness = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() @@ -72,6 +86,7 @@ fn get_harness( .logger(store.logger().clone()) .fresh_disk_store(store) .mock_execution_layer() + .chain_config(chain_config) .build(); harness.advance_slot(); harness @@ -460,13 +475,15 @@ async fn block_replay_with_inaccurate_state_roots() { .await; // Slot must not be 0 mod 32 or else no blocks will be replayed. - let (mut head_state, head_root) = harness.get_current_state_and_root(); + let (mut head_state, head_state_root) = harness.get_current_state_and_root(); + let head_block_root = harness.head_block_root(); assert_ne!(head_state.slot() % 32, 0); - let mut fast_head_state = store + let (_, mut fast_head_state) = store .get_inconsistent_state_for_attestation_verification_only( - &head_root, - Some(head_state.slot()), + &head_block_root, + head_state.slot(), + head_state_root, ) .unwrap() .unwrap(); @@ -565,14 +582,7 @@ async fn block_replayer_hooks() { async fn delete_blocks_and_states() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let validators_keypairs = - types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT); - let harness = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_disk_store(store.clone()) - .mock_execution_layer() - .build(); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let unforked_blocks: u64 = 4 * E::slots_per_epoch(); @@ -1015,18 +1025,14 @@ fn check_shuffling_compatible( // Ensure blocks from abandoned forks are pruned from the Hot DB 
#[tokio::test] async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { - const HONEST_VALIDATOR_COUNT: usize = 32 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let rig = get_harness(store.clone(), VALIDATOR_COUNT); let slots_per_epoch = rig.slots_per_epoch(); let (mut state, state_root) = rig.get_current_state_and_root(); @@ -1125,18 +1131,14 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { #[tokio::test] async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { - const HONEST_VALIDATOR_COUNT: usize = 32 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let rig = 
get_harness(store.clone(), VALIDATOR_COUNT); let slots_per_epoch = rig.slots_per_epoch(); let (state, state_root) = rig.get_current_state_and_root(); @@ -1260,15 +1262,11 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { const HONEST_VALIDATOR_COUNT: usize = 32; const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let rig = get_harness(store.clone(), VALIDATOR_COUNT); let slots_per_epoch = rig.slots_per_epoch(); let (mut state, state_root) = rig.get_current_state_and_root(); @@ -1352,18 +1350,14 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { #[tokio::test] async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { - const HONEST_VALIDATOR_COUNT: usize = 32 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = 
get_store(&db_path); + let rig = get_harness(store.clone(), VALIDATOR_COUNT); let (state, state_root) = rig.get_current_state_and_root(); // Fill up 0th epoch with canonical chain blocks @@ -1497,18 +1491,14 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { // This is to check if state outside of normal block processing are pruned correctly. #[tokio::test] async fn prunes_skipped_slots_states() { - const HONEST_VALIDATOR_COUNT: usize = 32 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let rig = get_harness(store.clone(), VALIDATOR_COUNT); let (state, state_root) = rig.get_current_state_and_root(); let canonical_slots_zeroth_epoch: Vec = @@ -1626,18 +1616,14 @@ async fn prunes_skipped_slots_states() { // This is to check if state outside of normal block processing are pruned correctly. 
#[tokio::test] async fn finalizes_non_epoch_start_slot() { - const HONEST_VALIDATOR_COUNT: usize = 32 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let rig = get_harness(store.clone(), VALIDATOR_COUNT); let (state, state_root) = rig.get_current_state_and_root(); let canonical_slots_zeroth_epoch: Vec = @@ -2053,39 +2039,82 @@ async fn garbage_collect_temp_states_from_failed_block() { } #[tokio::test] -async fn weak_subjectivity_sync() { +async fn weak_subjectivity_sync_easy() { + let num_initial_slots = E::slots_per_epoch() * 11; + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); + let slots = (1..num_initial_slots).map(Slot::new).collect(); + weak_subjectivity_sync_test(slots, checkpoint_slot).await +} + +#[tokio::test] +async fn weak_subjectivity_sync_unaligned_advanced_checkpoint() { + let num_initial_slots = E::slots_per_epoch() * 11; + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); + let slots = (1..num_initial_slots) + .map(Slot::new) + .filter(|&slot| { + // Skip 3 slots leading up to the checkpoint slot. 
+ slot <= checkpoint_slot - 3 || slot > checkpoint_slot + }) + .collect(); + weak_subjectivity_sync_test(slots, checkpoint_slot).await +} + +#[tokio::test] +async fn weak_subjectivity_sync_unaligned_unadvanced_checkpoint() { + let num_initial_slots = E::slots_per_epoch() * 11; + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9 - 3); + let slots = (1..num_initial_slots) + .map(Slot::new) + .filter(|&slot| { + // Skip 3 slots after the checkpoint slot. + slot <= checkpoint_slot || slot > checkpoint_slot + 3 + }) + .collect(); + weak_subjectivity_sync_test(slots, checkpoint_slot).await +} + +async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { // Build an initial chain on one harness, representing a synced node with full history. - let num_initial_blocks = E::slots_per_epoch() * 11; let num_final_blocks = E::slots_per_epoch() * 2; let temp1 = tempdir().unwrap(); let full_store = get_store(&temp1); let harness = get_harness(full_store.clone(), LOW_VALIDATOR_COUNT); + let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); + + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); harness - .extend_chain( - num_initial_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, + .add_attested_blocks_at_slots( + genesis_state.clone(), + genesis_state_root, + &slots, + &all_validators, ) .await; - let genesis_state = full_store - .get_state(&harness.chain.genesis_state_root, Some(Slot::new(0))) + let wss_block_root = harness + .chain + .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) .unwrap() .unwrap(); - let wss_checkpoint = harness.finalized_checkpoint(); + let wss_state_root = harness + .chain + .state_root_at_slot(checkpoint_slot) + .unwrap() + .unwrap(); + let wss_block = harness .chain .store - .get_full_block(&wss_checkpoint.root) + .get_full_block(&wss_block_root) .unwrap() .unwrap(); let wss_state = full_store - .get_state(&wss_block.state_root(), None) + 
.get_state(&wss_state_root, Some(checkpoint_slot)) .unwrap() .unwrap(); - let wss_slot = wss_block.slot(); // Add more blocks that advance finalization further. harness.advance_slot(); @@ -2104,20 +2133,26 @@ async fn weak_subjectivity_sync() { let spec = test_spec::(); let seconds_per_slot = spec.seconds_per_slot; - // Initialise a new beacon chain from the finalized checkpoint + // Initialise a new beacon chain from the finalized checkpoint. + // The slot clock must be set to a time ahead of the checkpoint state. + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(harness.chain.genesis_time), + Duration::from_secs(seconds_per_slot), + ); + slot_clock.set_slot(harness.get_current_slot().as_u64()); let beacon_chain = Arc::new( BeaconChainBuilder::new(MinimalEthSpec) .store(store.clone()) .custom_spec(test_spec::()) .task_executor(harness.chain.task_executor.clone()) + .logger(log.clone()) .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) .unwrap() - .logger(log.clone()) .store_migrator_config(MigratorConfig::default().blocking()) .dummy_eth1_backend() .expect("should build dummy backend") - .testing_slot_clock(Duration::from_secs(seconds_per_slot)) - .expect("should configure testing slot clock") + .slot_clock(slot_clock) .shutdown_sender(shutdown_tx) .chain_config(ChainConfig::default()) .event_handler(Some(ServerSentEventHandler::new_with_capacity( @@ -2131,9 +2166,9 @@ async fn weak_subjectivity_sync() { // Apply blocks forward to reach head. let chain_dump = harness.chain.chain_dump().unwrap(); - let new_blocks = &chain_dump[wss_slot.as_usize() + 1..]; - - assert_eq!(new_blocks[0].beacon_block.slot(), wss_slot + 1); + let new_blocks = chain_dump + .iter() + .filter(|snapshot| snapshot.beacon_block.slot() > checkpoint_slot); for snapshot in new_blocks { let full_block = harness @@ -2219,13 +2254,17 @@ async fn weak_subjectivity_sync() { assert_eq!(forwards, expected); // All blocks can be loaded. 
+ let mut prev_block_root = Hash256::zero(); for (block_root, slot) in beacon_chain .forwards_iter_block_roots(Slot::new(0)) .unwrap() .map(Result::unwrap) { let block = store.get_blinded_block(&block_root).unwrap().unwrap(); - assert_eq!(block.slot(), slot); + if block_root != prev_block_root { + assert_eq!(block.slot(), slot); + } + prev_block_root = block_root; } // All states from the oldest state slot can be loaded. @@ -2240,14 +2279,141 @@ async fn weak_subjectivity_sync() { assert_eq!(state.canonical_root(), state_root); } - // Anchor slot is still set to the starting slot. - assert_eq!(store.get_anchor_slot(), Some(wss_slot)); + // Anchor slot is still set to the slot of the checkpoint block. + assert_eq!(store.get_anchor_slot(), Some(wss_block.slot())); // Reconstruct states. store.clone().reconstruct_historic_states().unwrap(); assert_eq!(store.get_anchor_slot(), None); } +/// Test that blocks and attestations that refer to states around an unaligned split state are +/// processed correctly. +#[tokio::test] +async fn process_blocks_and_attestations_for_unaligned_checkpoint() { + let temp = tempdir().unwrap(); + let store = get_store(&temp); + let chain_config = ChainConfig { + reconstruct_historic_states: false, + ..ChainConfig::default() + }; + let harness = get_harness_generic(store.clone(), LOW_VALIDATOR_COUNT, chain_config); + + let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); + + let split_slot = Slot::new(E::slots_per_epoch() * 4); + let pre_skips = 1; + let post_skips = 1; + + // Build the chain up to the intended split slot, with 3 skips before the split. 
+ let slots = (1..=split_slot.as_u64() - pre_skips) + .map(Slot::new) + .collect::>(); + + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); + harness + .add_attested_blocks_at_slots( + genesis_state.clone(), + genesis_state_root, + &slots, + &all_validators, + ) + .await; + + // Before the split slot becomes finalized, create two forking blocks that build on the split + // block: + // + // - one that is invalid because it conflicts with finalization (slot <= finalized_slot) + // - one that is valid because its slot is not finalized (slot > finalized_slot) + let (unadvanced_split_state, unadvanced_split_state_root) = + harness.get_current_state_and_root(); + + let (invalid_fork_block, _) = harness + .make_block(unadvanced_split_state.clone(), split_slot) + .await; + let (valid_fork_block, _) = harness + .make_block(unadvanced_split_state.clone(), split_slot + 1) + .await; + + // Advance the chain so that the intended split slot is finalized. + // Do not attest in the epoch boundary slot, to make attestation production later easier (no + // equivocations). + let finalizing_slot = split_slot + 2 * E::slots_per_epoch(); + for _ in 0..pre_skips + post_skips { + harness.advance_slot(); + } + harness.extend_to_slot(finalizing_slot - 1).await; + harness + .add_block_at_slot(finalizing_slot, harness.get_current_state()) + .await + .unwrap(); + + // Check that the split slot is as intended. + let split = store.get_split_info(); + assert_eq!(split.slot, split_slot); + assert_eq!(split.block_root, valid_fork_block.parent_root()); + assert_ne!(split.state_root, unadvanced_split_state_root); + + // Applying the invalid block should fail. + let err = harness + .chain + .process_block( + invalid_fork_block.canonical_root(), + Arc::new(invalid_fork_block.clone()), + NotifyExecutionLayer::Yes, + || Ok(()), + ) + .await + .unwrap_err(); + assert!(matches!(err, BlockError::WouldRevertFinalizedSlot { .. 
})); + + // Applying the valid block should succeed, but it should not become head. + harness + .chain + .process_block( + valid_fork_block.canonical_root(), + Arc::new(valid_fork_block.clone()), + NotifyExecutionLayer::Yes, + || Ok(()), + ) + .await + .unwrap(); + harness.chain.recompute_head_at_current_slot().await; + assert_ne!(harness.head_block_root(), valid_fork_block.canonical_root()); + + // Attestations to the split block in the next 2 epochs should be processed successfully. + let attestation_start_slot = harness.get_current_slot(); + let attestation_end_slot = attestation_start_slot + 2 * E::slots_per_epoch(); + let (split_state_root, mut advanced_split_state) = harness + .chain + .store + .get_advanced_hot_state(split.block_root, split.slot, split.state_root) + .unwrap() + .unwrap(); + complete_state_advance( + &mut advanced_split_state, + Some(split_state_root), + attestation_start_slot, + &harness.chain.spec, + ) + .unwrap(); + advanced_split_state + .build_caches(&harness.chain.spec) + .unwrap(); + let advanced_split_state_root = advanced_split_state.update_tree_hash_cache().unwrap(); + for slot in (attestation_start_slot.as_u64()..attestation_end_slot.as_u64()).map(Slot::new) { + let attestations = harness.make_attestations( + &all_validators, + &advanced_split_state, + advanced_split_state_root, + split.block_root.into(), + slot, + ); + harness.advance_slot(); + harness.process_attestations(attestations); + } +} + #[tokio::test] async fn finalizes_after_resuming_from_db() { let validator_count = 16; @@ -2306,6 +2472,7 @@ async fn finalizes_after_resuming_from_db() { .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) .resumed_disk_store(store) + .testing_slot_clock(original_chain.slot_clock.clone()) .mock_execution_layer() .build(); @@ -2559,6 +2726,9 @@ async fn schema_downgrade_to_min_version() { SchemaVersion(11) }; + // Save the slot clock so that the new harness doesn't revert in time. 
+ let slot_clock = harness.chain.slot_clock.clone(); + // Close the database to ensure everything is written to disk. drop(store); drop(harness); @@ -2589,11 +2759,21 @@ async fn schema_downgrade_to_min_version() { ) .expect("schema upgrade from minimum version should work"); - // Rescreate the harness. + // Recreate the harness. + /* + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(harness.chain.genesis_time), + Duration::from_secs(spec.seconds_per_slot), + ); + slot_clock.set_slot(harness.get_current_slot().as_u64()); + */ + let harness = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec()) .logger(store.logger().clone()) + .testing_slot_clock(slot_clock) .resumed_disk_store(store.clone()) .mock_execution_layer() .build(); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index c5b2892cbd..8935c69926 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -6,7 +6,7 @@ use beacon_chain::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, OP_POOL_DB_KEY, }, - BeaconChain, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, + BeaconChain, ChainConfig, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, }; use lazy_static::lazy_static; use operation_pool::PersistedOperationPool; @@ -28,6 +28,10 @@ lazy_static! 
{ fn get_harness(validator_count: usize) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() + .chain_config(ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_ephemeral_store() .mock_execution_layer() diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 72b6a6c7d4..0b517930fb 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -6,11 +6,11 @@ edition = "2021" [dev-dependencies] serde_yaml = "0.8.13" -state_processing = { path = "../../consensus/state_processing" } operation_pool = { path = "../operation_pool" } tokio = "1.14.0" [dependencies] +state_processing = { path = "../../consensus/state_processing" } beacon_chain = { path = "../beacon_chain" } store = { path = "../store" } network = { path = "../network" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index bd2946bb7a..b09cf9cca3 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -309,7 +309,6 @@ where config.chain.checkpoint_sync_url_timeout, )), ); - let slots_per_epoch = TEthSpec::slots_per_epoch(); let deposit_snapshot = if config.sync_eth1_chain { // We want to fetch deposit snapshot before fetching the finalized beacon state to @@ -356,10 +355,23 @@ where None }; - debug!(context.log(), "Downloading finalized block"); - // Find a suitable finalized block on an epoch boundary. - let mut block = remote - .get_beacon_blocks_ssz::(BlockId::Finalized, &spec) + debug!( + context.log(), + "Downloading finalized state"; + ); + let state = remote + .get_debug_beacon_states_ssz::(StateId::Finalized, &spec) + .await + .map_err(|e| format!("Error loading checkpoint state from remote: {:?}", e))? 
+ .ok_or_else(|| "Checkpoint state missing from remote".to_string())?; + + debug!(context.log(), "Downloaded finalized state"; "slot" => ?state.slot()); + + let finalized_block_slot = state.latest_block_header().slot; + + debug!(context.log(), "Downloading finalized block"; "block_slot" => ?finalized_block_slot); + let block = remote + .get_beacon_blocks_ssz::(BlockId::Slot(finalized_block_slot), &spec) .await .map_err(|e| match e { ApiError::InvalidSsz(e) => format!( @@ -373,65 +385,15 @@ where debug!(context.log(), "Downloaded finalized block"); - let mut block_slot = block.slot(); - - while block.slot() % slots_per_epoch != 0 { - block_slot = (block_slot / slots_per_epoch - 1) * slots_per_epoch; - - debug!( - context.log(), - "Searching for aligned checkpoint block"; - "block_slot" => block_slot - ); - - if let Some(found_block) = remote - .get_beacon_blocks_ssz::(BlockId::Slot(block_slot), &spec) - .await - .map_err(|e| { - format!("Error fetching block at slot {}: {:?}", block_slot, e) - })? - { - block = found_block; - } - } - - debug!( - context.log(), - "Downloaded aligned finalized block"; - "block_root" => ?block.canonical_root(), - "block_slot" => block.slot(), - ); - - let state_root = block.state_root(); - debug!( - context.log(), - "Downloading finalized state"; - "state_root" => ?state_root - ); - let state = remote - .get_debug_beacon_states_ssz::(StateId::Root(state_root), &spec) - .await - .map_err(|e| { - format!( - "Error loading checkpoint state from remote {:?}: {:?}", - state_root, e - ) - })? 
- .ok_or_else(|| { - format!("Checkpoint state missing from remote: {:?}", state_root) - })?; - - debug!(context.log(), "Downloaded finalized state"); - let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; info!( context.log(), "Loaded checkpoint block and state"; - "slot" => block.slot(), + "block_slot" => block.slot(), + "state_slot" => state.slot(), "block_root" => ?block.canonical_root(), - "state_root" => ?state_root, ); let service = diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 3ae495378e..3c72441c02 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1,7 +1,7 @@ use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, - BeaconChain, StateSkipConfig, WhenSlotSkipped, + BeaconChain, ChainConfig, StateSkipConfig, WhenSlotSkipped, }; use environment::null_logger; use eth2::{ @@ -77,6 +77,7 @@ struct ApiTester { struct ApiTesterConfig { spec: ChainSpec, + retain_historic_states: bool, builder_threshold: Option, } @@ -86,11 +87,19 @@ impl Default for ApiTesterConfig { spec.shard_committee_period = 2; Self { spec, + retain_historic_states: false, builder_threshold: None, } } } +impl ApiTesterConfig { + fn retain_historic_states(mut self) -> Self { + self.retain_historic_states = true; + self + } +} + impl ApiTester { pub async fn new() -> Self { // This allows for testing voluntary exits without building out a massive chain. 
@@ -118,6 +127,10 @@ impl ApiTester { let harness = Arc::new( BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) + .chain_config(ChainConfig { + reconstruct_historic_states: config.retain_historic_states, + ..ChainConfig::default() + }) .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() @@ -375,6 +388,7 @@ impl ApiTester { pub async fn new_mev_tester_no_builder_threshold() -> Self { let mut config = ApiTesterConfig { builder_threshold: Some(0), + retain_historic_states: false, spec: E::default_spec(), }; config.spec.altair_fork_epoch = Some(Epoch::new(0)); @@ -4705,7 +4719,7 @@ async fn get_validator_duties_attester_with_skip_slots() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_duties_proposer() { - ApiTester::new() + ApiTester::new_from_config(ApiTesterConfig::default().retain_historic_states()) .await .test_get_validator_duties_proposer() .await; @@ -4713,7 +4727,7 @@ async fn get_validator_duties_proposer() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_duties_proposer_with_skip_slots() { - ApiTester::new() + ApiTester::new_from_config(ApiTesterConfig::default().retain_historic_states()) .await .skip_slots(E::slots_per_epoch() * 2) .test_get_validator_duties_proposer() @@ -5045,6 +5059,7 @@ async fn builder_payload_chosen_by_profit() { async fn builder_works_post_capella() { let mut config = ApiTesterConfig { builder_threshold: Some(0), + retain_historic_states: false, spec: E::default_spec(), }; config.spec.altair_fork_epoch = Some(Epoch::new(0)); diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index cb4d6f9c2b..ac7479db01 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -2062,7 +2062,7 @@ impl NetworkBeaconProcessor { ); 
} AttnError::BeaconChainError(BeaconChainError::DBError(Error::HotColdDBError( - HotColdDBError::AttestationStateIsFinalized { .. }, + HotColdDBError::FinalizedStateNotInHotDatabase { .. }, ))) => { debug!(self.log, "Attestation for finalized state"; "peer_id" => % peer_id); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 7695ea520e..47839ed3e9 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -14,7 +14,7 @@ use crate::memory_store::MemoryStore; use crate::metadata::{ AnchorInfo, CompactionTimestamp, PruningCheckpoint, SchemaVersion, ANCHOR_INFO_KEY, COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, PRUNING_CHECKPOINT_KEY, - SCHEMA_VERSION_KEY, SPLIT_KEY, + SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, }; use crate::metrics; use crate::{ @@ -110,10 +110,10 @@ pub enum HotColdDBError { IterationError { unexpected_key: BytesKey, }, - AttestationStateIsFinalized { + FinalizedStateNotInHotDatabase { split_slot: Slot, - request_slot: Option, - state_root: Hash256, + request_slot: Slot, + block_root: Hash256, }, } @@ -545,7 +545,7 @@ impl, Cold: ItemStore> HotColdDB /// upon that state (e.g., state roots). Additionally, only states from the hot store are /// returned. /// - /// See `Self::get_state` for information about `slot`. + /// See `Self::get_advanced_hot_state` for information about `max_slot`. 
/// /// ## Warning /// @@ -557,23 +557,78 @@ impl, Cold: ItemStore> HotColdDB /// - `state.block_roots` pub fn get_inconsistent_state_for_attestation_verification_only( &self, - state_root: &Hash256, - slot: Option, - ) -> Result>, Error> { + block_root: &Hash256, + max_slot: Slot, + state_root: Hash256, + ) -> Result)>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_GET_COUNT); + self.get_advanced_hot_state_with_strategy( + *block_root, + max_slot, + state_root, + StateProcessingStrategy::Inconsistent, + ) + } - let split_slot = self.get_split_slot(); + /// Get a state with `latest_block_root == block_root` advanced through to at most `max_slot`. + /// + /// The `state_root` argument is used to look up the block's un-advanced state in case an + /// advanced state is not found. + /// + /// Return the `(result_state_root, state)` satisfying: + /// + /// - `result_state_root == state.canonical_root()` + /// - `state.slot() <= max_slot` + /// - `state.get_latest_block_root(result_state_root) == block_root` + /// + /// Presently this is only used to avoid loading the un-advanced split state, but in future will + /// be expanded to return states from an in-memory cache. + pub fn get_advanced_hot_state( + &self, + block_root: Hash256, + max_slot: Slot, + state_root: Hash256, + ) -> Result)>, Error> { + self.get_advanced_hot_state_with_strategy( + block_root, + max_slot, + state_root, + StateProcessingStrategy::Accurate, + ) + } - if slot.map_or(false, |slot| slot < split_slot) { - Err(HotColdDBError::AttestationStateIsFinalized { - split_slot, - request_slot: slot, - state_root: *state_root, + /// Same as `get_advanced_hot_state` but taking a `StateProcessingStrategy`. + pub fn get_advanced_hot_state_with_strategy( + &self, + block_root: Hash256, + max_slot: Slot, + state_root: Hash256, + state_processing_strategy: StateProcessingStrategy, + ) -> Result)>, Error> { + // Hold a read lock on the split point so it can't move while we're trying to load the + // state. 
+ let split = self.split.read_recursive(); + + // Sanity check max-slot against the split slot. + if max_slot < split.slot { + return Err(HotColdDBError::FinalizedStateNotInHotDatabase { + split_slot: split.slot, + request_slot: max_slot, + block_root, } - .into()) - } else { - self.load_hot_state(state_root, StateProcessingStrategy::Inconsistent) + .into()); } + + let state_root = if block_root == split.block_root && split.slot <= max_slot { + split.state_root + } else { + state_root + }; + let state = self + .load_hot_state(&state_root, state_processing_strategy)? + .map(|state| (state_root, state)); + drop(split); + Ok(state) } /// Delete a state, ensuring it is removed from the LRU cache, as well as from on-disk. @@ -1180,8 +1235,12 @@ impl, Cold: ItemStore> HotColdDB *self.split.read_recursive() } - pub fn set_split(&self, slot: Slot, state_root: Hash256) { - *self.split.write() = Split { slot, state_root }; + pub fn set_split(&self, slot: Slot, state_root: Hash256, block_root: Hash256) { + *self.split.write() = Split { + slot, + state_root, + block_root, + }; } /// Fetch the slot of the most recently stored restore point. @@ -1216,25 +1275,36 @@ impl, Cold: ItemStore> HotColdDB } /// Initialise the anchor info for checkpoint sync starting from `block`. - pub fn init_anchor_info(&self, block: BeaconBlockRef<'_, E>) -> Result { + pub fn init_anchor_info( + &self, + block: BeaconBlockRef<'_, E>, + retain_historic_states: bool, + ) -> Result { let anchor_slot = block.slot(); let slots_per_restore_point = self.config.slots_per_restore_point; - // Set the `state_upper_limit` to the slot of the *next* restore point. - // See `get_state_upper_limit` for rationale. 
- let next_restore_point_slot = if anchor_slot % slots_per_restore_point == 0 { + let state_upper_limit = if !retain_historic_states { + STATE_UPPER_LIMIT_NO_RETAIN + } else if anchor_slot % slots_per_restore_point == 0 { anchor_slot } else { + // Set the `state_upper_limit` to the slot of the *next* restore point. + // See `get_state_upper_limit` for rationale. (anchor_slot / slots_per_restore_point + 1) * slots_per_restore_point }; - let anchor_info = AnchorInfo { - anchor_slot, - oldest_block_slot: anchor_slot, - oldest_block_parent: block.parent_root(), - state_upper_limit: next_restore_point_slot, - state_lower_limit: self.spec.genesis_slot, + let anchor_info = if state_upper_limit == 0 && anchor_slot == 0 { + // Genesis archive node: no anchor because we *will* store all states. + None + } else { + Some(AnchorInfo { + anchor_slot, + oldest_block_slot: anchor_slot, + oldest_block_parent: block.parent_root(), + state_upper_limit, + state_lower_limit: self.spec.genesis_slot, + }) }; - self.compare_and_set_anchor_info(None, Some(anchor_info)) + self.compare_and_set_anchor_info(None, anchor_info) } /// Get a clone of the store's anchor info. @@ -1361,11 +1431,26 @@ impl, Cold: ItemStore> HotColdDB self.hot_db.put(&CONFIG_KEY, &self.config.as_disk_config()) } - /// Load the split point from disk. - fn load_split(&self) -> Result, Error> { + /// Load the split point from disk, sans block root. + fn load_split_partial(&self) -> Result, Error> { self.hot_db.get(&SPLIT_KEY) } + /// Load the split point from disk, including block root. + fn load_split(&self) -> Result, Error> { + match self.load_split_partial()? { + Some(mut split) => { + // Load the hot state summary to get the block root. + let summary = self.load_hot_state_summary(&split.state_root)?.ok_or( + HotColdDBError::MissingSplitState(split.state_root, split.slot), + )?; + split.block_root = summary.latest_block_root; + Ok(Some(split)) + } + None => Ok(None), + } + } + /// Stage the split for storage to disk. 
pub fn store_split_in_batch(&self) -> KeyValueStoreOp { self.split.read_recursive().as_kv_store_op(SPLIT_KEY) @@ -1611,42 +1696,40 @@ impl, Cold: ItemStore> HotColdDB /// Advance the split point of the store, moving new finalized states to the freezer. pub fn migrate_database, Cold: ItemStore>( store: Arc>, - frozen_head_root: Hash256, - frozen_head: &BeaconState, + finalized_state_root: Hash256, + finalized_block_root: Hash256, + finalized_state: &BeaconState, ) -> Result<(), Error> { debug!( store.log, "Freezer migration started"; - "slot" => frozen_head.slot() + "slot" => finalized_state.slot() ); // 0. Check that the migration is sensible. - // The new frozen head must increase the current split slot, and lie on an epoch + // The new finalized state must increase the current split slot, and lie on an epoch // boundary (in order for the hot state summary scheme to work). let current_split_slot = store.split.read_recursive().slot; - let anchor_slot = store - .anchor_info - .read_recursive() - .as_ref() - .map(|a| a.anchor_slot); + let anchor_info = store.anchor_info.read_recursive().clone(); + let anchor_slot = anchor_info.as_ref().map(|a| a.anchor_slot); - if frozen_head.slot() < current_split_slot { + if finalized_state.slot() < current_split_slot { return Err(HotColdDBError::FreezeSlotError { current_split_slot, - proposed_split_slot: frozen_head.slot(), + proposed_split_slot: finalized_state.slot(), } .into()); } - if frozen_head.slot() % E::slots_per_epoch() != 0 { - return Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot()).into()); + if finalized_state.slot() % E::slots_per_epoch() != 0 { + return Err(HotColdDBError::FreezeSlotUnaligned(finalized_state.slot()).into()); } let mut hot_db_ops: Vec> = Vec::new(); - // 1. Copy all of the states between the head and the split slot, from the hot DB + // 1. Copy all of the states between the new finalized state and the split slot, from the hot DB // to the cold DB. 
Delete the execution payloads of these now-finalized blocks. - let state_root_iter = RootsIterator::new(&store, frozen_head); + let state_root_iter = RootsIterator::new(&store, finalized_state); for maybe_tuple in state_root_iter.take_while(|result| match result { Ok((_, _, slot)) => { slot >= ¤t_split_slot @@ -1656,6 +1739,29 @@ pub fn migrate_database, Cold: ItemStore>( }) { let (block_root, state_root, slot) = maybe_tuple?; + // Delete the execution payload if payload pruning is enabled. At a skipped slot we may + // delete the payload for the finalized block itself, but that's OK as we only guarantee + // that payloads are present for slots >= the split slot. The payload fetching code is also + // forgiving of missing payloads. + if store.config.prune_payloads { + hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root)); + } + + // Delete the old summary, and the full state if we lie on an epoch boundary. + hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot))); + + // Do not try to store states if a restore point is yet to be stored, or will never be + // stored (see `STATE_UPPER_LIMIT_NO_RETAIN`). Make an exception for the genesis state + // which always needs to be copied from the hot DB to the freezer and should not be deleted. + if slot != 0 + && anchor_info + .as_ref() + .map_or(false, |anchor| slot < anchor.state_upper_limit) + { + debug!(store.log, "Pruning finalized state"; "slot" => slot); + continue; + } + let mut cold_db_ops: Vec = Vec::new(); if slot % store.config.slots_per_restore_point == 0 { @@ -1674,17 +1780,6 @@ pub fn migrate_database, Cold: ItemStore>( // There are data dependencies between calls to `store_cold_state()` that prevent us from // doing one big call to `store.cold_db.do_atomically()` at end of the loop. store.cold_db.do_atomically(cold_db_ops)?; - - // Delete the old summary, and the full state if we lie on an epoch boundary. 
- hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot))); - - // Delete the execution payload if payload pruning is enabled. At a skipped slot we may - // delete the payload for the finalized block itself, but that's OK as we only guarantee - // that payloads are present for slots >= the split slot. The payload fetching code is also - // forgiving of missing payloads. - if store.config.prune_payloads { - hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root)); - } } // Warning: Critical section. We have to take care not to put any of the two databases in an @@ -1724,8 +1819,9 @@ pub fn migrate_database, Cold: ItemStore>( // Before updating the in-memory split value, we flush it to disk first, so that should the // OS process die at this point, we pick up from the right place after a restart. let split = Split { - slot: frozen_head.slot(), - state_root: frozen_head_root, + slot: finalized_state.slot(), + state_root: finalized_state_root, + block_root: finalized_block_root, }; store.hot_db.put_sync(&SPLIT_KEY, &split)?; @@ -1741,7 +1837,7 @@ pub fn migrate_database, Cold: ItemStore>( debug!( store.log, "Freezer migration complete"; - "slot" => frozen_head.slot() + "slot" => finalized_state.slot() ); Ok(()) @@ -1750,8 +1846,16 @@ pub fn migrate_database, Cold: ItemStore>( /// Struct for storing the split slot and state root in the database. #[derive(Debug, Clone, Copy, PartialEq, Default, Encode, Decode, Deserialize, Serialize)] pub struct Split { - pub(crate) slot: Slot, - pub(crate) state_root: Hash256, + pub slot: Slot, + pub state_root: Hash256, + /// The block root of the split state. + /// + /// This is used to provide special handling for the split state in the case where there are + /// skipped slots. 
The split state will *always* be the advanced state, so callers + /// who only have the finalized block root should use `get_advanced_hot_state` to get this state, + /// rather than fetching `block.state_root()` (the unaligned state) which will have been pruned. + #[ssz(skip_serializing, skip_deserializing)] + pub block_root: Hash256, } impl StoreItem for Split { diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 6f50d7038f..ccfddcf8f8 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -16,6 +16,9 @@ pub const PRUNING_CHECKPOINT_KEY: Hash256 = Hash256::repeat_byte(3); pub const COMPACTION_TIMESTAMP_KEY: Hash256 = Hash256::repeat_byte(4); pub const ANCHOR_INFO_KEY: Hash256 = Hash256::repeat_byte(5); +/// State upper limit value used to indicate that a node is not storing historic states. +pub const STATE_UPPER_LIMIT_NO_RETAIN: Slot = Slot::new(u64::MAX); + #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub struct SchemaVersion(pub u64); diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md index 5788382894..0c375a5f00 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/checkpoint-sync.md @@ -75,7 +75,7 @@ Once backfill is complete, a `INFO Historical block download complete` log will > Note: Since [v4.1.0](https://github.com/sigp/lighthouse/releases/tag/v4.1.0), Lighthouse implements rate-limited backfilling to mitigate validator performance issues after a recent checkpoint sync. This means that the speed at which historical blocks are downloaded is limited, typically to less than 20 slots/sec. This will not affect validator performance. However, if you would still prefer to sync the chain as fast as possible, you can add the flag `--disable-backfill-rate-limiting` to the beacon node. 
-> Note: Since [v4.2.0](https://github.com/sigp/lighthouse/releases/tag/v4.2.0), Lighthouse limits the backfill sync to only sync backwards to the weak subjectivity point (approximately 5 months). This will help to save disk space. However, if you would like to sync back to the genesis, you can add the flag `--genesis-backfill` to the beacon node. +> Note: Since [v4.2.0](https://github.com/sigp/lighthouse/releases/tag/v4.2.0), Lighthouse limits the backfill sync to only sync backwards to the weak subjectivity point (approximately 5 months). This will help to save disk space. However, if you would like to sync back to the genesis, you can add the flag `--genesis-backfill` to the beacon node. ## FAQ @@ -116,8 +116,9 @@ states: database. Additionally, the genesis block is always available. * `state_lower_limit`: All states with slots _less than or equal to_ this value are available in the database. The minimum value is 0, indicating that the genesis state is always available. -* `state_upper_limit`: All states with slots _greater than or equal to_ this value are available - in the database. +* `state_upper_limit`: All states with slots _greater than or equal to_ `min(split.slot, + state_upper_limit)` are available in the database. In the case where the `state_upper_limit` is + higher than the `split.slot`, this means states are not being written to the freezer database. Reconstruction runs from the state lower limit to the upper limit, narrowing the window of unavailable states as it goes. It will log messages like the following to show its progress: @@ -153,18 +154,8 @@ To manually specify a checkpoint use the following two flags: * `--checkpoint-state`: accepts an SSZ-encoded `BeaconState` blob * `--checkpoint-block`: accepts an SSZ-encoded `SignedBeaconBlock` blob -_Both_ the state and block must be provided and **must** adhere to the [Alignment -Requirements](#alignment-requirements) described below. 
- -### Alignment Requirements - -* The block must be a finalized block from an epoch boundary, i.e. `block.slot() % 32 == 0`. -* The state must be the state corresponding to `block` with `state.slot() == block.slot()` - and `state.hash_tree_root() == block.state_root()`. - -These requirements are imposed to align with Lighthouse's database schema, and notably exclude -finalized blocks from skipped slots. You can avoid alignment issues by using -[Automatic Checkpoint Sync](#automatic-checkpoint-sync), which will search for a suitable block -and state pair. +_Both_ the state and block must be provided and the state **must** match the block. The +state may be from the same slot as the block (unadvanced), or advanced to an epoch boundary, +in which case it will be assumed to be finalized at that epoch. [weak-subj]: https://blog.ethereum.org/2014/11/25/proof-stake-learned-love-weak-subjectivity/ diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 4f563f8639..b3749ea9d8 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -355,7 +355,7 @@ where spec: &ChainSpec, ) -> Result> { // Sanity check: the anchor must lie on an epoch boundary. 
- if anchor_block.slot() % E::slots_per_epoch() != 0 { + if anchor_state.slot() % E::slots_per_epoch() != 0 { return Err(Error::InvalidAnchor { block_slot: anchor_block.slot(), state_slot: anchor_state.slot(), @@ -391,6 +391,7 @@ where let current_slot = current_slot.unwrap_or_else(|| fc_store.get_current_slot()); let proto_array = ProtoArrayForkChoice::new::( + current_slot, finalized_block_slot, finalized_block_state_root, *fc_store.justified_checkpoint(), diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 157f072ad3..98d43e4850 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -80,6 +80,7 @@ impl ForkChoiceTestDefinition { let junk_shuffling_id = AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero()); let mut fork_choice = ProtoArrayForkChoice::new::( + self.finalized_block_slot, self.finalized_block_slot, Hash256::zero(), self.justified_checkpoint, diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index fe831b3c35..5911e50fcd 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -345,6 +345,7 @@ pub struct ProtoArrayForkChoice { impl ProtoArrayForkChoice { #[allow(clippy::too_many_arguments)] pub fn new( + current_slot: Slot, finalized_block_slot: Slot, finalized_block_state_root: Hash256, justified_checkpoint: Checkpoint, @@ -380,7 +381,7 @@ impl ProtoArrayForkChoice { }; proto_array - .on_block::(block, finalized_block_slot) + .on_block::(block, current_slot) .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?; Ok(Self { @@ -983,6 +984,7 @@ mod test_compute_deltas { }; let mut fc = ProtoArrayForkChoice::new::( + genesis_slot, genesis_slot, state_root, genesis_checkpoint, @@ -1108,6 +1110,7 @@ mod 
test_compute_deltas { }; let mut fc = ProtoArrayForkChoice::new::( + genesis_slot, genesis_slot, junk_state_root, genesis_checkpoint, diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index ead06edbf5..e16fb4a7b1 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -21,7 +21,7 @@ impl From for Error { /// /// If the root of the supplied `state` is known, then it can be passed as `state_root`. If /// `state_root` is `None`, the root of `state` will be computed using a cached tree hash. -/// Providing the `state_root` makes this function several orders of magniude faster. +/// Providing the `state_root` makes this function several orders of magnitude faster. pub fn per_slot_processing( state: &mut BeaconState, state_root: Option, diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 9627d2cde0..c4f288a8aa 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -7,7 +7,7 @@ use beacon_chain::{ obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, }, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainTypes, CachedHead, NotifyExecutionLayer, + BeaconChainTypes, CachedHead, ChainConfig, NotifyExecutionLayer, }; use execution_layer::{json_structures::JsonPayloadStatusV1Status, PayloadStatusV1}; use serde::Deserialize; @@ -303,6 +303,10 @@ impl Tester { let harness = BeaconChainHarness::builder(E::default()) .spec(spec.clone()) .keypairs(vec![]) + .chain_config(ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }) .genesis_state_ephemeral_store(case.anchor_state.clone()) .mock_execution_layer() .recalculate_fork_times_with_genesis(0) diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index 394f8558fa..3cd8205eb6 100644 --- 
a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -115,6 +115,9 @@ pub fn testing_client_config() -> ClientConfig { genesis_time: now, }; + // Simulator tests expect historic states to be available for post-run checks. + client_config.chain.reconstruct_historic_states = true; + // Specify a constant count of beacon processor workers. Having this number // too low can cause annoying HTTP timeouts, especially on Github runners // with 2 logical CPUs. diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs index af1cde26b7..9032b124ab 100644 --- a/watch/tests/tests.rs +++ b/watch/tests/tests.rs @@ -1,8 +1,9 @@ #![recursion_limit = "256"] #![cfg(unix)] -use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, +use beacon_chain::{ + test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, + ChainConfig, }; use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; use http_api::test_utils::{create_api_server, ApiServer}; @@ -91,6 +92,10 @@ impl TesterBuilder { pub async fn new() -> TesterBuilder { let harness = BeaconChainHarness::builder(E::default()) .default_spec() + .chain_config(ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .build(); From b3e1c297c8e0ab2e62b3a5068576d94f0e0585ab Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Mon, 21 Aug 2023 05:02:33 +0000 Subject: [PATCH 56/75] `ForkChoice`: remove an impossible case for `parent_checkpoints` (#4626) `parent_finalized.epoch + 1 > block_epoch` will never be `true` since as the comment says: ``` A block in epoch `N` cannot contain attestations which would finalize an epoch higher than `N - 1`. 
``` --- consensus/fork_choice/src/fork_choice.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index b3749ea9d8..ea3a58127b 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -750,7 +750,7 @@ where .unrealized_justified_checkpoint .zip(parent_block.unrealized_finalized_checkpoint) .filter(|(parent_justified, parent_finalized)| { - parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 >= block_epoch + parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 == block_epoch }); let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if let Some(( From 524d9af2884a53eee1e3c5c07db85ac4f6a6df8d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 21 Aug 2023 05:02:34 +0000 Subject: [PATCH 57/75] Fix beacon-processor-max-workers (#4636) ## Issue Addressed Fixes a bug in the handling of `--beacon-process-max-workers` which caused it to have no effect. ## Proposed Changes For this PR I channeled @ethDreamer and saw deep into the faulty CLI config -- this bug is almost identical to the one Mark found and fixed in #4622. 
--- Cargo.lock | 2 -- beacon_node/beacon_processor/src/lib.rs | 5 ++--- beacon_node/client/Cargo.toml | 1 - beacon_node/client/src/builder.rs | 2 -- beacon_node/http_api/src/test_utils.rs | 14 ++++++++------ beacon_node/network/Cargo.toml | 1 - .../network/src/network_beacon_processor/tests.rs | 2 -- 7 files changed, 10 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 116f8081fa..9a8cf48336 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1053,7 +1053,6 @@ dependencies = [ "logging", "monitoring_api", "network", - "num_cpus", "operation_pool", "parking_lot 0.12.1", "sensitive_url", @@ -5037,7 +5036,6 @@ dependencies = [ "logging", "lru_cache", "matches", - "num_cpus", "operation_pool", "parking_lot 0.12.1", "rand 0.8.5", diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index bf5d8bced4..4c1da85fa5 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -708,7 +708,6 @@ impl Stream for InboundEvents { pub struct BeaconProcessor { pub network_globals: Arc>, pub executor: TaskExecutor, - pub max_workers: usize, pub current_workers: usize, pub config: BeaconProcessorConfig, pub log: Logger, @@ -721,7 +720,7 @@ impl BeaconProcessor { /// - Performed immediately, if a worker is available. /// - Queued for later processing, if no worker is currently available. /// - /// Only `self.max_workers` will ever be spawned at one time. Each worker is a `tokio` task + /// Only `self.config.max_workers` will ever be spawned at one time. Each worker is a `tokio` task /// started with `spawn_blocking`. 
/// /// The optional `work_journal_tx` allows for an outside process to receive a log of all work @@ -896,7 +895,7 @@ impl BeaconProcessor { let _ = work_journal_tx.try_send(id); } - let can_spawn = self.current_workers < self.max_workers; + let can_spawn = self.current_workers < self.config.max_workers; let drop_during_sync = work_event .as_ref() .map_or(false, |event| event.drop_during_sync); diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 0b517930fb..87e1650902 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -44,4 +44,3 @@ slasher_service = { path = "../../slasher/service" } monitoring_api = {path = "../../common/monitoring_api"} execution_layer = { path = "../execution_layer" } beacon_processor = { path = "../beacon_processor" } -num_cpus = "1.13.0" diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index b09cf9cca3..4de6d12622 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -29,7 +29,6 @@ use network::{NetworkConfig, NetworkSenders, NetworkService}; use slasher::Slasher; use slasher_service::SlasherService; use slog::{debug, info, warn, Logger}; -use std::cmp; use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::sync::Arc; @@ -757,7 +756,6 @@ where BeaconProcessor { network_globals: network_globals.clone(), executor: beacon_processor_context.executor.clone(), - max_workers: cmp::max(1, num_cpus::get()), current_workers: 0, config: beacon_processor_config, log: beacon_processor_context.log().clone(), diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index dcc2532229..b1478f3f29 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -184,7 +184,14 @@ pub async fn create_api_server_on_port( let eth1_service = eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap(); - let beacon_processor_config 
= BeaconProcessorConfig::default(); + let beacon_processor_config = BeaconProcessorConfig { + // The number of workers must be greater than one. Tests which use the + // builder workflow sometimes require an internal HTTP request in order + // to fulfill an already in-flight HTTP request, therefore having only + // one worker will result in a deadlock. + max_workers: 2, + ..BeaconProcessorConfig::default() + }; let BeaconProcessorChannels { beacon_processor_tx, beacon_processor_rx, @@ -196,11 +203,6 @@ pub async fn create_api_server_on_port( BeaconProcessor { network_globals: network_globals.clone(), executor: test_runtime.task_executor.clone(), - // The number of workers must be greater than one. Tests which use the - // builder workflow sometimes require an internal HTTP request in order - // to fulfill an already in-flight HTTP request, therefore having only - // one worker will result in a deadlock. - max_workers: 2, current_workers: 0, config: beacon_processor_config, log: log.clone(), diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index c37b0fa45d..715e77bcad 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -36,7 +36,6 @@ logging = { path = "../../common/logging" } task_executor = { path = "../../common/task_executor" } igd = "0.12.1" itertools = "0.10.0" -num_cpus = "1.13.0" lru_cache = { path = "../../common/lru_cache" } if-addrs = "0.6.4" strum = "0.24.0" diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index ce5b914117..a678edbf1f 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -20,7 +20,6 @@ use lighthouse_network::{ Client, MessageId, NetworkGlobals, PeerId, }; use slot_clock::SlotClock; -use std::cmp; use std::iter::Iterator; use std::sync::Arc; use std::time::Duration; @@ -228,7 +227,6 @@ impl TestRig { let 
beacon_processor = BeaconProcessor { network_globals, executor, - max_workers: cmp::max(1, num_cpus::get()), current_workers: 0, config: beacon_processor_config, log: log.clone(), From 91f3bc274b063921e41c7a37386e86680697e875 Mon Sep 17 00:00:00 2001 From: Aoi Kurokawa Date: Mon, 21 Aug 2023 05:02:35 +0000 Subject: [PATCH 58/75] Fix the link of anvil in lighthouse book (#4641) ## Issue Addressed Wrong link for anvil ## Proposed Changes Fix the link to correct one. --- book/src/setup.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/src/setup.md b/book/src/setup.md index 533e1d463d..1ae6e63540 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -9,7 +9,7 @@ particularly useful for development but still a good way to ensure you have the base dependencies. The additional requirements for developers are: -- [`anvil`](https://github.com/foundry-rs/foundry/tree/master/anvil). This is used to +- [`anvil`](https://github.com/foundry-rs/foundry/tree/master/crates/anvil). This is used to simulate the execution chain during tests. You'll get failures during tests if you don't have `anvil` available on your `PATH`. - [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by From f92b856cd1b25b7e51a35747e613a5f22e6973d3 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 21 Aug 2023 05:02:36 +0000 Subject: [PATCH 59/75] Remove Antithesis docker build workflow (#4642) ## Issue Addressed This PR remove Antithesis docker build CI workflow. Confirmed with Antithesis team that this is no longer used. 
--- .github/workflows/docker-antithesis.yml | 35 ------------------ testing/antithesis/Dockerfile.libvoidstar | 25 ------------- testing/antithesis/libvoidstar/libvoidstar.so | Bin 348192 -> 0 bytes 3 files changed, 60 deletions(-) delete mode 100644 .github/workflows/docker-antithesis.yml delete mode 100644 testing/antithesis/Dockerfile.libvoidstar delete mode 100644 testing/antithesis/libvoidstar/libvoidstar.so diff --git a/.github/workflows/docker-antithesis.yml b/.github/workflows/docker-antithesis.yml deleted file mode 100644 index a96431fafb..0000000000 --- a/.github/workflows/docker-antithesis.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: docker antithesis - -on: - push: - branches: - - unstable - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -env: - ANTITHESIS_PASSWORD: ${{ secrets.ANTITHESIS_PASSWORD }} - ANTITHESIS_USERNAME: ${{ secrets.ANTITHESIS_USERNAME }} - ANTITHESIS_SERVER: ${{ secrets.ANTITHESIS_SERVER }} - REPOSITORY: ${{ secrets.ANTITHESIS_REPOSITORY }} - IMAGE_NAME: lighthouse - TAG: libvoidstar - -jobs: - build-docker: - runs-on: ubuntu-22.04 - steps: - - uses: actions/checkout@v3 - - name: Update Rust - run: rustup update stable - - name: Dockerhub login - run: | - echo "${ANTITHESIS_PASSWORD}" | docker login --username ${ANTITHESIS_USERNAME} https://${ANTITHESIS_SERVER} --password-stdin - - name: Build AMD64 dockerfile (with push) - run: | - docker build \ - --tag ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG} \ - --file ./testing/antithesis/Dockerfile.libvoidstar . - docker push ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG} diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar deleted file mode 100644 index c790e248df..0000000000 --- a/testing/antithesis/Dockerfile.libvoidstar +++ /dev/null @@ -1,25 +0,0 @@ -FROM rust:1.68.2-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev -COPY . 
lighthouse - -# Build lighthouse directly with a cargo build command, bypassing the Makefile. -RUN cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Cpasses=sancov-module -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse -# build lcli binary directly with cargo install command, bypassing the makefile -RUN cargo install --path /lighthouse/lcli --force --locked - -FROM ubuntu:latest -RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \ - libssl-dev \ - ca-certificates \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -# create and move the libvoidstar file -RUN mkdir libvoidstar -COPY --from=builder /lighthouse/testing/antithesis/libvoidstar/libvoidstar.so /usr/lib/libvoidstar.so - -# set the env variable to avoid having to always set it -ENV LD_LIBRARY_PATH=/usr/lib -# move the lighthouse binary and lcli binary -COPY --from=builder /lighthouse/target/x86_64-unknown-linux-gnu/release/lighthouse /usr/local/bin/lighthouse -COPY --from=builder /lighthouse/target/release/lcli /usr/local/bin/lcli \ No newline at end of file diff --git a/testing/antithesis/libvoidstar/libvoidstar.so b/testing/antithesis/libvoidstar/libvoidstar.so deleted file mode 100644 index 0f8a0f23c3fb7a349788bc8d5532dc71bb821c68..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 348192 zcmeFa3!Ga=dG|lN>rI3c66FFYB!QCzC$i&ok+I__z9fQ>NZOUwlD#Vl zNo&g~7ZoT7UT7Bxt+=!mkWv*A5O+}msFW7H6sTxvKY~Ilcqxc-6L)dV`=C^Lf8q2tMO$aHp}!(EjNQsGoF@A}VNRG#zK{REIsp0DLn zcDb&)PFd`$K7ov_JlBqhVQqd*?UPbIWjZsTUF{{)_v~jf7pab@i&aoPj3JGg!+@?|rq`&`pr+M_&e!*k#TRqiU`xfn+lIf;hQAE_GRygB z^N(!e|FI4KQyczf8~!dE{(c+&2^;=t8~#r={PQ;an>PIWHvDHcd<2bnsd{nPaHkD_ 
zk_~^V4R_mcj}7;b(35{{mmCpLw-S{IU(N*zot; z@Xy%rFWT^L+VJn$@SY8S%!Xfz!Evd2d7=$}iVa_5!*8|WciHfhHvDRH+K)e+?Aa*ntxc^FX;HxHkz7OM^*egbo?)9 zZj2}nv*zbVns@blXp`pm>HK)~d9iDLGL(PD&uTjX&B^|2G!HYs{!H_N&gTu<{v(=~ zS^0#?4zvHCntQZgWal!SPlJ`u4Vu?kes*duSbUu=l+G9e|8EuMT=KtB%q{t`<@PVo z7FUT2VL=A6b%ANifqAFel9nE4GKw*Rx&2E18yBxeMaZ@%r==sX@j~pU`}-r}EcOf7 zm!J73CmttfpmJ_0<6n_Tpy4^h$8jyn{oGaZ_yyuU=riU1mFDp=5kMWt{Rdz5$G95C zMH^))Kg!;!zP*f@{z7pP> zfiJE*N5pmRr^&cS5KbUG6QQ%|*!x?z#qPT5+wcG0me)P0QvS-l&tLzZKXhN&Oz&{! z)Bjo=yX5rCy0^daW7)*#epcMH<0bp=IeE7Cf1dxDSG?#mM}JuTi;w-)1wY;vy6$6} zuK9BQe%O=Y9)vvz@^ctLo=>{C1>xBU^pW}9fUr}8yP!`**pDEu!_*tGF)DxPv)8h>8Qag$`#uZ5%CaptuS5tSY(rR!a1h}pg!>TW z=jjNO=GJfj5A~sFl;#J`R8W7bo1i{2kc$SJ{VmzHT?p49Jf%;CxLR-DXkoNT_hqzC zx>0Z766=3tTbCai*KWo34wDG;e(hSlEyZKmmYcWh{dLd__4aD$EqeP>trtV(E$7n^ za8Zk=BHWE2KYoOuxuxeAo05tArfe>iiEYYeqM6N}c>c_UXKwO=bpDo^$#l{?vpsrh z;$(jF_H7%^q!Q6=E|S?ao0!_RWyf!t)S7Vfne85L^4#RPv&qCQlXFuO(Q_W}sm60^Qoj5X;SUUX!(OXV!nTu}m+%h+{`E(*N;mw}*OlP-6W+x&uC!cdlrk_uqO(df> z>F2gg#j|tX*>vXg17|X)PCh5&IXydb`b=hWA~KuZvf-r6{f5bn6Sv5$3xv6i+0@2u zaw|G3Hz(z0Iv3BxB2l^Tk^9psQYdtCE|m<)63>M~B6RTh9ieC}6FZs6=3<%SckG=_ zC1b}UQ?oI3Z0RGR>3k$KlSoEp6A#41?S~KEwl_4nadP8U5ej7^$wUq~6Piw)3FR`8 z=~yT|9XdH5$wZeq97-UmjFXE@pAJpOPlskAiP^sWEg}@lBSI{nTgvv*SlOJJf8c@8 z)VW+NE6mO5R3aIgnN6hCZY+~YWkS(NE@IuEi)FKslQE6tna#8_v-8=w+B_M{g))(3 zG&R@XpN>Z|YA2DLNm*=~yF(^VpV>SaN=FhIovC#4q)M5E*=%Y)gOW=|=3)Y;nU?>= zl4pc0BI$iBgRF(-Qqg%-RWg-J%$)0Ekx$57L# zmK^F)UF4xua(b|XhSE=i7J$qn<1-eIkk)Jpp-?27jiDbfuQE87JC}}yrlc=}j3ceC zFSDRRp)*!fk*QQBC#H4NQddTFHk3eLg8YTj@n}XwXCqOFP9|o~W^lNl^~_mW4xvnR zs5mrbg{0}(j2V|IP>nqLxoUshr01-dPs$JRH0Gyjf0$P_ck=l9e!;=)7tW|r*I@C3 zYm{9#Rwg;HO2!!R`pwFJgj+x@deGBjFbH~r~Gm=9@hErFzy^vH&I^34b6RwS9Lk~8K?XQ7^nOP8K?Y* z887Jkrx~YqlV{wg^I2d#pm~w;y3YRs&SL;X*iaq&|Xzr#3Pr(MQvhC<7B7AxcIp$pEBcQr@}bdsWR@^K-FJ5I)Z+K!8HvSTn# zcHE2?v>gxQWXH=m+3_)6*LM7jlbrzLWGBeD_@yeJFymw=&N$ggGw#!N@{E(60^?+- z$aq29Szw&(lo%&FWyb5;PK9x@Q)Qg&)EF1PQsq-;oa{6hCp%5XecDcoakA59oa}TM 
zFK9bm#>q~Pak3*;4Y&Wgw&P%&>^K=GJ1)k>W2$@%#>tMGakArK+^6k$87DhF#>tMK z@q)G!V4Um(87Dho#_QTnoN=;~W}NKg8JADt<)^?n*(owkb`}`-X*(sx$xfMZvQuHa zpzTx{Cp$I9$xfZ|y0+6`oa{6iCp#_1gNyl z&~}`RlN}f1WXE8tM4@wT?(XPoQ=7$-YH#*J}RK4HemPMmSFlV&`u?c^CJI|atcPLc7lwzI%E z*(otjcFK&mwVev%WT(nF*{LyZT%gLQ&N$g=Fiv)wjEA+I7UN{6%{bZVFkaSnx{Q;Z z9^+(3TsYkR+uDwUakArNob0$5H!f7=V=zv3+>DbQ5948N$ICd`@i9(z{EU~iodDxx zC&)P22{Yc-cH)eaoiyWQC(pRyP~}r#oa_`CCp!y_hqav&<7B7IIN7N%Ue?7{lX0@+ zVw~(4jJLHNH{)c-%Q!vf@-c3#R?pM?HauX%gN%E$oiO9Zgt8xJ+^6HG84qZlXS{v6 zvQuE(^902g7#I3EMTv3pw`{{JHoVF>`CDT={Y;ggI^*PTgK_e=$#_%y+hRO@g|gpa z+@<~PGEV;XY`A#baJ!}W4#vq}C*y&=DnBm9$zOwU^4HCHUHj`{y!k|B-^aK|`|D@C zeYJ`oV7zdR;z7oR=3&Ok-#Fv-2b7&O<6&yIjFZ0w#;e-jBI9lCZ;5fg_P5M9)mO!a zS8aHWaq_p$c==}KZ-a5F-zMYaZ;SD=_P5QszJ9xmhqb>w#>ro?+NRywa3|yBuZ!{W zCgrceIQi>loc#4LzM%c}GG2I+vhQa+ul)@$PW}dMc-V%=87F_!j2G6a{Nx!Ye+!JK zbvrLIUeNw7FkaUERhjVx?Qex~^0#WkYc{;jIQiRP+_zi#+vHsP)nc6Lx6OE3``clB zLDz4Oae7Zci#6%}JNfI-i#R!V+He=+kZ8E-axAM2eIMr{Paq_prxKI1rWxTHa6&DZpxBd4+jFtvkD!DYh@#>rnd zw9THhKocv8QPJZPXr}zcN$=@R5&UL!oGTwfk;w8q(-!kJ}`GpjG zDvW!yzct3=+TS|kB+EX@Ap<7j!x28K-h6 zFi!D{jFZ0$jCarJcFXv}gzCr2jFZ0=#_QVOD&ti>{?<9y{x%pVznV6@Wy9NylfNCt z#RlbXmvNsi=N{wauefx$-Bz`~4#pSseA2}@tphL^Cx6{G++)MNjFZ1U#+%!ezkbHa z-vHy}Z;m+^O4hfN}CS$T-z+nDMmsH_o_6`bJr;`CDbYs_VDLcv$;eXS}NWt0v<ev*CWm$=?9u-K_FA$T;~MW}N(u zGw##=rWx<*_FP~*t^F-B9=J;7XMyqRwThP*7n+wDuj_VLVO(6K>{J=|XetCQ`Riib(Eb{XFKB-~ zjMI8AFXQB|&xZSLcz|*8H^_MP7Ugf4aq>6LIQg4q+^PM|Gw!)k*)K9q>(Uk&Cx1&e zyllfOjFZ1r#=}Qderk-9zjemR-v;AC``ctZt?ReVIIW-SFz(UsgSw1|^>x}~ysG1i zD~8)`L0^v!#_O6p85b9;a&R$D{u+#TohrVY@v5F*c^Rj5em=&je*HE)V8erqQ~ib+ zH`c0r#u=yjO*8J%<&bB*t^F-9UeNWsz_`%sv`UPVzhxU!G49d+`WdJE2W)uIhKCs^f8&f-x2gO3G~?uNp7F3tl}~~3s`j_YcwN_T ziE+R7x6C;CTe0C)8(w3a{H-(YlRwObPlIzkKWQ@F)c&>@FKd6>jQe!`b{P+Ae|wCR zzv2lt?be1n87F^Tj0fgaeHn~X{kjt{T#{S7cq{swJ$*oMa$ zCx6q7FX;X`&p7#8V4VCdGG5UBE^w~>Ei=BL{jD%g{#I>x&4$+*Cx07^8;4c>HW?>> zTa1&xZN}5u-wxwl?Qf59S|=@@XwzI1#zi!6KUk~Htub1(#_SeU_ z(ARH(aazwEWSr_ZY{TO=Jk2=yn`hj2K=-$t>+7`0IQhH4x%RijxKsOEVVu^TR~aXN 
zYc{-Y!yAl~zfH!|^UB{A<5a(G#>w9f<38 z-HfO2Q2u%tCx5++lfOR3J=$MC;|u!!D9E@&uX_$NPX5Mic-n^N87F@WjJKbq{4Fw0 z{w^?1{+1Xww7+G>o7&$hNdP#!<&qgzb(d_)5_mAVb-0Gxvse4;WSr{PWy1{{?q;0)^)PPSs{HjbPX78BCx88n3+-=!@q+d@%y>Zi z8)uySP22Fi4KFZG{uUVz?^FIRFi!rK7$<+rjCU_pb}Ee5wZApS4(Ehp@Cw~na?zZ6`#>rnV#^Zp8}4JA{Pi>53@LvDjFZ1X z#>w9><7w@0obj-(-#p`P?Qel`^0#Qi7i@Tmaq_p!c>9R*x57C2TVwBd4e!|SF5~2HkMZt{mA~Se;dV>@Iv6K^ot$fbU5xv5dv-G()c$%H zCx5**+-Jl6jFZ0s#sl&XM&J`tNb$zPuh_uKFQ&A*PR6NzU5t~z2IJjJRJpksZ_@i;#_4;`KE}yk zzYP!A@F3&lZT}zCUzhxUrnN=Sk(Si*fST zV4VDQGhWyJdKee_`t>q?QOg|mg^zLiy+=Re?$zpd00NBD_v?d<`*i#;=XyWRIN3=v z9@KX7j8nb~jFX)r<7sVYfpN-5nQ^jTVZ5tNI~WgZ?qZzE-C#VeV?4%j5Q~P9`{3$R_c8ZL*seLj|c1nzsoigL% zB6Xcs7$-Yb#>q~NaYNgwGfs9IjFX)v<34St#W>Ydn{l$!VLYJibUD}iJ;uq7xNf+e zr?njizMeG_NpD_G^rnbpGp%2eh3A<7v&CjFbH~<5g|H z!}x->(`CG>d5>|j@3?-rz0q~zWZb9g(ZzUJbAxfR?_r#-A1~ttZO6xWS#v+*WIxC_ zT~}eo>)KA7@wVn^#>sx3ahh)w7%%GUw8%JJ*9(l7bo>(I)Ss6bCp#6+wVf*CWT(zJ z`O{!r_|$?2Cj8i%L7$-Y^#>K^YzREb|Bgi<}2{Z1~cH)eaoiyWQ zC(n36+bJ+kc8ZLXodw40+D?gavQuW9>{J*Rk5}bWWt{BP7$-Y*#(mmOgK^47lX0@s zV!WX3v>7Km9mdH{m+`u`(_@_c5>Fd$w`9k`xVS`>kCSoAhl_EtV=(U1cHE4U9WUdQ z&j91?JM_43!^4cz-;YQ$?$hOxXWWo~_!XZ5gyL#=|;(m+`XBPmgiRk61I@-iSLGr~J4WFY5dl zj2Co%+>FhI_}Cco<1uPTc#bbe}#Q#sddc!P2BtI2p(`_*E+d`#uP z&3Hlk)n%Oe-yY*_?U%T5xP2PhF9+k~m(zy37$?6B&Y!9B?`FJxui_rY#RiX z=cmTFPv@u3ctPij!&C$Dwhu9nM>6;+hg3gNwrI%m99s!<6u0c z?YI~xJEUYMugk~H;?HS29>(dsUd9tTzMt{5-VZQNc7lx0XghJn$qp&cf1bsUYC8qS z>AXe8!#aM6@w(nGGfs9YjNhm2)EFl_q&)u(7XMytr^z^-x5fC~I(~<7x*zE>PIh{X zAJujo`aXv8Np?tyQ~PnT_(?tAGZ?4yx*5MipV!NHSnvB7Cp&(|4{AF>#>oyT&wrf7 zKcMZT8K?8+8Q-Jh7a6bW{RPI!PKohd+D?UWvO~)AUt{riXghVr>AVfbx9a#U#>HCI z|Fjt=I~~R+w4EN~WQUaJ-=WV#e51DGWSq|HVtk#B?`Axp_dSf09WUcIX*+(#$qp&y zliGif#b2ZCgc+yv#u>j($Imlf*82s<$xe~+r)WDR#>oyT&wqu*zed}sGEV2MG49gw z8;p1Lev@&s(_;Jy+D?aYvO~)A-(&Hw(00VS;dvdM*TMLuI=+i>pWZhZCp&J&FV=Rv zjFTNwo_{}!zgpV~Fiz(UGJb)MA7^|)@243jJ9);(w4Ea3WQUaJzr^B?Xgg)b>AV%j zf2qfd8slxfUuT@`G#LMxw$oyq?2z*OcUb(Mw$o*t&f8=BhdRFF=Hd3{(fdxu$&QQh 
z?`b=3#>oyT&%c+&|E{*9XgdYQ>AXe8KcVB77_aO7GUH^Y!uZFvof_k0hm_~P!Qy{R+i5aR=WQ|m5got7 zxO0PQKV8PjPLJ^qX*&*`j`B%%NQqPXak2Ox)OHNU>AY^n->2hy84v4yALC@l&-lBw zogm|6hm_|(&f-_KoiyWg-aO-P*YS&tSM~k^<7B7A_+M%}6~@U9DbIh6#eYcKsWVRJ zZ7}}lI)00Bu~D_3HsfTc!}uGtogU+4hm_~vq0d9Sr0qBvr}MfPe~pgsW;~$xJ&cnb zFXMlt?f4ldJEW9PYX3nN{}tL!m~lF9objTLpJ%+R_X~`Zog(9ZpzV|xCp)A({}mSh zMcPi4aXN2}@fYa$4aU2AzsWe+X)*p>ZKuOH*&*fm@3Hs~Xgfl`ccu7rUI*i6bbJ@% z^xoEBob0$6&uTkf#>oyT<%imjpT&O;<)^EjBT{_#UUfZ&S^WB+sCaS4o0_K?Z)=`s zysLSE^OCY(!`Zm{xC>+*Noa1ZB)RDHFWeenVF@vvw!{_+ni-eLUIzbM{ieD&Wc-eWwe;|q^E zKeeAf{$~~6!T5o{SKP_?2kVNv7{5fH*I=Cdax?z7HD$-c_^yvA?q&R=4aI$o_o|Bf z8GnY(e}M6?en`a+GXCMZ;$g<;Kd*S4@z;D&@igNZUCw#Nzxo9gzrgt0-lKSt@fW{S z@dd`8{5i!-jQ`>h#mkIeuIr`3_=|M@tBkMG?XbppQOB<{?$h{+ScTujUtmS@YwwaI1a4h0`@D%>^v{B8&Z?h0Al$Cv4%5BM6OKxGE`iFm2(| zH~Qo){9=O8f`wmV;YAC7yoE1V_@x$JvT&z`mo5A<3$IxCn*%#;Wt=#%fg>#;cW|FW8obOztO_G7Jie3_bl9P;bM!bJz3{#E!<(@>nz-9 z;Wt~j%fi=NxMATNEZlA3S6jHp!Z%vD*TS!~aG!-=Vc~uYe}aVvEc}TU9<=Z$S$NpO zT^1g<@T)95ZQ)P0@VtdzW8no0zt+Nw7XB0qU$F2^7GAP&kA;^le8R#j7Cvd=RSVy2 z;WZ22V&QcQ-)i9v3*TnpO$)!p!dn*ZweYru@38QWh2Lu7T?^l7;XMoAW#MA$p#AT* zaEFCI-NKy~?z3>0h3~O&!@_T~aJPl;w{VYzAGC0Ve{Kpj(_M{@u+M3{nv`0TqU<(cy(>$UDt}&%db_wZ3!`6!OQb#(mQqvE4-Q=ZBEclB*oy=+7z$6uu~ zY>aOe9(1=jZhyp?{uNUH!Pxja$Hd#llzWTW8zx3>dj9yrs*x{`{3)XU#n||3SBX!K z;pAT%8-M+(vH!J79;~fG=ZM44lT|BorNhyGd)!Yje82GWV_$o+c;pG=U%g7a^-0x{ zZ(JoldDS@3KVLPD?fpVr=|FW8z(7xZ-j6JcWg$&YABcqu(7Jf8CgPkla4? 
z5!ByD$Hu=gCcdiK>ii3SG9vy^7xM+*8@cL_My2&tSE_3$c- zZpHatHZqQ1y=acy_lx0t?!+-%AwL`y^^yC&HGE8V!!H^chiBE1`#v{x?3zDZjf0pKp>FJ3hMfy=~)F2?4U9)JI%mx=FQicS0( z5o|u_9RHP5n14dzbI$P>T_%3&l=wB5VJ0ezequak>P1K)_iq}%cW8f^%XO28;mnW!&8YPFe~*fl zSwx4&KRF`q8UNUjt;=3$ntA2O_-9AOce$}UWRf6D;<#~qkJu^uR&zXaUb*CWruxfO zuCeP(r&g(~uX@5V``3=%Iu!rfvAyTpzhL_tNB&FIxUPT;|9IpzBY1EmOH$S?^nz`F zIr0h>NoV5%|7S*DqT-CJI8Vwz+hea9`SeBNwXpFo7l|*+rem7HH~_uq(<7&a_`&$T zvctp|sKXaMGCKab)#9bAFm;l#FIV{T-bf2KTMu0^Uhs>5x}t{(zC3!NI(ot9FByOP zWhRlW5=Sn4;n@4e7cMeU3P_GQm>;Zij&B=zfw`;pFTfqKb>A_5`FW2&?&9ouk3V^I z;=IQ%To*g<@m1qD*&n}b#37#v9~(b2Xd`P!uFF5=2P5LMBjX zi({KWF_>9CYog+4Y$le8C8uMD63OU}9Y^nu%+ANekyJExXXaQglSrO)uN8+AxmYGL zduJN|=297PEFGIpL}ri8Pi1q7+11?nb}lCrahU+;^bMe@1D@!1A9XA z$!KgQk&H#%IJ9fc?EKo*0(O(Bvq>YJN+c22fIF!uaxUE2-2BuIV`lz=2SO)fx#@T$ zvsPy(7Tsa2iLM@sGL7`1nb|}dks`@xYR*W`&rQWLh&jJn?8#&z=Z>FC$H@I7k-7dh zib54&B9+Z$Vv#vUW7D&8$1P?NPsp5Yo($#sMX?)^sf2d! z+ATIllBwjmxzv1iqnJ%h%_q;w>N4gcvoop8Tr6tn;-T`w{+^=;3>9y^EXGL`eae_h zoLqfg17{+$q14PwHkJ!zXBJx-7|GAgZ58-uVggm4J2pQvlgNv`spJ_{7TN=~6|=xL ziH#x`&!oi)3=f*+edmBL@x)`Kt;fl#WNTu?cr9A4;8?+O;OVc83u< zaddBJ@0~}EA3Snmf9S-Kdk!AiXPa~~gkqmYBeYsTgU+Usk=aD*lJk?o5h-Wv1T?Z4x;&=jAiH|+`SuDVKSQwO-HghTqW7WNnAKaD1^&oV$$0$ zbt4*^jpbtfeY8DmIZav{%+P_QN?|%OJBw;VEu_-8@{-fC)>LhTPV5QoKe8`$=Yi0% z<9qfV>bsOorH2Y*QM1N1ADW(toUc93#*!yd<2z;b=TLsq=bdSI8p`cukXr<_2&R)+7)zYb;zc_Yy|826I$2C2)YPfz5dMix z=fwVG1YO|4nLW9jwU5*tgYFjweW&8oUzlA(awcV<`eO!~m!VpIa(;F;os-uL^#?{W zc2*rj^9Z51Q`3W8)x%h8EOf%`EJC-(at8x$G>BX*pObCg*tJ_-IzweI+aVkdq04Yj z&!a_W5ZCNMj?B;PlFj)>^bzwhX<@Z2ngg<bhM^71IIp!d7KwUfxru%mR?@S@~?~fmVDqSo?144ND!1jcg>5kR3P z@@8S*mWiOBGdIx@i%q-4hCvDHj^p0@RQuB%c0XIPQ|-6C+mBk>p4pzznC#mihPS19 zY3f~U%Tteoy2GIBdA*^=G(BO^k#0Y7A{5D-%!aaw2V!nDmr%WjVfLGbY&pidfj>L; z9yl0MrMlBX52ui=J<%w7=IpUF%4_$I9cfwidk^k+?=fy%Z&)G^OdyikJ=q>c^3Xxq z(ATXs?%iYTvYJ_R&ijNMO2tfMHX9Rb4o41)4TCN}8&72h9muBs)$rVG1=Y6X2q~|jG)ADQbH>o{Fw_Ap>ih>&k|p|ov)YW*d=51% zXKg2AtNX)>Oc+Ctfoj1RG-wM0w#%G4paRUnV{z+9#!g}|ITNEP5qi!TCZJKAHHzLZ 
zlQpj_uQ;%8zkw-*@jxt-(#A}d$YLgd9{QXSF;JTrq%le#$fV{1scga=7`Az zqaTe$&7RY2E9TZd%pA-Oby**kBOW@?T?12s+M)x>T-jSe1OiChN0WBL;{va_)%jM2uR zPOTXYVQf6TQT`Lk;#x+|mB%sA%;M~O(02%`1#}CT^{*3pl4TA$a&B*!6MSf%kIxnaEhMJxmOLp?v2yUPfnBS-7XHorfmKnz#?0RDgeE@C)WyPB5CuOWC zBCC4>9nYGhWk+vBz%)CCd8}$Pm@t{Q3B!|E%Z0J5Mf2Vylt@#@Bj*CU)@0EG;a-NI z#czfBJ(?cYqDbZtQ%QByO^!=+-dT-8?jrGbaimIU8B4dpg`6 z&(0g@!?GALu!DKAIg>NHp*yEe?G0p7xs=)M+>Pl=Dr2_46UihlC$m#iJyIfxshio_ zFh|?4d);94GOr+NQ+OOfo4fm4mNvIxw@5^H4kpCry^p1UtW`982S~Mz%L$z}=13T* zQn?%+11w$dxP1H5Udss<)quX8rm^FEg+4rcu?}8?{*7A6(3q7;q^&Jm^?UP4+4C*x z9js49V);aFtudcWJZC-@ibYSxvSw}Kxk&$kgLy-emD8HFVlT!rvx+c5LLV5*_iJF+ zu0>PXJ%NMA1V%kWKN2u78p>;N!a$F{E${S3}2Q%ybT^63K>N}~y$#?9NP2wO*GBa;J70S{bka509=e^S! zx_6@9I2%&WkZ~J^%K%S})lHbXr#s)NcI?1VHJdtV-gM!%O{F+$+_-}Kul2^!;X5>* zoBJ@Jp%=#ps3x#65m3*o&Eb%yk>#UV#p39FEUf+-Qc#N>C)*lc&?w0;HF||x`YR3$rv7U;eJoO zE?5(p5qLx?)@0rNTMDyboJnMI=yxoyPj22NtgiP&SDf#m^WdOS!+YZg-=gOS1)%NE$Y?G2X2xT&0-2SbNvz-lnoy3Su(8$ zZo3yHzu`n@sH<31E_yn!M4>G4lt;wmCVB35D3D3~AI(04=!)4mP zli8U~;ZeLvcMJ2z+@5DPzyz4?so!z#j zU+DJ_6V*VkT=0Dr@n;`df{ea(*BmbC?Z#(euki&7B7E zp`rLZvHshJ(?J0&eh=DzO}s>7>OXbznD2!26alXQ={+(%dbwU?Vsm&^v}bnqU~;IX zsM{v_Z%DrDz&#TtJnDH1I>a-H>6md6cRKs;JfM5E(3wQ+?2a8-`NljW`VWHS*lyg2 zUJlic`-Mss_om#&jfk6TMfrHYLl0gTbpe_4*w*v*aGGmigJ7nPp26 z%FYneZ$9S-Qty2*$XQ}9aqX%n66Y&#^%6tiEvMLnR~?u-9NmLAfhTTL?>^*kGa(<5 z&Wr3hG{QNdCSbz6|HBJPywEW30V5a`P+pch#;mw~HZ>KQWtS!<@$??UGBT|Bbvio# zo+JAX?>`zkaOVm6K6sfoFa0^+vSWanKP*vNOLcW>q@9^UWtg`;33=}=Z;~;S%tUZf zy!M>jy47sXdbI@#S@w$PwRFA6RzHik9kJ`t+@>S*C*wJM2V|#wy|~+avS|(xG$l>l z5tAz)Om;-yPn&l*$IQt)-pfUA!xRzSoY@KP?GHa@uXhlG>S4@m%$@57L(GN^{VotA zk)=8HTX-aqMTf56^$* z)5y_h5(9JWtmVbi`7a=vVJ*Ar0y8D+)F7QaxoF4&+pQmK)BKX2y5jvQh9q19`tFlv zMjOykVdVuyw<(F?2WIJMdKP8-_+q(-7I;Z7oA2&j24?rUwbO~1y!YCu8nJ$*h6uBx(msuKx8N~^`bYw72&M$sZrS6PUX*q>h)UR8zGyFb(#|}&=k>a>~6kwJG zTPKv6$_v~{?!~obcEB;LViV8pH$^!mwHP1dP&YvNTqo9_Q!g<>_2*ACp}-RsLwh0b z$|r;7s-!If*X$hb&vNo{#(G2CD6qBykAR{yaO1_LT6^dg&&N~?wHeD`F0a=s4c*Dw 
zcgM1iA9~>M>pznX$ro|wdswQTr>!|Tcz`f?_=UDG=q6M&Fvj|N3Ns0~Y} zzPQU=X3Az=w_(oOFb>mFQDJC;a59i=6fN-sw{ zvVq5pJ9b#!yG+kUvRNP+L)>dI?#oA~MsyZyW1<=L3S!-Q^La}MU7@UJ+1r`@v0R%q z=p*Etl%aWpx`oN5a=6^FRu?OKPsTzyx!MP#*z9coJ&r0?>Zfui6XNB*+ptGDCi7J zcgQH?z2aYA`0 z0hU=c9l{GGxhe)7jrl5WaIK(D&`BND^ZUVwDNA0Cn0R5FNa7kouV%Knh*>8%avPR! z%LQNN%i~*Q9jTYeTQS2{-vyv%Hq__IaXy=x$I9)He9p1fP&;^hlO2j@c67@vaZn3p z`j2wZ`RX@=Lys%Ov0QZcsl=ces8%w#me+^?ZmbP@1|;8nnk#^0e;kWmr|!_?n;!Lk zTRsCB8YBCos?D54bu)NopH9T^gi5}Pw?0V_SlTn4kA%#ZO(FRNIY)CTGJn0`01sHAj6%LG(4AS_(#mz``sIXoVhm9Iba5)Rj~th2$2I{men zp?pt2XFdTh2c5y<5;Dujy0B`eT#$hATHb2QO*!Bua#<7a*1X>y6gBV#@!#t&#&{lX z(Q}bxVtx+K8RxKkZ#K4LhxOY!!+Y+bl*h~&xLP7GdkV(lZ%7X_YM%0_3dl9Wp} z@tqJXR93I6&2=0rXfIUCgK}TK#hCM=p`uv25O?f2lfb%6_W`)B?rHBe`<(+xyxP@` zePYKByzo-@EupiS2o|(vb`IIzH$RuYTiQuwcHd`~&!9AxDWK)rp&n6mIbvM|CJ9uE zS^1?btkpG6$J7dVo5I97&fktBx@8Zp^rhl2zB-MwXv_L~P>xW$z~NrJ|CG%uyy&e zbET_xBYI3Wt&ndFB1y}oW0-60;Neo`i1(;5461|v4Dn+5*fhFlBZWthW<}U_!=V{; zu&OVX1*qE-bpX2C#?l$xYs)zDW33;(KxRc1w36#DaOb{bm^A8FX*-XvH;$lBie^&j z+wjas_eofXVP25OaXl_mS$5AF*4=8Z$VD;hmAdj>+7Z~>a;v_NlFK;c!d+_&bsbyV z+2Vd^aZ6J#m}p93cGvP!NXkj{X-v84H+nG1>96*sL0`TnpTaEvv>Zpx(qFcHQQO*d zPz@3L^a!yruG1gHG&FBu$+_&gu`W1j426>M* zYv?h;$Y8|77gyDKMO7g~@#IPbe5p3^fLivwMD%_?U^C82b7y52(x2m~)5sTS@?L52 zC6V2TrR=$L)DtXoNy(xy?=_dZ_V&oPoRR6=9dd$#7O&2~b02Ob@in^j#vXb5D!(L? 
zN*~7rNJi<~W>>mSv;6hW&Nbx9zSv9zuRP7u%Dd*>x2h~~t32LvmAwY#JFm-FZbvQK zv)Gn_Gi!W)e8!UgFt)AxWRKFzsE@G^dMq^wz}>4{U}Wj9&F-3hGm3s|iuUQZt<1|( zFU~Wt&em#AzLLfA2Rt0YcT7`w0FS|me%t9fo$tup`G*6FPkl>lZpd8f$g$k!P-tRe zi&|t(r`oU03pzd-3Pt-*qWVY0;JA8!95T&Vj*}fyZEtc*2#=7>^;jxNKUe2TD{WAE zWZHVhq2$pGqvE&@Ip~WK9z^&Q!WR*~h42f6hY=n{c+)+j;v|Cq#HiSyRc=%G!QF#x zVp1Pv{7&q9-!m#22nIp`KLg}NFc2IFRj?950U?gieHZK^co19&&3EEgO%Tcm?stp| zCqm;9q(dkn6cFMFP5gdJ8X<__LvSNF5$bOr6%~X4f&(FpP=6cFkKjP){uS&X1Q2`( zoeFFr6cBoU37ZH4q4QR71OuV|7x-xwgfKz?p@*Mp2_iTVYHxuZg!Y>eAEAa2M`*kW zapB)%@c9P_Uq|>ignvZ12;92=w_qPYknw)_rBU(me;W=}u;db)$CLZ(5$|9BU8R2z z+QF}=EEUg=y#n$7xx0K=K%D=Mki##)oX^hj@1OtpQfVi38YlFZ$miJ~jEWl&_FXt; zo~wy%O80TZc{9TE5#EQmN0Fah-y0QIBYX$pQwTp99}|V|qK&K|ympm}`$sxWcg#NR z0UhUWm$175cK%)4dJk;85Wk=Ei|;HB(?4I#mWpS`zO=&hYk#)%dENs4DwJ6md86R7 z>_7730jJ|qzha3){MkQ38IO#KD{*|e@HVl0Jl6_sS9Jp;4}?8y_=6>w^|;;k$1LgmmTkNEk{j@8spGHw)zbVY zXprCy$9|yT-&dPZ<+$yjpEPeASqky-sb*V>0Xn zlloOd?<4Mdo%Z86w(nZ}0^K!Z;yKu+xRmyMdJWRdBJ4wW81`O_@F>plAi^xdK7?x! zq>UdwbxeE;;UR?QB9L8qzT?;xtmZAPa6|YgZ}VF zlnH{||Dxs(X_f36H~hr$YY;xQW=tH0z5==vVHJXmBg1c3-3gk9Jb_P(y}d&&0s&b7QQ2oGCa$&P3(8AYmg5*#|N#j|o5?{fH3 z!8P#Ph91suwebPAZ#`z+{!_$z5yGRW6M?p}l5l2|iWh=jh|t|QCc+zV{`F(xwKuC{ z&xghl?$Y}^pf?~~jL>7}`6~9C7WOE%D+n(^DDNHkH zienuF`m8%LF&Vo3@SV5emz_dKaO1j-KQ!JVpKI9OMJ}IyVsrn&$P$*8PrtqY+gwX! zV)^tFw=DYQ0E?mJ(|hH6H2H4al8Qp><~kbctoqg33&3es;L`T{>Q zu!8gxw4!{eHn4*9oBL}jmZ*glq~A{Gx2$OId$D}_?dJ2&B~HD<`k$DjcgjoISpNJI zlh)Pu{fz4-uzdPS&r;u0COgZgpV&tEr!S^bTA76vq~EGnNYlk@O}>)!6MDIXybg!b zuI&7i6Y?wBi`GVJODo8~XYe~Zi%!3S{BJ)j-vKO{`IVL5=Ap$SEDI~i|KO`>Eb+?D zzm zvi#q2p4D$wQ?fW0eOBV%U^!KPRbAii3erznf3l&UQ0HX%^j`Y42cFcluzdQ7t?YMj zhU}~)J^Pu&Uo-vW1eQNwNih}*SijJvt*qeu9()Z<|8CFFsaNX1ckzKe5xdwS0P75aaR{+WvzihWu@|Co|tp~9_9>2M~{+oQ)Nv z-)8+9;z4{b_~6X`zLDQ3ee}0X9~-2nYRXI3?j_~sAVm_Kby zjRATrH|a4C@L#&oabRx(Ki`8@3Tow?w2xnk%q8&qPl{NKnf4|o@iXuE-sqBtaXviJ zQ47%J55=1EPpbtSpHyEOuuVUSRX_Ok(2)6cRcl(gE%R$0802>g{WSGb77>4|{AC04 zx2r7?P+mFQUz^_tQg*$vn5MDF&paj4ndK%U;hSLn&yfj_T$rU6Y2k+)@k1GjxlrF! 
zza8}xu#KVglUM~YgWO@him&3%uEfkx`iWaY`X`K5oPnYAlkf%SH|4>{YwoH%}IT67BhjIdGWU_=f zB?9>W&?zkE3gG`kvtmvJ@c*GXkrx5{e<&}`i2(jTbPh|l0{H*XjEIT={y!8I_?<8O ze+a)AhXXPk!aw_POol`FXCDsBaLC+9$_@M&hzQ87q)6ds!myDNCq)4NA3BM4NByd7 zgVvAl{xRVR;W;(5gdq1XV4;Ei3PKG*9{(!h&xY|%3t+$wW$Ar6x=j;fZ5qj9a6?#9yUW6G0Cqfbdf2>5f5Dp=95YK>ip~s*l zgf>DQ;hP9mgdZS0@nvJ;=g_A>8_*k}HE0uI4BKUB3!#E=5%zt5G$vk$5Jh+;LJ*;X z@Lq)1ApA9gj8jKAif})|I}i>dycFR8!kZCZxS+znK=-|BOnd|3>Q}=K^bY8Ap)$@E zY`d`?Mi|BR#n2K$7sr~=Z@p$r{O<=*UkDo!evJLgpizV;ARI&}BD@M=4&n6(M-gP4 zw;=cs?nQVQ;TD852-40A5JpO4;`jc1Ox*Sc1nBQUfBC0)A5$I^fj5nbFTZ6>eCN$5 z>(?Vce+EC_I3|99H2;oc&jyeD#hA!EG$yVCdoj}8|JE__`>z`l?}44W|MGVio`d{6 ziv0fmyHIEEMw@}Y9zpJJe$Q}tE7&U$MiEaQza6>{!Gm~5p|6J45mtZjw+pww59Rz3 z)WwI!#LJ*x`Rg&U4*LIx|L>K-7tj`{-Y)yF+IB;qjk-MmJy9DIe~0>w;!g+4xH9a( z{uMa>9IzhtUyc3OV*7gt6@q}d~KZX4gw%?1;L^y+U z^d7}^i0wBZ{w(yf&^F?I9rj(=UW?#Cygx*E1=#;&co^sZpA5h4^?MP@_e1X;6F)(j z*0suQ89t8vdvSf=kKjUCKLg=U5FSE!9>PZurV(VEe}L{nxEtYJ2(o_C2sa?S9N`MY zc_MkvAatkky zJ$CCVENXx1Dj_;w7s9}{tAphLP(L&b4MOwKqSUtuQTc`tCG5ALHK_Z)glIzj&<->X z6Yhdu-iLf;2X zLmN={+rhqz0F6VPYmkR7e1l#B^?V9(pp{1uAKHY*zlZeiLVmxG@`F}>fOPMM{t$Mg z{s`qE^~cDo)Stl4d!Rk|D)pyGC-rAI*L&gT&++%{rTzl-BlVX!kJMkm?)#vR;kZ=% zt-S@QBO{^-?Tn5HJe!~_%9j} z0cif>5mAA59zP;_(BP#b!v8_|=^POysQ2;_(ScU37!lsTM*1h%oDb@ShM_HJ6`H>m z`Me3|dkXRitwGDs;8RCL7utjR+^}Q7F0^qS>_Uq-pgiz3FaH|YgT|qGXbHLib=-*Z zfx4hAs2kda2BD61hz~WOd8ij!fd-&0XdLReX$0%$kY8vC8iZD%d1wP#gtnnIXb;+f zI&VfgH_8d>hWem>Xb>8NrlEOg5n6;c-VR*{e;`~ePZj|dmk zu^#0KbwmBoI5Z5cK=aTRbOG9dR-ldzI1khXZA1Ogp5zS4e zf_k7es2|#bc3{VU8~l?TT9h1Gfx7p?9@GnML;cVmGzfKikbkHfTG@yA(As{~H@-mB zIsm_*-rJEL8uX(*Od|b3oLA~I;6JnmEkWal;6JnnZ9prB5q~p|L-AJ#ME(xcE7Wxa z?M-s1a|_A~>W1d;M7=;A0i=gkpsuZme;53PHlPJ){3xz-sN)#yZo@v*3++MUQ1@}{ zKLUFv5Fh)lyI~jmL1-2GC1^*+hl*R^-#xGc^+UbTAT$7tL*vjqv;ZwaOVARu3avmJ z&<3;(Z9#j`4%F#|eW)Ai2%^5BE@%+yhNhujXc6j%mZ3pt4H}0wp+#s1>Ut*fwH@b! 
z7NOo};W~xJ?}hyxNcU{y4_bkiK-^r_GVs25s+-UY2gPeEJI7eTwwH$pw1 z!T198Lq7uzL%$BqL&Xg2K(B&Upp(!pbPDSDM++eO+&rVqCB2O{Xkn# z_a3B!`k?L{;zJwI5;Q)Kcu>a~T!*)zeL;gz|5+S|cAzb(dDz(td*_g!M-cD#P|xpx zzt2Pcf|sBR&<3;u^*{DQim4X6j&f%>72KS2A2`k{Gf5xM}aK`YP(v<_`S+t3cQ2X(y!`Mn(+ z8i017aj5&H@EhudmY{xU6`F@OpcQBvT7&kW-Xi?36u;2O< zV=m66y*->{2Agyy=CP45I8glZM_M0p`AD5xYW16&SCiV5bQvEP%pIj z7bq{N`>iNvs25t1$DuuF7L1PwrI&^WXKEkIk)60`%YLVM5#)bVcAE7S$;LETX2 zQJfd*hWepCXb>8N=Amh*<2|qgEkV0b_j_@kD$eshw1;Cj@B85g)b#MS1)a$d5b@ zZOY@&4m1xH&qO|C8!7SYfwM52aSWr zzld^ywxDszp#`Y-OUUPEkq@XF8ie|wacB^lhn5kq2(3YD&=#}>?Ls?H$FpD$YCv88 zj(kA9&>%GaA1EJa1=@uA+bADs2Ws4lbWktU@n!e}bwk_G8dN+R@xOw2&^*)!Z9wBt z*Z+aNN1!Ec~EK7xGx7uqrG`=N1Y99o3tp=IcU z&?@|X7}|yYE7bc>LWpnT`i8E9wxGwM#y<=3d}sjs6=)Im9p6GcqeUXdYUGE-B1tqgV3f-5A8tn zP!UFcpf0H6hlmHQLH$tgk6<6#f##t-=mOO7W0Vin1+7Ef&=%AS?Lz%fM+E1C8qhe@ z3(Z3V&>}PrEkO&=3bX{RL95UPv;l2F+t3cQ2kk+fQ#k)mP+m|M)CYA#gHSIt4fR8d z&>*x7jYDhDJhTZdLOakBR7~UiP#3fY^*|d?KePo6Lp#tsv)I(qOQ>VnpxZfFbY zg?6ESs3VH=Lk(yg>V@W^c%vzb&^WXNEkG;K60`=bLL1Nqv;}QLJJ24q2X)49{-45s zs0-?Yx}ia+7n+9pp+#sAT874w zEVkTq zZYW+@3okSd^+OBLAhZOHL#xm{v;i$b+t3oU2dzMzr*M9#8`^;Spe<+++JUB_J!lc? 
z_$B;@x}Y_v8`^|=p&h6nDo*44P!}`~^+5AbKePx9Lrc&+v;tj#)}R$=16qf+pe<+z z+J*L@j#-@lSMVR|f_kBDC|=Kt|I6O{$H!Gv|NnP4+je2=CP*nkf|cbAwq;o(vp~@M5$PzYK5v*3s$H#@-7f)X@gXVPGmZUKrxe zGx#mgVen39Klm_|Ljp$O819BcmqJZwcr5qbp{>xB(9&_da|1d8Eo~;9FyYX~!;mB2 zp}V2s!+Af)7YySOXzmuu1vUBJIG*=)Kv#Z<^ll=*(V$TdHFJ1}1GFDH2rWGVJwR7N z^KT|!ALjiV&_QS?wDm~JCHMsRpb2Pk3;8~ZJNeMTiR2erdo<;Qnz`g-KIJ`zcM?Dc zKY|{h{l`*{TacGWIiRJ-Q4Z(`bOgHLqd}v73;Ka3&{J(b<+v64G5De838VupJrRCr zVFCQDl=CF&2fFm*=tJnql=F+E^9j<2cAiT8Lsu3N4sHA-<-84gI`s^#okaPd=BIft z!R^E^AwD#GCgIS+Qo^B~lZg*qc@E)il;>RZ2rWF1aOl!<^!z2_O+i1<#;N2F`gn|R z$~6eB9-zF_hzH(Ug&jgiW+3klXq@z*g|jFpG+d1yp#7gEzjwl4gS|m(=TI-u!dmnV z&7F(=E+G9n!l8e@969(qBhclfmwN^Bz!T5}wDBtBeHr;zlRs!=EvjiME@^$2*pWOSfo2L!qod=LJME;gxUwkiJjy&P}A@ZQ+kI~m6^0kI?L07NE zPRQpVbO`>!b?D=6=o9D(8h(m$LvPtYy1$|xq502{&jj|-Nj`R9AJE!AQ%-2%PU3fw zpIwAQM|i)(Ak=(|{C&kRK4S1L6w)h&77r30&NAx3JF_|40?j=#%gFEM`%&P~{#?$o zK*QYU5BH$ABz*D%Tp_>TO3XyJ40O>p0}9=r|O3Y9ys zeb8a(N~qkM9f0O?hjtiR2;B`Wg@zYXE@(bf?%kF`JE7IkrO-xbKXd_f5V{nafc8Vf z+}j<77D7j$CN%dRpGyyGzhEGQhv=G`1EroVMP3UrH zEwmrn3LS^4WCK=p@qY03%LYu)` zp|#*kp{>v-p*jDJeCSfX7hZ&1XzRt`-zFcIl3!@$<-~(8_X_w#K6E+waD$u~<9j3Z z{2j`FE&R~dCh7%Rcs=3J1T=p+_zjd3ItXosnl~Z`x)eGD&22_s&_-zfcggn`s26A} zv>qD13B5pbZ-yV*4>cado?0j`bP#IF_xZ?|@6bMI<1N?+Gy&Z$-*3e(9wPtHQfT;# z@I#kE%@u^-#&_sSXeTrQ?IXPPOXwMzxPyFyk3jpO;XBC>G#8o>x**Fiz6bp>=|Bsi z#n4u$2_1yi3w{@Jp^ea$5)K`Jc0z|Gyq$XZKKWcoexM`JPH1Tl<$(70Qjb3%KR=>; z(2<9+hn3{#7t|M2&Z(`0Hgd*j7}^Tm4ISppTKNwt-_xWI9b8YjpkdBN6#vLj&jgDY zf!i)H0n!>EYn%!`KkKQ$WGVP()E!cT`vczf`A6((Or zZXJKy2uGarh|CuLhQUu1RK>f(i6`N7QRLA-T;#6dFPHH1p;mk=z0G{B20ub@{Rsbd z{%U>jz2NoWM`~Un!asowH-bk!coDepd+`{!pZq$_@0H(@L4J?sFZQIn0=$)UPx9dF z!27_lLg$hEZvpT0!C96v7Jx&XNBBcQOTHVIddLHBhQCy}si!7Y|5d84BwiWe{qz$> zI(!k|<#UaeXUAIYY&g+BB%mE5s75!y954}q%+2X92|Gf81TVy<5&w# z=idyz+6UjBLH=IwVC1$@|p$H9BRhrk~b zUi8}=oEFXPk@OdZV$s6(F|lazRfk6l0Z=mxsCioC^B%-h854P1kyrk=LnBY}T|@y_ zBky?0pMC_7f%k)d!Umy|ejWG_I4+;NJTWOxg_LJ{wD=32+*d|!Pc5Lt(SkeC0? 
zp_8{4dCkbf)pF-AMc+4j@;5Eg=0U8MEcuI%rM(_PdCQQO^LN9*)q5BD`%=dI?LuDt zA(U4(4tqe}A0&U%qB)C$Q=+-OS(VZJ9?PG&2R)z?(Om=ESXVF1Wq3Ev% zdDSCo3Vk4XS9s)2^OIMGysgOFhP;01*pvCAzjEg<>&$>hptK-~pGOB#|IhapU)n_- z@wfd8|CG=6BX|pV0{jFUgckAFBOMZh@{NM4{+0fxJ!^V2KSd{l7nxSj_t^1XNoxX3@rN!1$1;^5UjcmsGNxOZG{ z17F~SF9BZ)?)76h}Tx@+XBh}W2>5(r) z3ui`)XQ>WUOOkZABWLA)!}zGigb2PDd^LE12cLk64T5|1Uj#nngU7&!!6%EHmZ%!; zM1Gwz)j%5F0$zw0_Hj?TJ>ccwZhOvyX9ajExX85H#b=cdCSL{Hpu^qq1m6l?>`A8# zd=Iz@{y3kdUuX|9B;BI4I(@y#Xdk@BR_%;vVLVz~lhmllEg6q~Joywn4n6?xwW|j3 zVQ{Z~wt)}%;7hdplO=!p5&5gYx54klH-L|TyUSk$|5otb;0N2EjQ z!QJ&GHd6*(4qm}$vCl<8Q}R{#J(Nac!JUlp@G2V;>n+Y(} z0;Wg47%jyU^ziEpv_Myo{s`$G?xbJoq+coNliH%OF`fPmzR6caf;aglUm3Z=FL_Bf zkDjcL$?!*qTz7%+AUMtY0s z5AAtR@B!cWv+VdKM2Qi9g!p@O{2nVlgB9tow$dcaQsP`w<=$#fZNwWmI$$(NJpD|7 zE&m5-IYUkSE6U zRe_W|>{$1Mjy#qKJn|TQ{N=4fUN!Q*qvb6QBCl84rS@-WRrudte>oOGBFb@^rbHJ# zjAx-{Ir6-EC;(py{u3u1qUvrbl8RWZN-_>0)~#uMK(Sc>#l^^UU^8<%vBba(-G0THa>l^||C} zdmwH1d`IRIDQb)H{ zOAp{8rvd&>_=|;KKO(0MeD_BK#xlW_9NMiV=W~vnDyyZIz_JSd<;Mq%&j>&I*X2vj zH)lkPtD=R~ZWEF8w-awA@l<~*ad~81-7D#XpCy3uudiodof4scw91depOYUj+umpejCznw?;+rTA#cY70j33vkB?av9m3VfRnz5#p~9O68} zzg6>l=OKGEzZV~WH2$cM`~vVnAG{2Fzz44Z@Atu*z*qa=9pF+wUjJnoxYV~7Ujx1r zoMF#-q`aHK`@r3Po3yj-n*Sc*PPSX@Rj)(QkF$1}((P78f>S+^TIdmZB_tsF?o5?O z3reojrOV5_Zq$LiTJjrtP1FFKa_&q}i$s3#vua_YfRB5? zN5DTJxUvi7FEVP=PNqk5@D){g^JI|tWWadA&8ODmZpV5oKYTi)MIqykWiez&TUAAb zu|$8Cp-@CM@oxe0&UWO@W4y5jO6k9P2wzUPH@_>u6W|BRuhjcy_;$mGE3V4rZ%aM%3{BFX@G<(E_YuE__-0AKxYtQPIloKJ7g>%;DZaHN zp~_GEmG3dWs(<2#&j=WMC4JS;pnvzcs>W-*UR56K@*SsiyAyc@Ct%OW+jVgAZuOO? 
zW_{=u87;^g!I{84aUQ{Yz-5t5i2W4tVFf@gc$;hPvmTYzY%`# z{9zY(y$>FugR2FfB=Yqma`M0zfP4M#67W_ZJPzIr?p`MrISt^QK6o4Wa&T|DOTY(x z@KxZu!H=*r2^IMpz(;)Wt>DHv>G|KI`MvoZk8_X%zc)Vx;HBW+bj!fY!I@4ukL0HY z-1Nblz^lP+A4MVwe@6!XWt!hT|BS)E27Kwc0nfe=-HD7BQzNW+Sp&y~)`wW+TK|2a zy~wHmOu#r*GNK>J&xDhxUvPq*NAM!>K5$vavLC@?;7fh*I`HM-lZDx}FVq4)=!5rw z?*{kQ%L?%Dd8ziChsbr{2Dln$RJ%CeSvQw_ZPoI|@Y#L@-vgfTq$B;#cu5#}j|;QC zFZ4yF%$T(=#90eVwKM$oh2q3t;K`??*8tuM{%Mh`AHmzemxFuT*AnohKKLr|KJb%m zAyARC0bJf;;l;Ot4}iPNFa60L@P6=v?N162Ebqi{%aL@;BtdY*Igi*(4R|j26hW0= zWA6(!rR)o}!Ph*CJ1W9Q{ps~pH7~dKg=DWIWn)P9%c}l~pEEn@fA$2gb?RTjw-TOD z_-R^xPw+Y?T=Kt{@M6N<>r#SGD5U*@S4uqn2wnuf06gCYp@PT2JALpv@IDVN`DzjV z&jxt?CXcdPndMkV!X$hN;lqTptf|9O_k-5xa2cEY_Jg(%9=;&mu6BVNK6vO9`cKmH z+F>5}ZtyHgM?aEg33vj$Ilve4pSB+0o(I;;hP999u2CSag?cm9&{+~a@sExz*fvZI>Y6yV^6=N$ZgObz^{@Rv&Z!Ox2WrWQzp zm#RgEY3pBTly%Oz3}Tt@I3JR+JNx`8-xm80^Sea?!wFX z90xD{Tyi~IueZ>iN%wLqUDj63O#3iB#P1`%yPpyJSOMM%{xeBW^%H8Isn*-@C)K&Q zp!XQu)W|h<$3a)FcOR_z!5-v|An$vQJhkqSvK|<$v)of>-C<_r;!N^lD82R)x4dNj zrt5QV(!x^~7^nKn6a6khUO)1@dRPU%8vHSxZ})yr(yx|dm>$O(O`t5CV3gqb`f$1?B3gcu+?k@tY4k+iQ=d*F~w$}Y}SId+qhf||cc z{%VjnjJzMG%2W30*n{4MaoLxzJki4nH9Dpl0JA$ zWZw1fy!S{gY z`rzZ4_~d{`J^3sE5BuO{GEoB0@$lDxi+!*S;XE=vG->`WLF1|G63Km9#$!+SCF~M^ z74fA$77E~rpX{%^-Jj)XNc>&IUru~AZmIZw>k{lRq@=HgNwr>I^eN(BdC>bHv>hKu zWm@_^NC$FDufqS4a_L9f!ZPq?@Sh4sJ9F=YFx=^he=NcQ6a9Hw+CE4fQMV%}ft)Q` zPOt1UxX&GMPC#}SQvB>`_O7tgLsdr1Xz9#od0ef$i!7cR{$fmQ8XFFJf8-8dy~H9- z?)?$T_cG*GU!B|^ai1@Cjd%LQ1ZA>@x2eo`OV3yNWV_@dPjza)Q}VqR`P-0>E35SD z_G6R#I?j6evl;T8vQHxUt|>v^jic;qG0eDLkytv>i(@Ma%;LMiRb2QLDzzbo~eXCCRsgdhAoK3o0twfHZU zky+Y*X^`;t^muLH&ET2hEhBu0aJRoA>8$}D1ZNr}^MCv$R{FwK5xJQtde}nvFyRv= zTt9;E0^bJi^*2H+&e75R=DQ}RyT4B^JO8EA`C+`Tn zFNF`kL-_RgAo|Kuly7A&9;)aUK-%59fhw55k2rV-xDx?=*YwO(D(0x zmpS8)+UZc`hkrNx*9gDLA9bxQLY1HJ@;?7?(feM)`v|vsA`xQsw-d_fU%-V&+Lw(N zfeZf;9y|uV)JIMoxbf9=f4T*H#D~8Jd^h;fF8Rv-HUFu)J?VRwYP_Bv9a7C!^14~l z`&w#0Blvdkun)c$JO}(lNl!m{&xL)!T|)Ru!m}h?KZ36UUk)C!L8#yxz?Xu1 
z+v8U7K5*|mZ4Y=S_~9NoH@H*iKSL;DKOysl(|JT#&s|UQ^2VVhR>w~WYulB*WfSW$}F7R?6JXDT7 z``~%t#Xfimc%cs-2haDx8^CjY@HX%qAAAXT*au$)ZusCEz(>$?n*D?C_QCgnCw%bn z6^vg#cmepZ4_*c?_3KTy27J&5Zvr3i!8^eFeeh-At9|e_;46Lb&EU&@@a^DBeek{D zeLnaEHm*8-@FMU9K6nhg)d#NwZ}!1kz#Dz=9`Je}d6KKLqd!w260K0?0~AlQDSpV8M`rv!P`+V>TmDs-zUIZ@Vi?_Tn@KztZ4!qe1Zvk)g!F#~#eef0FwH~~L`dtTJ z4gN)`bo_|kAbVjwvVW00hYcjFdCnr1*u@^=i9W_iJpBkheww8ZFJ1sH`tagq;Gz#N zUIQ-r@ZwG2q7N_L0WSLR;>*BAA6|S7xah-+Zw42Ac=7GvrVqXsyxa$$fJ0yEgBO7p z``|I~LLa;iJl_Xz0nhcpd%$yi@D<=;AAB9S;e&4hAHhHJ>R}i7ZXY~k(jWNXdEnc8 z@DlK0A3P2|d=>a=AAAG2_=Dc^ZUtZNgYN-f>VuE3^7oeu zz&m~T%fQ7y^`=_`-s*!lfs4QFb*ZbgWz-xW*&EVBO_;zs92j2@`?t@R5 zVa;#6`78o2_Ti6#7y96J;Q2my3wW*%-UFWFgRcM&`{3)q4Ig|9_z3e(Z~k|I@Akn% zGqHakJP&-E4_*R3?1RU_hkWn`@IfEE4Sc`{Ujp9mgRcT#?SpRsU+IHy1z+xi?*U)x zgO6wNs?P^60PpnROr4D~@CD!le3tP??@uJ}U$XAoKGwT^Zq1ZsylEr8)L(Y!E8=D)0gDSsr`?_z3uadGM{^%jq9a6kM-k6!CEnct3cX;A;Mzyzk2@m|okr zj@Ao*!TFXy*5u}Im-jve^*MD86}Z;TYTy_D^P|GAAIX0cxcIAHyaQbPTQ9y0T>N1# zz6QL}2j2`nYw+F9~mr#e>=<0=m;sm1J!;qW-ey7j`0$k~OQ@^851aPCFF zuP3>GdSE$X_a(EjALM+O&zAkT*6CQv(FQaV+4%Z`M9; z(b%3by`jZf_XH7Y?WgZ0o#hWC&+F)OH0aUgKX+KAb{uG3lTz-o&oW*u3mAXY<#zXf z$^BWHjBG4B8Dj-{lSh+mu}i)7AaC`z0>;&nK~=BHu87ROB*&ZOZWcXFJOWI!R419F z{cRv#f_OJeJk?&E{iYhr0%@<-^#Zj|um}FqZ|ieKYTa0NzgfbhH?v=?Aae6Apnr$& zc9EORk6L$SrCH~}5-jI1+Ci@mBlC;|_w3SCt-b5Ah0@2aG4v^V#jhbLUg1S9Bri5%0(8@!Fht z>YSC6w)OrO;uU_E^<-@)J?ww5uCLx37))NooE{0zaKzXf-ck=Mh@bnQKDUvq2g@#a z+lWq0S*En-E%3L(zfKqazX>(u_o4r6 z`;&CqNXJ<5pG`;F>2}f?CY@b;7CYAclR6KZ>`&A=2kHF3W^KoDA_ogZJ&boWHGTd| z_5X9w@As19h~AH|>O(BZYNzK%+-C+PZ61l=Li~~U5r2)O|NZxtzUX6*q)+_kB}3@L zeST1#JD2k?oL5a*IGAZ)H*vYS)9!QO<(ZCeiP9`PE~~|#eI#J~R)$oM9WnmNx#aRM zkv$rQV>I@KRk52k;_qI~dYYepzUSz~>DN}lKlIb&dwzWNvDBjv*Ex$_#9#fWZa1lR zs!#nS$4Svw-skWaALAUkkG_)qhV6%BeI?-Qh*%{Sc}>XMy(VD1ad7hPA5GpmRCe+EBJ>)*FMZ1%JV?FUMF zdx_t-K0|tcb<$G@K!v}kj`p`9gT6T(==MW|zYcz5x%*7HJ@_W<8@Xi>80e+ z>i8tx?ZnUhHTMN{{M7RkgyJ7eZ?YKHG<7_y)n$R>K}QH+mim8+s=aJ7>ng^R!4&RwNccf?MSHlC%*jF 
zqRM}Y-Cf~WT16ylf-VV0I76kISI_wOznmxYDYx9R(dn8I-uR-8$4*wQ>D3W``Tudw zMe8B0zgO!5+Rv)a`lx?Bs0o!%{aE%b?PCM!EM*eD??CON$g6LwBNTfLUBUWWg7d&W z<;?n&){54tq*ns}@Q#4N&pM^qcUHFWTNT1gKh3j~_$|aA`3ruXU;Oh@;#YY!JAwFX zh@W^f_1>x2-DYrOxBFfqwB3?H-Qm z+3zjj{W9+NT*k@&VgC77?uL@L6?xVC=9%3OEe_6-^}boMuBGO|);b&AZ6R|(XMtu~ zo1obU|`8J(QQB4U>^x;PMTPrW5t$byZ7NIjLwLfuJ02+e==xHAKe}pmbn?@lM3p8%{QP@-lOdk zAx`2vqQ?R@0!B)L#=iydo9D4!aFwM*$B$w$OJ78CAhm)gnxVu!GwbRteW2qM(ub zN8TDssJ@K}95ntTyEKUCD3gN%^l#Ek9L`3i2z~<5_IPST>8b z$WJLG9aS~4mT_YPata%Q#;g!uGS;sir|N@$$WT{AMy>o!xE6W*#^FmJ;tTT9`VIV+ z_ObG+I~JXa(LicfA!1ZL)FG$uj-b&q)>n>NACaEM-TC1D>lvHV)hO4t2@$K1H`Ea{ z7H0X%)B8r!|6ZA%FM2iBVkEcjso-siEmyJ;AH{zh z@^81Q+wp0fmj5X2FmxUE&At#*Men@q1uI{;#M+M$dnquJ_jsiZC%-l$w~qRJ!kI*>QaZ(QD%lt)iSn|8~qvcHWkZwvCmOM=Gt zb-nq`KZ5spdQYYw3s7K6|H)E^%pVFczQ+6TM)N$rTN1)2o*eZ zy(Rxh51t1u^1b{e8TjKF_!~0txB2krk%uLkUzJzsEqT94!ql~?4h(Bba-k$CICs}B%ws}A?Z+XG&EfOr#bw8Fjdiooj+5HGI7z402r8#BaH z<=5ev^s|ESr5WOjem3ZEZ@OE-mmeVBULEd@H=&vH1qX;%qQkxM;^3C4`&brrd(5{x({<2d1>_ zl;Fo~H=Wd0s8NQjsvE<-+tM}%8mSN4>*3{j#8i&T;guep+$8@@B;|4#j!=yH4 zh?o@#{sf4PWXI1}{z{xA>yX#H{Lsh~eXc@Y^>@?z^$p-A_&6!2euRH3csaPZ5B4MY z9`I5h{_!`{--EmBU-%2ah2M*pfs1@EUIQ-q@#0P3l7BDW0bc8aF9Wal!PkH{`rw})Ui0`Po3TmABdPJb@pc`XMHFB>JiZj|u0QNovv625Mf@U5eS?;RyP zZ+_TCjKsoujVnFqjLW~om&6kBC%%z zllEp-d~)%}i*BKRcqnM_Y9A#}`{T)T6v)%RLo=O8$oGBcSyLmt+{VWp*ZWeE&Jxn; zToE+7oph4tzmofEdcIyB+~+HgB^!?7`6OgH^3p%;C7spZV}9e)->H2EIS0WMFQvm{ z7P`WM+{+khz7)Te`58|4!^jqWyWcaS)-jNGnWb%~v(&{_`t2cp`457|UvzwZ?k$6U zZ}Tl(Ji^Y1PpR$mM55n4q%%l5EV<}<^t`XZU61uQ`O2Ff$!f?n-;(UB{=b#+^oK!% zpDiBUUetT&(syC(@k!drD&!dtr}m%HPi+7XgXi-d{MgR2FPYP(3A|EqQrygserRDiI8;>_GxoZ%OfXd6bPPtNz=yym+aOf%7Vr@t zyazo0v7iz1@UH;R^}*MH=YZ!6Kkm$2J)hVDz7kydJ>Ws=MK7bN^!E@xOt^cVBKaDB zJNfnCqMrirT7GmGlGd*6enRXp4#R5rN`#N@xjooG!ZX!-DI~l}r|4ha z17bfC-lD@lE}%+}SMw>q)UwAVgclP3La|5QQx%*>cBVymTO^h%ad@PESqEQzf6$nv z?bd&N$ljmI%JJ{7tOX~LH~ve=`vvO{dcE!*d2hM{Z+U8Zi45s2Cq2>UD&!6DBj1nkS?paE zF`pJVJ-RKceN2x!-5tEwjUgLZ4-mlh;c@#o!1 zdhmCq^S65WMIUka^Ir&B@6AOY?ZIFUfe6KRn=Cp 
zgsSwH!M_~-Sprz~ul5EOOAM!eCD3X&nZQh-(;?tu``WuM1lz2?% zyydvwE{E{#f^Ro`On<$6{9vh4Kv?oQ;V#^$EkWo04@M+asvgN_3E@i#KOw-E)bjq; zE-y|i{xLFT2P>Md`imyw_5FtZ5RoKxD(i<8P`<|b>mj`QztLlmFDhzEe;13^r*uz0 z3ix+-^~6?w)>cW6!>Kn)0)Fzi7UJqY!@lw=qt(nbsDZR@wOZ0q_1})ZDAz^-NC$l_ zV*J$YE=N%1cQnDb8$NeCk@nL8p8HBVz6`wlcj@aLYrso=@Xg@G;D@Uet$!kaJ9r`Z z+X4k2lMJf->;=zxH65RTA&-D(St8Vbky8Y|8@xggRZgp1G4R5zskmzY;Pv43njd>k z_FvYESWXvyh;zYqSiEXcuEfcJyTHoN^ue%67n26wOflz?vmUkQG{Fk_!iJ7Ra- zX-9Xbwxc>&$1g-5zYiL#z4k}#GpQfj&J`j()Yb-~onxm&`^Hv*-{|@rd?(c%w|c0t zR{bNt{`FwW@2E78ZU=ZH_}Sujq}wS;k8>Mp@cwA$IMN}fKp-4#{}26_cHXiG`~4&R$8j0To9Y(?xA_-J z+ydnnY(QRaR+e$N7_3#_+Rp9?W)%cf$8U8*b6lU{<&pe^?q>WNlVwE3Blgq(P>TMu zE8@Ou_u57LfSAaD4?S;lYq>@8o=8rO6$ zYqg^-@OK_bd4yj-lCNFh3&1b-&zBly-TC@o-wx60uti>3C++p9EbH86y579~_1PK3 z)k_vdUJvqeaW^X!*rL1GiOk9WKzCGwy+Bi?WPkj~zcQEcq%Ttbo7pXAI-R zd=F*6zHh%ZhO%)lKA7V&R7A@yc{T7iRv7AihM~W|wr@XkDh>v!DXomxE&R3G;cush zFOm3Dbo@8d!`DcBJA7wqxa4;We@mwt#y!x`p9m+v`ZsuZ9WKW^>~WsRoq$Ji-@;ugvCK;?E~#Yk{x7%`o1AhHkg` z{O5wsn&BefnHz_eSf7)@K5a#X6Ky^?5gzcvEj^6Kj&ty}l^*`}dsT0?9z;(?V&``F z>#5-}DL>)G?dXz*99mB}wzeoZ<3nR(_hc;&RrZdl>=_$t9~ZppaE!~0Wc}f=({i$2 z44h_UO$(fs6MXD6k-^^*}YvbmjBwp z%!tU_gS^^bX?f3Y_1Et~%3FrKScvxe5?7zmbLbxAu?8}VJummIubG+Gz%$x2CtiFE z-9BEg+V^(y7yga5>l4Wz{gK!vW)JPWhFD9DE&i&-nXZbDA=4#VImwh{UVa{TN%srQ*IYW_P@ z>BO$rA#eA;wOtQd@>15i^u-JY+nAqS?=OK?Ad%91Zzr1ni_dLTmTI8*><>8&{cSZ9{qCN8N1ARta zR>#&NFa7)^nt1#3#*HF17&wbXBJ7UfmyL#CL z@*bSyys5|ug|Tn?Axy~#9X^_zmFaTOzsQLrM}8bw$=UK-rT@(Np(?%kK~ExQ33B>v zIo}>l&co?)w0^cCrx}4NKaHcw$()}8CUS+9TF$3Nlao0=O~~11%h@|TihdqRFBdDd zqMtR$Svt)y#)w^R98C^KF}!-Fca;8PFLLUq8^%BQ9O|>==ra_w7w7D4bMV`qO+f5M z;+HXT&o_1aD-RI=K~H?_MdG&+f14e@PTOMy{aR0Wc8sAj?N_n^Y z)~_DlRv|Cv0>)vH_ov@z{c%TVvAmlj?Oij>XM@~=_8H%hS0n8odApIP%KNY-Z!vqR z_L&PtMdz%1H9Nz#NW5JA9D0yjk={!eKb#ICm>Z={Yjb&>bHB~SmRGxnEy z_p6%qWwGlkExVqQ$*!fpFFJzx?T2`u1LJ1s9^|U|E%ShBGQaH|Gt-*hUZ$r3731*! 
zA3TkSJQBZ&_&NBeFX5MjZnWc1O_?8?%pNjeigO#L3O z(fc-3M*c(?3wF5ZuMJlYMJ z`}0LdQocf+?gHYg`bRHv7CA{td!8>} z$d-Wdd)6~T`3F&s>a4OXU%gC=i2f%W&HRe@ClrQ|_x6jU=|#r3asK1c%m|lCeHH|) zcHKrgyY~c)I?4Bg2TDhcdz=~x$j>YL)aTU5c-B5PBdGhB1;5U86 zRYYz9Pj|;EBUv~47ql`G>?8^Y?lDev=*Q_XRF?f@(ck$;SeGgBu|Yy+r!Ci1$}6ON z@%jVczIok41Om8ipIQhJE~;R~ePxBz;%9$dqy> zJYxdyI;)vd$b|p@RR@{U&4kOnSl>F!7=ep2z6~L>uIZB{3C{{>Wh9gCGU1t1_-Y0_ z3T6t(lmsuy^woy+v3^Y_6P`6GQ=<(&l|fXdEd8hPtOn`w{0bM`m?3}xjAsvjTaV*k zY0!8~<{P&@w{QQf)ZhM1&hM{Hbvvi2_t2`hZZ6^+SFfBnyy@`uFm1#+-HbQ*D@O4b zqT$(*;BEf+oqq6)NY=AKzktffMgFi1`9A4pTMNxmyPd9;v2xfV&iWl@vY$9HlDEX0x)@8dOnKt>URA# zw~tvA>dER2N;yS04R-v;lX7F=SKIiSBwhzT&&D4~;w|9U*!apM-UHrf;}0hB72wy} z_%aKZyvXCq_od-vzBwtI`oeR&p3X*2G4Vp5Ps)$;{Q?_rOyUjTpSSUONnGmbE*o!1 z;!;od+xV49d=>bkHeR2^H-L-1s{CA@#HF78*TyeR;(Ne9u~hTln#9L5Kb~UawfR>U$SvmKeq<_4qh@M zYl)$M4yg9&Q;%OCo2>njB z`6yHMt1df;f#=$|tNz5kb8NgesmB)Zu#HzI@gDG-7F)kB8G6KO-#+>KI!7_>_E+xB z-`3oK)&G7tWBKN|;x~KauOa>?$cNp3`{@5DSA4m5V08e*kw^4g!ry(ioPkvRfBYCN zKaw$jKMN(x$L*8k_$+y8wB_Dm$@R(Kd{=z(m)arQ`I}+Osm>_plP)=OvNEN^u;mol za!#`3_|(@QIC^W>m$R-X@g?tPvVP6-z7aYrsVA|g3vB%KBrf)Jn~k5G#9P4cV|`P} zIX;Q^fbX;Mqm%dw;V;(w*%qG6cZz?I_QMc%z89aF`ujmX_3)QqvfRFMO2|ju4Bei- zxh`cqZsk3{%CCLZ!hPiTy5y(5U|jMk^542KV6g3IgyNQ*WH}vwuF@rkdnBpjS)AYP zVB6jZ6=%%H7+3slne3$gf2`l_4NcA{=ZQd4U%Wrd-;cQK6V|=K(8n^$neUR*ce2NB z^R!;-X9d-{*wC*3QhEngkB@#%a>-+T)#9`5V5n95nNL}AeB%GXNR}(b-a(B20qZh2O9W510QJM z0}Xtjfe$qB|C9#I-g77I4jN|fm`@0qJkoNM**)e3UP)vw9B4Ug|F-Po7f6QJYYKlr z@t^U}H{m~JpV_tBeEN;^%%?}P%)mzT*ZW!~AmHm-KzRSQdD+LulJOUu+jU-+Vaz{$ zj@f?hE%N3gv+EBn!c2X4j^~Q(y}RvX=~eZv7ld86|1)Hx zB?n21f4pyh*K3mo65o@c;koPk*3UaF`}hu3WvUz(CS-LvaT5Q!))V8^)smD%xk)l0 zaojPCii<8Z+jkW=oWvwm8OI$?IwTj7 zQ2QDyk6k@G`x?u z<-65GeI&R)x4(T`AbZLB_T2#@eDX)s!A2DNF_KKv=Y?JWg6Gq0&MT$=o7ZYAIQKK8 zVYGb0>^|dMJ`w&3hwPE?I-!8Y7P#qg1^yC{T>l>=`*;el5B~ePq&8__$4|iA*~4zF z@Lwo-JMQrd{|E8!B7WCfNVwj^A(^RrRtdjAg8KL#*sQ)!vA(PHUtX`E(><|O`Xc`` z{aXHqJRBmw`e_yUt=E+N$$xM8#8<|AvP{`me*1Yw%fy|>Bn!1^9E*0C)uxa+`R_OV z73&YcYP1|{wvX9=4lSf8OsYq2zykvZ<<1(q5n8 
z$V1{tvMcR1kM{Z;fV9_aFx6f=$25^}Ev>e5%$4esEoRPz|78_mSJ+&*-^{LjT}<%f zTG%++OL*?3>l5>X0g;~FkxxPP5AFP&-H2(7;l$6eXyIH>qB~B3BdMPq4-hsl`Cak< z$<2RwD*yAmP{zf-V`pmo9j|)6KkxZI3V%h-9QE@(7ycKAlBkhgIVoL9c+voBsOs8| zCf+pL_l3>usjuz)NHYJ4>jGL6SE0B4LuS{X>8|2ke@X<&_`-qtuhBCVnBDP$aD#cO z)&I;t4CUm^y|kikePR{w*>M%GPwC?tw?5cNaqDkpYWdRjC%1!>`P%T&61j<(X> zF3~QRf45n2SJ4e7W(jbu1u%v&&{fn_b?v#(lwR+S0Ln}qNpdF*yfZe?5;D8*$eHvG zJ>L1ezNsr~;XrGy*&Qp$rAn<~XZ~bsB&k~;@A_Y}>rZH4Cl?tK*AKI;+}ltPP#yc2 zcy|K>K`uh#U4LYRylgKz{2{NFs_NRXQzeS$3{J7aZvcKgtGw`f=%KG`QkzdM@Vu$BAc8C+vK?>yNc4Bvz zw(k$MtfCNKV)RJ#|BJMH&p7Xyefys}S9pE^ka&wXgU#!km;L6z`pxcPYsTLF`^Elm zV&_NnD;zi@y9%?{W~a0*l$$cFzVmdT#WcGw4x8P#;w;R_#&Z!9I;N3h0$drBJba2> zVZpwBlVLn1gK^^a%P4Kv8yF!vOdR)TUS}}tX5KTi!|c5^kiGWT?N4WCuYGRkW1`_p z2*6Z#Tm#kq*89j`ROj!D*N{K1@Jaq&SZn3)Z(p$U_v)qY{C#N$`3tA!FY4rPg3jN^ zT=~1W;-ZSKS1RVjdv6RVW1S-oPicxK3+biAIZ<60A9BfzAxOS_fF5yk%Z=9xARNX! z<1p!X_l-G;#lvK-x1}H)>v}8R^=7QQbdxZkr1gmhC9Lb&?yOkXIqf4Sw9FTa4_8fo zw&jZR7jADE7e6IY)fFn}hke20^dS4qK*F3nbnE1I$Vp)S$Az8NWH*p(&uTH)~5lQyY=ZJX}n+N)pk;QCsOy0y;ivN z6FTJjxj~Z5pr5Z<*>UTqM)dO=OFxnLDStMf{%5eNYeKq?&aiZJf=5SXX*%k&^z+UL z`gzIH#SBL)%V}vAcBb@{SMUlv(N7+>m(}=Oep68s@4Y54$L#u>SWof0x1J`i z^<0yas3yT!*PoSwXz{V`;-vL75LRVb&!@$DF4fkvp{2@P_!qIBZDv=cVd(+SGR0QT zMi0gDq_Mc19!np8{5$&irB(L(|7B~T7=0w>@`6Co!s*S@#+e@^%O5(_SE z0OiLzwlSp(CUxL#FK4Fdz|~&<_)fCD+)vxFG*OHu5_5U6q?F&hF|GWg)V~~KkXrWb zJ?1mBIm*unW|CeeCEQ7vTM4y}5yaAs;3SU`oZ(e~yZnEn{4eVI|A$k4G?AFg3p%Cz zSKN?R{!!}xJ(i#JGL`=x!VXaWbR+nfM+cLFrT z&zX@$g+s_$)z)oNuYaK`V zj0Dq2G?Nk@C(Nw`R)~}zv7zO>^iCk((*TOS4M6w*f2I6ewEfc;=<=h9#9UsaEcO5F z=hMocx&GgG`AIZW`F}~6yZm#V@=urYryD_mM+YTd9mqPHrH9vcqlY1@^k1V-us#){ zk;H|($XfL9%C&d{$@w6oVTvnYrnmy7mLr*OJCcQ1gR+Za`G9?V3`cUFGe~Gh5?7#7 zQo~WoRenHW(ht~5fVfDfiW5Lu3@=raH?&lHsy;t3U$uYyTH9f)pxCD6E_%lQ;O&aeC@qnJ$PPd9=*=_Oq{kePt31Lgk}tMdQZD*sD% znZHJFV1NF&(nVtSpjH0HhGh9Kmi3rPFI8Mr)wNGL!#Njrz5OIFERfYDrt7Y46xL^0 z_)J-%k#(I*xPFzrHdL@OklpbJ@#4(j!*Q0O0y{s-`ukOhs>yHOTs`R}7G;F}RLx%1 z_2$X|YYZ%;GRv>%`YqB~p)of-Eo%qy-Z5Y6+qeI5S$j@|(c{8_FRq!{+jL%F=g(BV 
zJMv!T#X+K=oO#(djxVU{nwC?MeMD3ZEXlE>5S10iyLP9nHY`B4QPH)jV(87v?6v!M zn53I1+GMHdva1k_soVf{4VQft?^|D=Aj9uMU;nDL*hkgZsrTCY8m0^x_4Q-q9YTHW z<@ZlSPnTV(^#vV8U*@|suzy^fRM^8*najYAOSiA*HFv7Ms?zOi-&bvYt=xD3ecg_{ zL!+-Mv9HY=w7#y;`hp%}eLZ*4yV2JJntN1zZDc9aYLBhdc}Dx1iM&IouVw4CzUsBU zpodsr-<saI+LVY#y`$nogUas{8J;eIDw&vaF>m1EJ zs=m(TEP!QSxzu?^eeJyPkm{@KX|1ozw7#IJ`kM5T8V+UQu&V3dGCk0yIXJ7Kmc~#kWytwm3``m|&n{T+r&9^VGgj99C zu`<9OtTk>bg;n+55+L>so05a)*%)8H8a$t;fGG1P44!8OcCL2D&4-X@+g%>xW~Gdq zmntYXIdGnA4V>;}dh2{iMc2lPp*I*e-`Vjg>DTr=W$AI+rIsEqg37$K;^Ij!Ny#>8 zC5ql0MINjI|Nhye0w1##s1%o3U+Z<8gVWdH_u2Y-V8CBrHzUv1m-rb0N$eoJZ(e-sa_Lc8m*>~AhO!Tzu z2}@72=308X44T?Ll|_m7`V>Qhn$L?1>t|Xiu503i*Ex8QedCyqa8ZBKJC7etWGX^! zUgJ#)Z{j;t(xysk#xR}?@lS1Qa(1euz-<5HKC}Ie01ok_m&C2z@WZ6$SHtQ!DWRSD zYJJA*2j8nBr1pzRKlm4vUY_%Cgif~(Tl)SHg~_1r)2Zx()OXFwcd73ab%cY__r;v>vGn~g zYVg?@?r^B&XusrpX(;cojrk-PhrzRx8?{`$TOd56Znr;B~Zeqrf*k+}Eh`#zaA zygz;a`gfMTpW`Ne27S+>mJiavZ~or9)Av-(e=z!fh*J}mzJG-p{Pq3zibJaJEkC#P zJx!(uGTzCw$hs zcTyY_E?3Pth?C->{QGApTwJZuJ(Lr^n^gD7`Q zMW#=d{mX2`a$?j(#IBfwsE{|eSQYa3Uu3M1ChXuKE99%+Nmj@cc7@2SCbaVa`k$*~ z9kl*m;ViSI|83|bqyDpyYwQ30&2J`){dwoGDT>SyEG^A6XXv zWBeq?U%wZ*qwD)X^P?G}=eE_Bo?n}7>G=(4MU6TNIj1^t!x@D3p0V#ib>5qLNVGi- zU+@p7Hs$(xoLV1)na5HG0s|Naec3#17!uFhND~m zmKGa!F>^F`{_$^T-^l9!w4CfCI4?Rqhxuh#ZyKxq#_YZ<9PhpX{9kY0TF?K9w-)q< zjx~E{2Rg@@3tyc-d1L4pG~tT1K=Kmry(*`wefw^7QPsXVkk~#Xr&_N@dzI+yX|wCn zu-Ww@CvM}MxV>9d%F{d7B)*^y6}t-f|4RM~4wU~*KM}K#{Qs1Lgs$9g-fXMas;6nX zp2mu*(fE3A{n_lC<@EBzTk``0X79{E zXBJz9^Uv5AIwiHL%BiPD>gf;E)0lL{*=s-g&iX*hiPGtslV8rhi#ZN=7+*G} z1D^c6nLYitJN3Hc(9Z0&ubPv8mwor0ny2@MfaXzs_06iU=GS?St?G>z1S&eYMMHO8 zB1UX)>#m>I7w^3p{r{2n|M#l)7ij<7Upc?4gaum`_2|>8y6OtTGkRM-NsVwj;>E

-y=n=%Uh35N)6V}Tw_d5^WqOIhxfvPfZ1d3onVHU8M;y!Cd`gOpJQqFYqq zPV^Q|wNNKlD!#XTb-x^?eE<5PfB8%u6mY>8AGck|0TPF`yjX%=d(}2VYHj~o2LxYdq=Yu@nb4<*!#w2FUqq%f$>8f zNMrloZ2zC#eGC}!F^v4>0Nr7FEQ(iDnc~L~( zg4SE)u4T#LX4gN6Fk%JTnK8K|24tCmZ%*P0lR$?ZooC_o$_ z2oMCw65t$wEPyP4kN}?m2myou#t3i>z!-or0AmFRXRj?EThUoQ)(rfP{bX0IEg-98 z{H7GUTrrqZZ_g;Z+`-BbYtnKLV$y&`v)7`IPNfZ+!}|Ge?)(nLu0pR}iAm4x2#XIm zo^y_N725XsLH+$_>u>wULsx%s&L-OR#}Uf;k(Bd|cjoLEx5;71UVHRA&jh$Ut?orl z&#~`m4{_~cI`^Ai@>~E_Z5YSuZE>vReC9=F_w<~NG@eba;1%6mDz#>Ih+)8!j2KUVsONnNP=97<3Ju0o9#Zf+XjtlEAjM*l=@qY<;4+s512bxQ=C z(9N4Cy|m-EpLSW8y8k7|*qr%c`#y%OA7N+NYmdhp$?o_o5=B_|v*O)f!Yt+> z=$SzF!YX3MyY?q8UcY~TWyd@7>jSTLvZd}!Ce<}Jt6xt%lqPNdqhhr41MLH1x9ub0 z>~u%es(k}v?4@CulHz6Ns2OQ$a{4^M&`zaE z20+c8CnDmM^QL0sbPm4LlF zP9Y$1HZ~CN+Q%d=an5&x#*QDz9Y8LufBU4MQELya6_@XLp>wa>_#V9AWgOF~0(mRPUatV-% zU$4LaO|AmUcj;S>RGB2Ny340z;$L))qa8W@1%0P_6@bpaYPTA3rt@$(|8 z`<9#?bI+t_INqQiZ{H~6=Z+Zw$rA54_FVT@?k|^TKlu(5oy4Q3OJm>00GQ-vyo;nn zN57u*cAnu$?{@%63Fg9gTBcq|E!r0RnRxfnV#HPL|I3>*@}gDkf2OGqi6Kwkc&i%C z#dMQqYuO)ltn71Whi>=JNetpktdBURn2e_RD;}4YE>!=& z_q$`Sdc5yNkR`_+?-k-G^xg9BIJL}O9qN4mdk@Zky5fmL;Xl24c1rr$YY&%xj6r4w zin{=NC4Ztfx?P5xwa@LCk&@v#qA+!>*IG++d?)jiXS1JtPNqyOP|l-cF?**5-pNv) z6>DSk72Mb6RUVG!tPh+(dOqGefjQM5WN5AGdPmmo|7N?D&zoP~wNwnI>+6l!KrDOh zd9kj4xQ;4S4o(_q|DQn1dGYSqIFY;0#|js|n!Rv)kVxH?gU(U2?1jfiB~w=q%W)w4 zQn>RzqBsk*PV^Rz>HK%B>z_k^romSAhK?{NZ^~}(3Nau~{zvxR3-}<;tGTICwSnwY z7B-VPnQ3R={UmSqmf3dp-GReo+Df1N#1{zdw0OFICgrG_{1$I$>H1w~uY61V{C?hD zvsQgJdAUX4^^N2B{w)#OH7xJa>9`tS+U&k`f!XyfiD7nst5uOF)VBaif5HUlcrLr+ zHgb2q99Ui8TQJf9$;rcvRKZ|DS;Yf+8m>Al?8q zDyfQKQ5gkIG=UkJK@>r}AhuR)y@0|@qT(fz1Tyb9HCnA&udTIjTU)DbMXFU3E(v(S zfQqQ~ipm*A0kr~xn%`%wea@MgLlD~c^8BCQ|L5~aW}nMGYp=cb+H0@9)_2Dkv&E;j z21h`lX+tP2#8~y5^$N$&K%ZcE$TGd%EYqLmZL>_5dicdMoio|#VF|pQ?^RHAhLQV_1*9) zHGH2)GR{N1R`&sm`1WvM1#ieAo>6e0P~NyFx`gHMEAg|f`QrnPXE0HW?;_?$&%2 z2l}=`g1)T!>D}RO>W{+RSl7#m4|DAQdN6N>%jBE7zSf?~7EtCeN_qTAu99Vg5mIKg zNa!>HUoYuDoDHV`(ACm^?K|whpKJfU zLKjAoZ@FElgNOLIe%gi6{4R{Z-HYi$(|3{3%7VU=a(>@QIlu3uoZoj+&hI-ZyM5>M 
z7>Gngv964>7~7Ax;*!>Nst$$>yK|18<+Sw3uF>SCX!5>t8XOJ098KOKA{I?HNMA>j z$_V?TpR@gC&F@tjP3eVb%G^$whg14CdM*>AW?9FoU8nu)_3PZhV#PAE#**KJlPj28 zCq&A=#0M9Z9SId|K5o9+Ando_EoC!Lz8V9krG#(F)yaG&YQ{k{rH`MUJ~D8wjCR4p z$?V57Ir%$1eR(jDxebT1u?u49%2GtcuX%3oT2xe(+!{9OucCb&k-aO8`f0W5MvCSM zZM5cnpC689eUSbyvdXHOj9Q%~3Q*PReB?{j>cm?cx$6jLsKKb5{Y7BYSBhsanm!ek zv(c!WzwlP1ZVR5l-W_Pfq=NOv7}@BpJ5)`eZ;tF??Rlhn39Yg}8fo2_c#r2* z+kA|+!DTq|m~iL|tDaa7HZWZeI}o&J_r`dsEMV5-;XAP$3&CM`3mZW_cljE z%i}Yx`9mExmp2|SBu7GD#Lu_pUl}NGBu^=QC9cCS;$yjhx1t7(b0x0BFXAU~p(__q zIalI3{32e)g&-HuIalI3{370;3*ESY(zz1X;TMkJj&VGPtS_6Ai*#0b19qcW?qX(n zkabqYP<4$KNl*K;{(Fn~OvRt+wW7d81%5 zaq3NOWIkb9*+-_-c~j6D%qdct)$ltC^06WO&6`s~_X!nNz2Ho_6HI!7tA^57A@bt- zea0vtI`}W<7uV!aSaI#r!^!83eA2NayP}A$Gm1!NkWbvHLEXA^YH;^R`Yc?%%=PuZ zq-Vtgj33xxkTaVM$6=s7j4Behu=tS0B^(a0=6ge3!aN|PH|~ZHSN0l2BZUKvr>S39 z%4no;m&Vi7Ei7d;Qn+j5Y3dc0G8!oyY&=b!!cs;fg}XJLraoaQ8;x`dvq@D`ZYqf~ z)|g6!sXCP~g*43KT5itC{D#d3Gg6%?C$J=Q8`q{awPziB#c3`J`pg};@ZI^y^&2}p zv*!2dQPFsoGE@D>Ycp?)F7QAWqv_cgBHF_d@pmLUB6TkZ>~4P+-S^Cu(1<1wD5~@oK*2s?)JRuRf!qS^q7vXVUmycf{PP`#>Zd zQJ)7cl~G#etqquk%}9+Xug9!;i{QMgq}XA$<_RpL>@rV$ys_%Buq)k`O1GKc*nmcT z>3Zr0N4gJ}TIxEG?gPy)q#GPPDE!Q9Y^YpYdye$BHP$-{D2l>Dy&=`#52t$_gFdk8 z?}55V*%#rm%`sdSR9n4YI5qP7;p9m4RI8__K0_y~o^k}aym1&Rg>Lrr)K#jd*V+%})X8r4EB*p=bvx0H3FJg;webD~hpO`go zM3;`xdmR7?yW!`3c5biYFsYl^X zB{kC2`F+?_@tCMqXJTDu0R5(67eKC=$P2smkHWo|gnP}(y(=Drg9|c$G+i^lS66RK z%hd8o=06b$9CX-1Y-^2zBFr6~@E8Hf6;QO5ckOhF-ORM=CA;riypg`X$?j!-JZ5*5 zAIU}8edV8S^3MQ~y*Tb@TG9eho2r^j@j?5~QdBG7T&cAmuqPap)ZuUW``BTpw7!5`Saz%~*={f%x!3uT@{k>{D7?(OQXD zMAq0ijEwuz^UvTPU!8rD$**2?5(?wZnB-2I_iGLhm#vGR7)|qjBz@hnOm*2>WIxKs9B#m94{1;QV}7wt7dt+0jvPmN*EAw(%ST1jA_b zX1J`??ya(z@6+n^`Dob(^7MO1h2H~)2Y@A0b0FR?nyO$O8!5@0bCl>`r+pQOXIgc9Q)Re{fF+>hXZf4281^}A>2$%mUhcK!Z^lAM9g^l zTDw2qEh7E|lVC3$v-Sw~^|%s93>D`wVa7*{RHWm?WeDMjoCAGPQI5es7)(R>kmp zT}IXtrOCh_53-Mu>T&R&dsuF?2|gsPJ{(U^#iqr9_^@c|5+L+moEeLhy(r$|Na)q5 zHRb~W92_1_54S+qjIuA#FDX%qBDWRopTMFIqoMa}_T?StdGRIlPU1!LkbMmF7zvQk 
zy;0n`-+z01VdfEfd@yr&Z~pI5(DR@nh+AF43{#V6z3kpe)M0mFSzZQZH#k0ck^Smi z)5-$T#@V*BGI+m+Q0HHFe@k?dh$EA9ZybbH{VPZoEkBYK>J5y$>B!9zF2BZQQAoux zff{9ZjgIMvhSt@v;m{sy=t%T~zW#X<;mn&cya?vuNLhO5+Zt_+F`aX|HHP!MFO8N4*C}5bLt@kB9BuVQP*bQ$5l8LF`RlYT(&Eml5vaNhwGLIJ3h=* z!5&xNMneCHFLU@2F3Zp3b@}0AhJ*@!lE;rlo$+H$&EB#%_a_b$kKhnZt`lJ|U?==Ji*e%2{2B>xl12ffrj|t77E_P-$YvcFE?ZbmDzsRt_Ve{Z`$C$_ zD2Q5BA3&GR`)DWDZ^UU#4d7hKKBpZ}F=^rigs*pn4&vo7n^KQxY8(Pst-9Z;U!h&V z28!&-h8EVK^_a8TqoJkM!Eppcot8eJN33C4{PI|+<;Lo0VEMT8Sr(?zbOt(3H`cz? zc>&7yooLyzY;WzCa9sw6#^>=dlEqqi!hi$=6{PBo*?r8o3$x86TkX6#v~t8TiI>BK zVy(l`402}~+r7=_K?Jz1t{vlaGGv@Lg+@15T)=(WE3K)J&5Bl$&1Eu~Sq(?SH=W4a zj3IEic1SJFERuHIAE{f~k-gvHPb9faS=2+{t$(_6W<)|QvkI7Ti7?|TKQk`$F@s&k zf0qwc{|X%)O5&10JPWIS2vdm(4ox=dw@Tq@@_G`pJ z>I#_+Ylw1h7x2)~#_C`s-G^BbMZ+$lRe!5I>5RS5svCb04tyboB)0kl1fvbiwd7ZH z&sdyrp8>m{-9zVp<;TYOR3jI?6M*)x>$fr6w@@yu`Ta-im%uT<6#opB9qkMrR~}?P zmHCJL)U#=2&VIl-mwgqx^gM?$0X;XG$oz}AVD1yrgMEFrO1#^I20XywbOp<*6~t<^ zvtZsQyBpp|J5YYYP;P~`BtCbZE!a<%x!;=Ko_*5Dec6_R@9{CVXd}t5WLNlFv{8WR zobTRH0u8fc*sw8d*f1X-7e&M(vB1)3U|Be@H4<7HKQSEI8b8XKKTKNXjw@OCEsXC| zK4NI%b9tyA3%+RCQfFUXj9;!Mmbj$4`iU=#Zn!~s_S-xOjx~R4_UC!j7`$}s+|7vWm&ofY_B}W10<%x4P3o5b{ZZ&; zI)~o51@y}9y)Jm2s|NClDX$n-4-xYsejWKCR5_eJ zC4eNZ4hOQ5<8Rh)*8Ex6!}#;phU>Bc`d%`2oo{7- zsq%?g6HQMuX3ces9GN^cZpE3J3WS$7$ZqUQV#G@NgsxAO=EQ}c;APS|tNY_TO=@-c0}o{?~5uC z57XXoMbnqo+CRG12>!c{g77-Zn`ao8rg4s{G4pqOR-*bu-`t+-&uQA9nHk8J7u|t0 z<>`~FJ$;fR5WOc-bYS*G?Jv=L$PcHYp!b$sV>+!&1bvOt=qJYEFYS9z+vitwF1=xg z-Uh=@qfbC$V$yN+o_#o9s}O}EJ_Z!Zr4bZLObsMYiNH?D85@{AR@*Il#IRE7?d#Bc z5bMXpn!LG0h1%%{x|)741JD($_H4w_FF#ZIJp1K1BVxD(LM`F+HKNFf9Kq@B+EHq^ zBnHYJd(LiYLY_?Aa<)zB#Lh2J`EIXhV$BaV(S&t^JJdol)Fg5O(T4M3YxHYky7aWG zu9l@!Vs|diJl?vq8M8t_Dd zTY{ZH%Fw!SpiTF6BvB|@wkR5UAwnWil*7GW!d(d3&H7bYb6Oi-S;j6I!u=Bqhth2d24rK@29#HLxNzJj=Z3SXt`cvXFS7#sc z`d55l*{5^%m=Tia?RG+-h0gqmU-XGRJ0Y&R_i59xO)MkBZNx$2D;&;0B9Z>}fXr{$ z4^GJ0i)Lj+!VfhKqb7NU+M;q?2`ktQ)aXp@nGf(zYkX@(vbloY*F>qStJ_ERtJ#BN zoZ=&DNA82-xV^bj)~aT_NnrTd#Z|IvXM)Wcpfl03Tl!YNp#q7gV=1?@FK9Pl^KKJY 
z`r*QeR(d@zt%7hR(kf27gO;_i7g$up`F0w2dtPoxqc5N>S7s%VEARz>Ik1UnguEws zUqfcyOA?3d{uTm73+0!E*4_0X(_VjlVUp&;$=ny5sC%ypUM=worvC3PBXi#yxqNUFS6$c9ueui_u|#6@c_o)GU`$Qp8!ZmkeYK&cMiXFP<9ENBgYpt! zKTVACW&Y~=H+>G3uGuJol65Zx?5}TQCo3s{6zX9ot{q>h=ooM~Fzb-Hm+iY|B8bTr zUu|{*w;=X;&dUfrB;>Ls^rf4Sn@`Wm#Btjtij%Mny@Hde|8 z27kpQVuA8|59oMOk3#L%oNWvpWg3yFH8_x$sKBTRzTjhC<-Z#_hp2jjoy9dl z2Jn2n&J4bl&G>%R=XsJRag9qEFO_VUtZZ_>6~A3&W0SbPue%Zsm@miY%4W=Xg^xr3 z1~!BP>!Sf;&NkVTcq;QFC~ndw2o~LQ@=xXjX~mJx=JlVPZ=}m9SH=k#Ka-Uz3`)Kx z#t-Y0<5m9yQ5Q>JcOW`)JtS@nm#v0LHM3&)>PwixSg?#7z}m$AY6jMmLff6pnt;>@2HFiE^g zVM)A9^5W!>D-6JJ^0H#zOJ=@&#to;}JyvbMO@;?qGom}*wZCRpd-A!&VO#aHX}Pb; z>?1ONem#-RXACV|eP0+q@lTNr>1GFokg0y=aS@3T#Ii|D5jA4ixu>R;I-Ab0tGp=i zmTZAF9#HDoLz*AMY>GvICBeL==_g`o{7@;=dJ+$5&b63L525fxc*J^YzCuTL_7?a} z1ZcnNH}Ldohob_PWq&JdH~UrWf;tU(Nt~KY-|Smi$)!wv@#{^B&KuSoNxYK9^Evg+ z@tT6*G?Ct=NUt^TI$9oM7?s4*!GYt_!^^^DZ&(e#;i^!jOYJuMa0pYvt$KZ4_TY(| zJzk_5wURH@?ULV!td<6h$1fnakpbGY38qZ+e8p@Fbs)iq>HK+sVtZh-9BN6XF?R{m-8A#y|!8 zRhxCl3k$LSO<2EU@Fyla{sI=>;Ht)>!|AF3fe1qa zMH2Nt14IlA?DLrH7LyMjJjC@v=s|4dNEe$BHR!~D>XHJnmuhjgQgz~C;espB?0{X0 z{JR2u#8%pnnpHCS+|$z|56jMV))z!n;b~|*wIlmewX@KxhXw4PUWVS27(8!;y7Hnv zt*<9mifO9?Q|x4P;TJv~+jhA%Z(qbkcFxN-tE`BE9{svEB+|itk>m7=)unjxgo#@**;fB!K03j9w<6`;-!v6C+>G8 zDtot#=SbiU5vj4V7mh5A;yIN(5u+%^xX!QZ;cz6Gn__hlnVVlHz)0z6$@JLq{JOY$ z_IC5@J4vZ{3G=JfaHsIGjtQ>FYIsjf(^H)}gn_6C`+=h1w5L;=0-FDsl0iUCbl;K7bY05w_g4=IYnw~N zjItA-=#tky3d~>;#YB*s$t786TWgQVqJx6DgRloTN*8Ug$ICk4aLKF`#=scRP#5aR})JmK7U+kfL;=f-0HjXLMXBmNtgIyY|f z-#Ej$agG1R2#ff9^?}0gKI&Xw<-7iK=lZk0>o+*pf8)D8&AI+l-}Q0M^=jYsAvejoa>W(*ZVowPx4*=w$8NoFyHl$o$I}Q*OxojH+2;1h563) zcYN3HcdobkuFrL@KkBEVLy-66b20gKGUMqmd8C4cR{iUOwaQ}G{`b~L?hEdIv=gG)YQK4*Ol2(@F)WOK z^XqVEvj74Kn-c@POmdl>_TV8|a0yCEE$zUrHhJgS_y3jtX~mq_dH+1fQ{(MP1Z;ce z^!EL;ZE7d|GtFBj%X(v@2?>+4F4Sg8zTor}X3Gb}K3ny&-V(bd8v59p_oy~2w-|M- zjMdhh2jmWm#tUP>S`Zx_jHMDK>=Lmu=uU!Tyfhm6W@gFQbg#o$eB-DHy@Zd&js&ob zza7gn_H-c@8&YHmN68v{3KxvoGmRWGv3e?6g98e!9_KJ=cmE-0|HMM?S#!m|AhByl 
z6YR};Y~pF@K8IlWUr0|!WIVOFPspAkPjSE%{q>ta6!e$t>UM&FUbj!XKpf`m ztciac!PE6b<}9bjOj5Qvxl2MYh%~0NMlh_7)?kJi>?+bt>#cczlhrjI^0Lj0v`Fan zNa1Sf7~+oP2=7p{HScJ7G&N4v*k#esn#2qphijzM1KDWVnhKCM4aip{c>&Sw_!~0& z@ecfCzgj{v^^20W$`>_t+gVt--<5@xe9kPa_~hTI-1%Jm)>zrv_|4hdb-cP zP+-k@RX!eSsXm`5ibI#hjk~xkwY*WJI*H^T+sX;s|24Tw=`^125aN+%IMt z`GR;WuP0!-DMwxMEVngbeN>7WB_yK2~nnUAO|oTPP7 z*cMHmUphESJ%i+{{e@2@_O|n+rE-I%(IBZ&!^2Cmf7U_3c3&*HkS%nL*N?F`QXVVg z6OocwXlcwE(-IA=$Xvn_!s!R~&mmMb`$o%}iCBzUqnAny6pzJ{OEaT*a=iV}@4O-K z9s4~MP-gUgr~5shamcC|n{YSZ}ps_u!kr8J$H53O~z?)AgvkZ%dG6tY_P8Bj`fc=o9q-q{ z|MvEU^Sc~7(ti4EnerDE_U8}s`tyn)v%aX!kri!~_er1SdQ|a0>Cca)KcA3W3Ick3 zjkG^Oy;Zm;01t8c^J6gtPT+&tzl#51@vmhYN_$XuSpMl3Nuh85{{i~$to{!Aelf|V zuN3-neA%(2VdU3b0#b|FWz=!7mpcwtCU!IqF5P|{bR@n*Zn_9v$p1qH^76k*-_-A- z?}rnWz74zD=F74?8iMp1wW*o{O_3kQ^e1 zlA=b$Rzi)KV^Ay4l3umXD5Q@4Z%>bz{{0`NZ{UvU`}G8+@9|$2a8LGgT)Ff1bWH5Y zo@57icbO>mfd~34s(Yo6|DVM@SA>}7oUOL0i&*dc0NU~+>sN21nc z!x21Ig8^KHItJ38uXr9yXAL0weK!5FR`kI$@`z@)A3jU^Wu)2EC+N@x1^y!%0yV$c zK1^EwJJI&VFhKtP_Cdpl?`;17xBapYd!UB`{A(f2K>;SPuvX}Bae%FPgbFEpq}^WU zOKS<SCG6R#AiW;q`>CYkToD}vaYmn{nf?uERY3=XC3l8Gz^BMFi74DX+EO0K6 z1x}mDctLENfefC`hyESN;N19V8xxeO0aVLAD=o`hR*_52mhvN~6qJdF6RKE~5?Slu z2PcndJ4I_DzEcJp_`$hLh3t~}vY=EDlxER(f7!&F&^oGD%);OtjhoQ8T=>wNcWk87R#+?^1fs4_~_4%hxvEj7ydZy=j)x~4e#WBA^-lj+rKmV4Lv=j z@0@<4&$jEY9pBGQ!RDm}B3Qz(9peu#J2p>3bML#E+GY_UQ>4sy6PfQ^AmSqNSnUXi zIB3mPaJRbFpiECB1t}2mq})7ObCK3~j@06;cI+5Y%91U}DyM?DS-~j2s1T$04ca3Z z#ed;R!6=^GDMqok->aWc3(oz-Ivn4HpGe#%nQMt4t0~2#TxowCA(^9Y7qE%3gOtNW zS?4gAgVsE8u=(dyoi`m^;vj~a;u1^uWDe$OyZfDDxhQ7vC44@!4;M{*_K#mw{aXQw z2_boGL)+DI%4WLTK*_;Q_gw%wwD?xj$8M_6)4z|uQJNS|(KCe+*6FEZ;6b0uHJ7R(1Q=&?NAAS=JJH|7<1bCS|xh%jlzU=q{{vQ5)qdiQ`{DuBht5s{2dR^ zxGgwOujQESmmg%SK%OXj^6J_jm2iizXWeb^p*jW|8hms+-VAy{sL2Yl?{eic_FmyzjcaCXAU z>z5AesE`mZ@FFJT{n;ETdzVxjtKlBji^N-=2qe`~50}SksOlvIUc9;h`M6T9O%8f* z$6J$2jRHvzuZx7fwCdg_uY?pqYu>B;5YLPc^Ed_&Kw}YgN9w*UvF05IqGn(v09rVH zIvVe|l7C~Nb@8K2{!C?Kda_(kOGKu(5kss6kM_ 
zqN&LOSosTrE@$;I-To3K?OI!~teXa-3-}Oa>HRG(l9B|YJ-La4ujL*SOa6J%x z)B_O-yykcy4y#B_EJ~I{(i8DPydK|&VlX)#n1_itAzqL7pj@0BPh_n2q|+$+V^a~^ zv+s2?e9kqcex+64z=j*5R|}7X&-Nc^rt@OvCwi|OpNC4lfX>rOU@Ii8`;#{3l~e}Nvy(*u(;@~g#GCIb?XY!Qx!Ti!SR9rz2x zG#;M44S$~({w@Sc8V^q|`2-=eFNFgy2uK6`ttv`(jijsKFPIG!gUM6jZx#GqDEzG| z)-Y-1m2&l;R3IZdGXjeQz3lKVnw(sm5m=%~pj}TFTnM7CAp7ClssmqB#LV7!64s&_ z6<}$qh^F)*Yd$B6v^1*7iPFftz&0%+!}wQw%Hca;lFE9+;=21}FIU2elmC#=&1B1& zF5+p~KXrrGf$Mx1QuO<^he10wBT?TDmM0$uhEi^N-=8c5oU0>c93RN+&rNBk#1$+G zW-7Kxu^hkZcDjCH;oq%tf!`=paYV3fRLgP9|YRquJ6)@d(zy_wc!~#Z5U341ws5ja@x@1HBXcZ zw4vgIa!Stn0J98`8miVb{by+u2Xa{%I*xXYx?dGO$QE#Doz+PQKmM;+<%)foy_-9Fe-s#kjR!?Avvr7Hy5 zR|0w(zvhO~D`WU&^vYk+E03vOxt&KHy#fx9&N!c$8Rsiz91eYOW}GJZ#f-zALx5d{ z+>U}s@8zOR3=p}aO$zoCB7r43o2&H3ijoZE7(Mv1Xp@+1DNts%d0Dhc%*8WYlqWWc93O-hZnvZ9D{d!OJ97ha0%RXx{Jeo+ zhUq7Q>*m-$7?D36$z^vQ^1Ay4!Y7BV6n$TjVpdJ|l0hm5DIkAyH;b@cPsbWd;)h6| zBi5P!q!R@iSKf;ww+e{Mdz@dGS>Cy<-jXNIlXSI@$IJ{J#MCdNPXNy;GCEacv|@6h zkd{%^c4EJt&ng=VcrfMoo$}x#J{ro%FIRxcRBzrlOA&iz*CcS03bGb17LwO z03OTCqO*pr+@8J1(+JqhYlgjD&(5)TZ-^My%sa5w-9!@BwosRS>2UE$0gL;k1P`aL zf6V?we;Cl*_cWr|LC3Z7&~YDv%sh15+of25jyp%&@vi-Q_9Bd5t%Q(5oMxG!j$dt& zJ)lY&-mOjYkkfrHyi;9WJfeBPy06jIK9|HXtcmO|@rq^UFak|wi$G@J$$L#@+_P7p z{;2F)Q`z2BhF^?F&XA;HD$_)hUesnPm}4q9FIPd6sUT!3Sow3ef|C3SUXVuMk9&yM z#qailY}UB&!F$M+BzH0!VYw8plftVMoE5m6iyPiCKJeqTQL=ds-VPsl-K?t(K5$#5 zTK+UkvUi;PM9+WS2|h48pn@-4L;B-7V80xE;3wEf&H{(sKucQfn?w*?gy1_}gy5eU zyHaq1ml|;3%+)gV-FPYk9lS!?+KXs%#s6gm+9_VJF0HpJXT(<|Ea~)lgBL6-XMJO7 z-W#ki{rDkwe{VbdkRaw{bEcLX-t$8W@q&X=ySK4J>aTn$GtN|n92guDttb1E=RLvy z0w2YL(AnE1y!~DG+rb(B&ie=PT`yijcGg}c-1tZ}GI6=AQ-nU&IjizVjHr2mI;@nO3F!7xn9=1GS4?9yd2qhn0?^#56BA$IDmr^hdzO~zi ziR83|1DQ6yt4t?MR`3_mJVh|oc-F~l+!pi8)?QQ4BIMtCEjEE1~xj`sHRgIzG< zQza60FiwvH3>a|)aUYR$fvdcWcOqKar;%vZ%rxXu`b{o~0A$nb?~xxl$Ryl2*s-?sp=_(u66KK%bw$NmO;249eYawlb19~9shbIORpFOC-C7iSN4$6Fp= z@!$fy;vf;caf>y{EhbpkzHO+)fy+3vXVFIQdnTX4zUUvKpxj~TNjR3?e+zzbpCKN8 zvAaLm>HetFB$_+P^K8Hvdu-8b_V 
z*GzuEa)lvr+`NO;XY8JXI!1BF{w@AS(%8Nva*ryq>X&ZE;pSc7@V%9(Ltt?H8jrdE zILd94!(34#1P0iM+c(EISR+Be@^+k$c2{+ZM*L2X!5IyXN&}IQwhdv7AboPu#)%#6{!Kj z*QXwOBIgm92<| zmQ@Gwo&c=eM|S{3so*{A2Vx3IN}pNv^W{ZOj=e=<#$)NTO1`d`G$EWm=j&+bbxuR! zVrA+a7RX;^dpZ7tNa*=E;QN_i<%#=(_l6fP`FwUS<}-D0t_xPCj;Ra{O{`|WJ&`5L zoqV|D>o}(Hp^Ab!Pmz|fV^lw|7t$*FVVUTM6Jf1L^%#|EgKb@|ILd4WG;^~f+miEt zfj6AsWJ9;!MS?$kM|KrPp(B6h6>o!y?~zU?>iEyJsuN~KLd#}-2eX!2^%{>S%xa7hWsR|Jjc;>|-QVYfW4Q&;8=HN&IexBhGou?3~y~AnN z$8uKGH00^V9{|wYmspQ85Tw@lgvwL|A^Hr=KrbG{zz!>adM0q!uz~V?wp0G>R~hQh zQ6Q|jk*8Jh*}>ax&ZqA)p^wgS zm?8AFLf>bJHXnULobazXdz(j#{BY&dY=|=RTy~hMOly8uD7=cTPdEzd3!OC)`J6Rg z^y+Ep;ST}Fdb#gRys4g<4)0E=AQ~STvsTtjY$SJ5X{%)KzCKv&M~q!!^PYce3TIdvUdI* zMcMu6&(y514c;zTok9Djhtha(FGB(W>Ei9mBHPTo5)Z>}p3Gk>v%gk3G0gmk~~-GfY3nlIjt<`0ojfiw=SBF&XRccFQJGe0QZAIP#v z=w4N7=pMEsy1nt4qyH{5j+j|?O0R*WBbx<_P_wY@Dn4j}x)#e|gWsHKpO47=q44`u z10m_s?(y5Bdw@eX8Jauhx9S;}-!c<^7rmJjez1IbnI!T;dX7RgAEu|o%m%C*YNrz< zrJM&(`fRmooiW3P2KUJ)w<(q#5`*Z!1QMCj434|d ze2FXJ2+6GSSbF&Fz)JrWasUAaWoR#1I6C7^sf?eT`o1pOH`u>A*vQOBaloGH*+`PLiaf>`JxktB1~&TE zSuZqa>vdc>(5@0Bo3D?a7$~x|*jQEZYe||l>WG6yv1s7uLXX@76o0yq&Y$zT$l>>P zM5m`eh2D3SUIy-Vj8}S%U6id;dg)+94i@I{odw((jAy}jCN^BAb+KpE?nz+xv=`bv zv|r=6a`?___qa+zSCm9_{EPU`PYm#Ydv_h*Iq2d$BR~H&d}sBmI{3~xaOb8hE7WZ3jV25vFMa+mFzQJ`QE+?1Pb0l8%OLuEx?E_jtX|94VIialp-?@};h$}J5c7!Vn z@)v9_58T-SGM4ia47~F-3hzA2z&m4p`QV+a>YH=;&X2KeY5(*9PfWfO-}zUb{zg%B zitoIUC~%=q>>kaNW%SCfvb}A8aZ|pJ%Kxw8J3mfMG9;;c7*RthqjLl z*7R0<=Tz{{hGrMw*^J9h@txlzvZ!mdflYVuouzdzzKDgS_|8bNKjv86oVf6Sc)o-+ zxCilU*0F6~UP~U>GxecA{%^;74tmZ5+8f3%;V$Z_V4s6E#{l~rCF@Xt06vYgvJvEFam$^wujalBf=w=o z+{j7#924c?26qS;^agxh9vHOdjb6^~!y|TzoeB{a{`1o#6Wat#fSPG}3E2lLJqDOy zrR1@12TVYGRVVq_Gx+`vaS3Mj*Nokci#=NW^yG4hNpSvAF2Na`guDdj&d4e6Jiqzf z<0ZGR)BNTi5u1^H!oI4IngafSWV}Pj=fS9PWf+i@H{y@aNa(ew zbqZ$+eG{y{E*I{?*$u9ugNkEDms1CjENMfN>BnK_Loj9yWkNg}F>p6kOunZ3DT_aZ9W z%v3krtrN}JdwMg~&4=ntbu0fIKytl3wyz*K1wntTG+3!^!6$`lM$a)d6=d-*4<_YN!~as-@J!JEeA>f%jbLzKj> 
z;!O`#wV%s_S>)z#)bwksd{T5%kqbFpF9j@_){N|Lt1{r99pFs|6>s{Hpo=%19Mo2> zwYheT=7n{JH=Pdt)&rdOzeW3>ky>OTCN6@2Cux5H-t={Z*7)(J^Qz1~;u7xsWvT6A zH#^%$9%P$UulQbtFw|#9^z>s4Fx2dul?qyApUt5aE>?%>`~C$eZaSr#^;aSOntP)h z&oijMG)wk43UR7`kAuMJ&cqzHq!5hS-1H*muq zKDAXZmZ(X)0;BpH3gUe_7mVuJJHV*ETApEtO7OED@Gz=>#Z?^wATQZfdpV=Ps=rpA zR@C6!NMLiQa3mCI;9CY<8b=Rk7K~sP2aUuVoa&#lzTFW%^&AO+5~>uR`hko@Bj({# z-#}5;rviHA!BfkywL{Qq@To`fj3|dMrg{RdST_cmTF!AvZWCiVagNNk7>3x>hS0Kq z%8FEWz;H_lI`#Q7370Lj8h%FeMWDwdk35WtP^V(a(f|*TEDd1QIqoXJwze+)!FQ2^ zw^I0A6_;8usez^zd_Ozj)SdWszWjqsE%4MV2i~=BBp!0IfK#8Fz1i`j1U|*B6$J8~ zk9h79_#pVyLHndVQN9<@YENWbdD#Vg>irmjyNXXOf$7>g+v8Kq!M+YYHL(_(O?PjP zPmRxNNzEAwEgjn)pE}+xFwcSdDxe;h>{sc3#YAwigz2FV}n zNpd^ar23=uksxP9SC)>C2JBeC7K?*t>+SFHY^E0v7(2MFva~p7Z?rMB+4r93+8e!Z zReNJ?v1e}_a3F0;cUy9r+8YjkAfoaqNvFsX}g`z9}8Y~Fb^;LA+_`sFZ--LMCc3gvfF|W34e{{;X-CRdO>irWyWA5;6rvi z9Ng?*d2XUbd$`*gtDGK%wlFIe4t2e0Z$s;d3)z!e`}wyNk!VPXQh) znWH@QGEa@SFFrSK6v=v=JwrW@Q`De+SubI{cobRjxOl8T=S4%4JP*Gy?kYiIWjjE@ zSlK7a;dzEu%?wFQgLqYBQrG!mgMBa+7f$M6!YI<9st`x+W#a4vm$k2$At;-3J$nbQ zI*C5P>bnc(>cyTtpbr{un(Q)D z~5X#e{PQd{}Xtt7lF4LEW}%#n~%2|{0%X6_p#fz1g~Yr!&{wej}u~9g@~*u z-s&hW`0!Tqu$DZa`JBC;KW+-}R-Yq&nR&Gnyw#oIKaSk5r>RZfIy9m3gL7u>_M$^IH3Fv8qROIgWF!(^|WbiG=el&q-4;Ir~5|opV z%DT7Cj2DoRES}b)jO5?J_k3K|uptR38Y<_bb-xk)lcV# z;al86(;%0s)hj9;J1Hi>+jY&qJ#Nfr!y>4>Wv4OV@I;(*mO^*a^ zix^YWu(lpzak}uV+QiH@?ct(|9RjT`+$>^*svA)Vv}(Rb#$iQc_sqwxoQN_z*yE>w zpgP|ktKW8l^GNM-$W+4cQUA{V9uF9j8mLUXNFSahqAivNEhK7_^GxX^BnX7nvq}dG z%<3R{AM-^JRx{nDb#jAn*g;YiFtpr&Id}WuT37J}PgL~>;nEQ~twA7I50MaP0t#kU zM9SLpAXtejxs@XwGUJ(%biFG0s|ONEl*3Q3 ztiRBh#g`ef_FyOlZ-!MJXYTDw0#s2To(uymo+Z_Gt^Ez@O9XHmN5gJ%0(ptOgsR28 z^(i|s+OO_En46zD`TjB$yhb=x#aWpBEHLo|2=MvUB=a}In0=~}uaMv$)Yx_G{iS5! z)U63{o-;u8us1^Ruq62Zz6*>>RY`KcdDd#UOwz(h?vHYUxmn3pOqG*YE9LU(wAXn# z-d34xcN6?MCY}WU;TWL+&RKILsm@7pdr+iYQ)f9((3-cu)DEP*HE$}Z?GiwL{jH4? zo50MApFlW*y1HYvFs{;M{8u(=j=udqhepsY%^i=X{64P~skd+CE! 
zjo}m$*6O0x@XX#OClVW5qSdTkKYZ)(4Kub z7eEq%+liOM1mb}R!FW{NYfpC8ta_OV!)2=k$&y;D_m8CF+)WPO(>%@D(e2^D3eH%M zUu?}UY1}iM9#0ppil1%GA0KEuBS4wZ?^W?Jl)E&ZK|tyFu2SaY?eSe3&*(~-DR)); zaLU2PGtkiEyGeN;%H0~z=tjA_l)>37@7`F^U6PIIjz;?jJvpz! z?I(@}13aKZ`{_FFCP(%(Pt#98^e^X8(@*{2tAsT<{WMAYNy?_5CTTxO+4R#S?I$Ul zeww8HBxTc2leC|tZ2D<*cM)8L4p;#rqFt|~gFef=h11fEf9^)XpU{+l`i;E?P16L> zg3e2iM7kD+lj*A|*%yAxYFdYZmZK?sNRu`fHCEM%Hp*P322OdA9{QwRC@}QeXLV8T z$(k^ly0|eWCos008IfZ$mc>}$iczuFzT#$52CVtPQaw$uA}RHbOZOTED!>{+1z<4t zrNfj3(vOs`7Qg1@KZ%}oowz7dPAi{u1<4<~0N*<#Xy@8cm+6ex*I zhECF^wgl+XQ05KFobOGZ@U4>UekjW@{HtYIN!%4r`B@CNM%9XpkJX7!GsmDA$TKU6 z8hHd}dt*2`u@)0p^UGH3FX~3hi6&I~gUlx?t;vaZA*v$5OfMzOqAI9UAQQ@JMP-nz z-aT_T-&e!g9=0qq24hl1IP(_IiO_1z2rl9}Ax8YVobAU@S!+L)bVT+Xm01$j7d1A> z*W@$W&;?W>aYqv$mkgSA zthve0r$zAThtiVtY|pM)u%EGOU`n%ugD0hG%n+lWxmML^9|HGOKgXa?x}Y8b z)Ex`dUZZCDO6XH!edbH)208WH9=0-54H@EdO0Gg!VccYH+Lvsh36lBy`52fdxxg9u z;byhwr~Uo`+VawV@1rQ5@+Bn8y7uo!;)#tU$suyKNwAD^W>Jr-WQ)%%3NsTk3!}Is zFHk^`@E?)Gl>iDspeFVN+F{-97tAsxJ^ z{?vFelWxtKNdl1~!zPN2gQ=u@8aZ*3Zmcy$D^vz1@ZXS;U(8tV}3?qO7zQzs_rYeTfKxu$- zv#);YS%48;XqRgqjvGx)j zP8u;bNV_xCtU92TjSccVKen?$1UY20+7NX z2jGajmDv{#!|w&KUH&V?!C5I^a6_fA2?PYkZYDZKrfnuWzd?XrEn?zO0h$RF2&4HP z8rj$%d3ZPIxWtCcaWZbj@M;NWwn}ds4ei+v2XIZ4rb?j5UFKyfHUg*8gur1n2$D#m zQEi9K_Vl;ZcIb}jFwTLx5g(_L^;h0jt3h!;mLPID+z%85IyuJ*LH4x_3lzw~Ub2@E ztJA$`D@>vV9^!}SEJY&0;%20&dBHHd$!`DL46|^a$pI>uN72DhF^{!Oi)o-Lhc>g<>HsU&rc7IABqP_EMeweYqu(UuHIJ~zpeyv z4O`O}G3xt=2}q|G+saF55{l&VX140BJ&Wvm-rxl8s$#Ye7SW6hAuvTvjGuJ3p87pE zY}Walc;GI~`%3*TePw|EE0Pb~G%eTN^1ZtyxmSCohbGhkSGuvTd`kcaR5=Zo{X)&N zyOGTL%2IPrMSG*wGG+^L6<4$1FfZ_f=57!XF#P+ywJZFq zwRCjPrp*gzndiK_`j8Z$3vXqAN#=@ z?A0+N&*M`fxEt)HPPyDEFL263o$_y;@*pj5O>WG~?;$n@>3yYRe1DF=42LEZ$M>it z(VV_E`Zd(`JU1S?N%~(Nx}i8OVCl8Ay(rXAF_JxfLI2jHA-wk~-}YKz`3>FS6!eT9 znNLp-6w03rDf@MFDh->GIjJ{&pX08qGg(_mjMSW ze)X%lHqe~!!L}Mwv|PQ|Df6{F-|bv(g>PCcMA2p2#dTD1&|kV+2A$3_cv$n_gg6bo zd)AQorG45bAO;ZzYy2hA^p9B=ezcKHKRs&fULq}G$s_S|8=trFcMB4W?84fkc$rBr 
zGnM!4rgZ@XUsifwGp&{6@rYr|VW*FYfhB+_9{GvgaKWR&Wc1o+@OqOO@14AT)7B2F zsOW|n?h!V^MQhI<&kd#h_9pW8r&%+)P5;Xvg$j(kXqD9Qy8ukV4Wf z=Spk&pqUP7&uHXjqZYbHogZ#={SBev?>uXadD(a9moZH!EQxGB(3)0fxeVw7bS1}jS9#! z0#V5Qx4Uyc-S;;&~rCs zhn}DFvw)uK<-Nfk$m8Z`2RunGZ$w(%tSb^z<|IjFG3)qr!%skUV#!q32Nf zT}!>ZbC5jc(la6X#ZBL;B=)rhsSz&%sc^aTG3=-1L*`;`{z6`&%weg1{xK0rF=|^o z#t-$$Vg9%Xtz}%e`q7iWf4{!W$Nu_IYe{wuJ{eR%UjJdusqQ4Hm0R<>u&epoK;l}n z_e0-b6UHE7{9geJtoeOPB`Nnh1#b$c;<8o!uKlYUG2Vlvfka#)CE4HHJ=}JxV}{7k z?dlE$LgpBR{c}p;bhtgpQD*dZt@dN;6amPOldXR2r_YO}_O3l>F3T*A%0hFI_X<)$ zX+RRCNsBILIU(C>(~B7*wf1JtDnR>YQf#mI#)GDIC%4h~@!{m~QXoxAhYEmVns|GK zroy}`;7S-pwhwXhVF4@D%-p|ek81$uPdC$MG*2LLJb#7;5*5hLIl(TXc8H&6lD77g zsL`&{;&86ThtcBk8bKs<53=X?(0Yaj;?K#2a{FN}aG2c?ry*~Ovx5e$;cwyit(Yi^ zQKaNWwHA{vi!*Gpv`w4iZ;Mq1of5QPE?l;+M>d>U$eP#4l|Zw=TMcWJr&xA2{DNyT z6A7bA$jKLO!HBi?U(iOnUf5taZgRL_H8k+Ja>8m5hc(<7%jPKDpwpEbF}P7hZxyhE zbJP}3HdC3~|NeO?*j+w%xqKelW!=R_EVHdlOpsy!S~=B{8L#4DaXI4 z;cbMAnIBTLUtVM6WImImu=hUx`s78~%Wx5iFi2$xgUDj}rf86yqv4Ym z%}S~uTsgTMfnn#}3UU`B3qJA@+Vp1ns(Ly({0x% z)2=bJ>vY-CO5%v#=UwQ*V^qw~$HdkUanW)&iSr%(> zzy^Ez7Y7pL5~jnq>Fs)x*d#* zP4>;ov6f7vU|fvu>hRa97kz4YJdBGOQg=G+C*Lz-M0TdKnIX)$P`kj@H%L&P_)PY5 zRV7DtkQpcfB%!==HIK60p02wEIxtTC+!dgaMu0}cqTU_2zQ0p|j?8fg)lu%q&osEA zwZxw?ZJWr2?4$NO@0y04FAf9b=VJc4kr(DKA6@Z{Jk%reIBJdfp>8ddjRf_u@8g=T z9D-CRZ*c#Y&hs}p&)=H&{1xu=uLhuq*)hJG5L99>rYvi#Pp-#jATx-$G01*I9+bJn zTqw7H;XJv}|Kv^nC$Ds#yvzUO+5RU-J5NscKY5h@$pf4xEB#M)^*{M}k!jI>{wLqy zMnQ{~I!|uAPSy{yaGsFd_S_&wg&hN@pN_G=L`@2G*ZZPjlMC&>2nu%P0E7gC;FfLj z)CT*3A9-=xabA>Z>0av0?@@QYPmlWZiI{jXBpY+4y7oj6?iVMH4yTX$1=q9tyY`l- z>GY`4^6W74?8w^Lfuh7(-XK6PJ?b*Kb(py|s?w~$^4GhBcOg^;$}jseFUKP!J*v`_5~ei$4ZU{>C5LF$$LUescsYkk*hw;xV@#{X9k281J%-ft^BU|y&2<1&7+LPQ8p4WvYLC;lE&Tk*r7PBB{33R_5 zD6-if&Q+lDdDACDnW*!0XnpBmF2KpRMNzS9SpRqF5GBy6e@r{)vI|vzOcgq<`rpen zhQy1M!%qc{IO5bXvW7C#4_y+)n{5{aeeIHS-gmyoF26h zhGVDEh{kg5m&abvez{gksbhb}gFN-2>Gq5DVpq*=;%2p1%hxz}sG^Gd+VjHb22lkM zi4|^DF63vi0Ea90;eRLhHwvh>dq3UneJQ5<&e0X(9_Ohw&rIH}MXo1{3nLm;{^l`$ 
zQlp;W{;*B9OKD7*5-7tQ-;_m2-qjppU{S-qi7UIrYUuMYCw zn{>diX7|0<4K4ETPTn8f`TMP>4l1%gWBcI63AHy4qCd7le`2rJF@pjyKpQ;1zdVFa zM_Iij8bjZCM&8N)PhL;#C$mPu%Y)3z)0*`C7VrJ*ck%tf+rMwV>h({qKj2nD{}hxf zlAi&RN3_iGY*oh(5*Wc!>)C#6#nx_}o+wrPv1E_z?z#DQV>Hy9z$di+`IHqzpb5har9uFC+_Ii!Yri>QZdYSxZe*J{Pn48kXsb~FXE-zl-!lO}wOfB2 z57ln1jt2>D#n5jKJgc*7vWI=Tj9LKkkbxfI6M4Q{?TD%Ik){H9TJf54&kuB-pYJ~Z zN1jjoR_d(c$R*l!Z$bSA6t_M8sQdiG^8ADOdq7c%4}fMJ7sSWM_lD&3Y3;RN^{n2z zZe2y~)_tpY$Aj+5)v#sUuS}oPUQxTHXZ0|CJ;$$oYxb{OcRbsw%fi;P1mO1i51|8s zs4=lzJn_liQi<&U^jQ|Op2h#%t10`Yj6XJIPA^tx{nGd#<=4#kK^1>vmgMj{`FGf+ z+O3n~qiVN~jZZR^RLazGCG{UK=Tx6bfM}rNZ}?si?2Q#q$q(HGts z|Hr>VcKknn{9n!w@IBLev_17rhbnziCeFeXJmrCJI%I_}u2Z6s58WVWZ@^F6_Y+6L zk!;vW!-l;TO&@gt4;nv++>RzQyxfgz!d$`8;2Ftgn2cuY82ByuYu!yRW>BlSuMpCX@A* z-}04T@RgtTl^^ky@9~vWzVd8e`7&SmJYRXNuY97fe1xyOzpuQzue^;(y`cTR@>{<0 z3%>HxzVai!@;$zC%2%H4D_`a-pXV!&^_5Tbm5=b1_xF`|_m#J;_qE?we#=*W!B>9T zSAN7-zQ<;#5K^L*v8zVeB_@)5rB{=V|=zVfzDeeL&^-}04T@RgtTl^-cA zYczuKk;y7SOz9KHl#qIx9?$N>gyKl556<`!qhtVOkEV_%4Mz7zmW0z|`g4**S2>F^ zl6(n#DnN536?C)x+DokU*p^}+1?7fL43#d!PKL zKHtNwFZ5?E%U-sA|5D%gC;RAo*u3A}eLnaHr@p-5pIZ+er|Zdu)dSr1q&0s`&yii? 
z{qWXs>bJYzVSR1*!5V51>z!o&I`cW>&j95*Vm-T0?bfOB5zb;LMBfsA-%5@lp#Wlk zi+xWApX@D|sz0F|wyN43de7$mszeV52f=#w>_BpbQ`=s=4b-{R!@srI>qWI+O-;NC zG$`OWs}g%>m$~#3OfB`q2UyQ`Nw(D4L+e;}tdlGB^C%5yb{NUh>FPfj<5w;oSiS{U6o|txBrP9&6F|Bs0u-?NWa-xwut|w{Xk7+z$AMxRkXbEp2 zLB6-lu6MpC%Y7pmiC(O|LV9eS>LQbji@76$T^(PJR+Wm{QtzCKn=Tj-!IbHt#w+!H*La0jO@-rC`L*v(>6O+RdW#Li{+sBf-tVNh z*iSEbu4?m!Qx6p3FeGpT(Iq%Vq6=>#m$#^>nj>4kc}{NHEsIg?44tFG$^K6bMMp^B zKwCM%H1e}So>{>E{GL!O5+4qLJHE*Ng`MB!oqcam=ifUc|9gY9;$8XP_x|1Qy}u~0 zeY^g>r--id#M8l37wl-!B(n4TrEnM(1TKc%dkIP?b)T?`4fbxGGJJ^P_5_ETA9cd* zNFKLkx?VCZVom3$sMW%HnYvlytRZWN&KVjA1SIZFo>*-4L+9b*<^nuloUSmLcZ?ckeEGQkN$CVNhEc@q5ntUN&n9l8g6z@|6EU6gs>&j2oAx{r+?OFrGLn- z(SNK*KS2E9)SsmKaQXtPZp}u*&JpT9W{ces>Ys#f+O|=odnlVdCHC4DL+b@(2NdN* znoeU!p0Oa-l{0S=>uCDtFU~vP3nvHbT*Rp)W-LUK^`&0(NJwNLD45eC`<9RLMMZ_i z;P1bt(~bj^G{IgA?;@#twU&H2DbF7$M;khhaOk)~c*gmtrt55gNiSo8r*?0C=l;n{5BY~(cn%j3IS^M|G1EkN;Qni zi{#{bESq$n+i(S1&tBP4w|;2&B97zkGfCcI?ik9I^4f2ERJeSu9B(RychIu7%Cdb@!LJD(*!*kNZM*a?q?qlKKjb`231 zUk-HUI4AyCq*(CfQkSq9Fhe3{%>g->KcseCD7%8`USHDNWc>Zd6Q@mAY&3gHX%E5 zDwqwpi`(yt!B?xFW#bT4(yKIb)3-X3sw3$eO6>dagM{TkFR`v*n^mu9b%nz!!myI~ zuIwEA&jRfPG|dKq+tCW`p3PFnWtd&3(s;KnvYMnBp|+aQv@&x(gi3##L2#ISmQMPK z`L4W_94sKK-jG}pPSy#ZbcV`wn7zNL98mjIl|+N4kG)8(|55ky$8Hk}(tA9%klS~g z+m+^a*W0-LTW;Hn-q!&TuDh|g$f`e-9;rCHYFLLE&)=A^1F<6M!|LMJ%2JCo^nXrk zqHpM{cTP(kxkW|@Tej|q*~64MAdO3dWt&#|F;dqSh=x9jkKxoat6t7Hhb`<@Cc1=E zCyGUSfw4%fdAG^Eq(mFG+M7gZWqBvIu2nCAeKNOahc`u&vx=!IetPY7r4~R?|Fjx@ z0xP3pU-K??O4S?{^jmjbXFql1AYuS%0mke4X~{lp5S&^ZOm_lK(Ry zKse$9l|vVF)F_FdBnnC(XaWgNa1cDWL#nY@uBxuCuC5-Jtu6_a`ykSi z3`Wr?F`CiFnk0i2=I@o}wOhPOP$z-0RUfs8X};za&q>ZSgE`B!OSE`)`-d8NsGyFo z_|bsJC`nO&j(FPh3+0q>54uzta3{vlm%tY_%Te90M}oeh*ybgiWGz zLLJHrw~m6CN_H-}98LEhXYjdjj~mSJ@G(x(bB(03LPV?rZ0pQgjc#QYuG8ZMfG<$b zLB8X7;j{H!>obfc)9T8S1Jq8>MyR|omDtu<1Lb%Ln&=aLbrW}ID$B#KZj>*>1BshU zxa3Mi0{nv`(2yV`J2mEWqbr!m{Z5C4!9Eupet?dxeMzk+ldo=JrDV^GlY8rI`L_m% z;K(bp8ZkPMufsDlal7LxEZ}cPEVV4L5|R5+w*oPOxOkAU1^8DA4)SLN`a~YXE2Lr) 
z|HaKL7mWL@YQPO9p1++x^M7QfZ2XsuJVP+mdz(~Le~rupHK9UD9)oy9a0mpN#VPiu zql;3t$rJJ?{sbGrv|D>SG>ggH!`g3g1pOAsypdqJ+O@9N;+9vXOzl?lyU)Bs4nd{~ zCzq!{#U_xWGX5<@wiNP%X)cFPM2F#FUNA8Z8`0b9l{%D9aC!);?prVjC&aofO1?;0 zXEFqTNo!;nnN9~_ZX61!oQr|L{u(kL<_6Tl_EZ=a5cLntRull*DQaDN9L=4YtmaoC zRt4|31tR6y*i#0xmOy;S*mS^@pg3FyE6ly0dOM5u!Pv-Yj`goh<&^Y5;%w*})q0_B zy$FUTTZg_duNBHc*R0@BAd6&ww4aH_`e*yFeVsVS^Nad^Mw{BTX3TDvic*N1IISbn zvvG0p3&&ngfT0a4BpS+C^>>n0KQfGdV}65MDFQy`;+0i3`B$&^HDcz!WX!b&#J#n^ zs`hoPOQroU%%*PpOg4!jZh+wQeK-W4XcPQSf$R|c91{GM-o1f>(vE`?jFbl1GaKdu z#Re%x5%I#|gpzwgq0EI&H%A z3qn(;L(XeX%l12{wy1|{2R>;4RU1#oA4EP%m@j_(2k>p$Aie_|#dl5f8KLpLYfj7R z?I7B;9-@aef~f5W(sWJ3x9K0jw-xctQ=DZ~T_cl=61v;4J@29Mj7Dhu@{va(J{jj5 zYffwF61!EsK8Sv3fY=_s@ZxV>3H5FE`-mi$g+n9!+pIx)fBaGk?lynCJj*XEyry~R z;^|X?be2Q(X7vz0q!FSsK0@)a7hmwlt^aiR0~5ueRXCoOJo=SeuL8QGaVeHH%3*Ay;~uZS#%vDO1OmiK5|v~&7)7pCYtYou%cl)_&h?|{?NS$hC; zk2P0bl_0;(OXpy}|hh8{#^rwMODsR3EVY*QD9aA6 zvRwLtM_H!p$+uo4H>B#xRj!`=>;pY+qKt^ouu_$|58I?`acqIuh395voXri<$}?e9 zz5gKHyY05kbM{!x;b^2hibY#fe+&oWP;wLsz6x7 zS!dtYVm52(GVJ?~|Dk=4iSZBH_Z|P7eczr#YLD9gG5h}U_q;6gf0KS6JokT>et&(} zOFxzC2)QyP*OAf%Do9~q9ZnfT^2pIf^dYJ;2p{B2%Yw+t#aB^Q`i{pRbK|q=vhw;J zA}js*_IbQjvT_XVLnSA>AZ|zFtw&B0DCfwDElp7R$k{e3nE#WKV2E?F-91-Au16!p z;{4Jv);Xvqc4so@1q4js_4>IEs^&>}Tl*XZ?mx1(W!iNO|zYld$=7d^MAwMG$m3%Pc3`~^bf!by+dKR?q6e9G7kr ztA(*F=Ja}BtwVa**7&5qr}X|9vTW^j?tYJ2lQ9Xa+J6;(T4zqc`GmLNLu*|YpVKMc zzoWUTLOBSPe^6aWfIfV#Wov*V~1NHpz-J#efy1MvYJiFds5;w!@Z zM>kOg@9V`X5T2@Xp5p4*sI#jyvdULp^bO9}crBS}BF|4Li)H3~S-DkBG^U_?6HARL z?@Tl_m{00%% z+^)O=ZV531oDZdeh3;4xbA7HCsncXNX0I0J%f0JZ>zbE7nL?p@f)`B!^lwPT162HJv+OX?xFMxk}!qaZ32dls>kT1(}#dKh}610K! 
z_$CiOs`0hTXoGHkE4}-UJqik>^j^lw>HQ<@C3`>Fi-mmv1#B{L8W%>|m6(_S2g7W# z30P!Kwq$8v4Q5F8=moSPzVv#}=p5uCR+j(YE=J!{?RLE{@7Eqs-f8sH<3a(Rfq~r* z-kOtL`>I<1*YEFH?C-V>wKDnq(J;jjj1-caSIL@zC4sVK0RXL}-DyK7gqE${_ zLn>#;epQa^_i*&h9+JkL?UcT`!P8$%)kYDRQ3hR@lENKvLI4@w-* z3u>a|?_lEU>_Le~=~kC$3i$Nk_HkXbCn)E9{1Ko;H#D=Fz}Y8mT6zo}=X{mpnRj;s z8MuKB3WOJNJF{snGNhHlmzdoFj#@eyk?aFUmj=brL;9`7513U`f~L?ziJn*8ROe=$ zd>Iw-lWNyvU4iW-GXtHrXA3ejD?k z%c{qMr}0U;i;?RmH6In&^Ul0UHQvl$P9DoPSFx?h&?<#T^nZTKHJCiqX;4J9FL^T&(FNZ3ayxcq9SpJ;Dx zsP#Tx#%n0r?R77dY#ULgzatpd#V}zgx0Tjp_3~2;>yawRC#l{RXBuCq^+*6RWH+{$ zI6|bfA~}%_Sw*0S=+6e^$-CZ{`FyE)D~>SA9Ab-=oO)G0M{IuLsEGmg!>V=?0;0mb z`eFYRfU%$8VPZ9}aW@o`RlJE={Uy(Rt*L+;mWTUzWmh2aglq;Tu<4xaS0I@$`$`yx zjw;yu0Qv%LI0L)Zuc%-RXW!c1AuAxDczHG)CBfVl5;(w*ci^HZ%WWC(eMgZD#;5xc z-!L)Wucgtp9|W*TO~35y1p{#c?btw{{@F(CBv8oW2X;oR#ZBXrVx#v$Kup*?1A|J;sO)WC=#dB@(F6Q>f-M-w* zV9fdXSDb;#*_%G^hBvV(4jzE zkXY||=&N^;4wD&L%*guzoP<#3u&lwL|0Qm8gl$GYNn+X551mGcwXeDJDv(l6=x9p0 z!&y#J*w(ips2^MQA*s$)SJ#TUTI>eh&hWCY?_IEN1FHN`L>ugo}I8T7Kj zHwQCrb4+UXzS1arpO5=^*?_PNUz{OtoFuXcWp>jT}Nf8X*NXKa2)qA*M+mC4#E`ByHOR9 zxpcoG^(L%MJYpOLqAVDmiH|f%b^U=mueR z*7Nv1-PLcxuMc_To9P0a$Plny&GdV%=@j-A%g6!^`6E)tzcmeMXX6H|VR!uRNulbU z_V~kue%U{2!jubOL}OiyYm~-ZGO;&BO4uQooYtCQEJpNObjS{^0aqx$<)7pSL>3v* zKgtic-JOE@77~`4LB-bq)PH+?P2>D9$mR#17b#vu#Y7K(h97EQ@QgqC}J*2rTVV zIc3TPlP4e=xHV!?#B8C{hbum>(d6@e?)C_sYvh8^kcMk zoa!-Te!N|f(S{7}5S^3FRbdwun!;!c%xCI6M~eNEQVn`%PPSDk8k7DO`pZ)6AN;<_ zD=%UXSU*!_?n7!0$8)H@sQr4ehFs&vo+TaQ?--k&$0=OwK)yq@ip12Ir!F>+f37@w zf81S2Pu+~!ujG$nXSAbs5;DQqp4va-bsT9=>2o3mm%gC)I`3~q?@gq4cch6)>4?y+x27PvPYY&kZ+rcW3DrAq{c6N6z%ba; z4kHU-BEb#&fQpj{#2bqXy%=$!mu6c>$soetrvgj&W5-l@Q43v^HgC<{j~+Wl+6bdB z=%?a?zk&}OzxG3r@A~%3Bi0qDXJ#((>NjtE1(sx5y-Y{QQVoq49{Muo8+zDbk(oE& zq6kXIW)m6qv$Jcn&|VW9a$J)!>VgX|sVT`4t3@x-nu6W@R9$*`lCJBe#y zbsd~IVxH0?mZi|qx~|gJbsovn<5pkatz%7Oqq4gZVe5NY_Cc)Box9K_6Z+-aLF|_qtO;GeHurW5h(qZjc;PtgyxZ>l-}? 
zvh1Z?Bb=h=>r#r` z4bL^gAD&qe5%Z#-|3lU6Or5YX!9XxYeqQM5GVV`vxhGF?57jc(5}JLO&tJ38udpM`_=r=K@?IH)WY+b{SEJf5PTy>a{hbNbolopPIZ7K3yFXHFledB@pF z^e!?rf6&|OSzp%v!>ccpo*vEl`s!!u^MkTCB_ufhU-6%bP9`6GhsoMO--yaxAIdBd zEz*epEL}NYZ7!7|8yCeHI=|;H`nUBIWxgl|r{jhTW zQ(IMBqR!lKKZQd3jt=G^*i30exvNBZ!Q9|5X6intQi#($qq32>JM$i*@?q!mV1%0Y z>d%vlQp#iJ4CQI&d*?}|AJ0$Y{~OI)pVE~7FZhR3x7Z#2ZmH0+XGxpCU-4r}?b%{S z#t7MB$1pT%fn4Y^dGy*p!sM5E$azh|OU@ZKldItnCKtP`K3ywPDk}cwIW#X_o&8%` z4=ikizq>zKpT8SfzY1qLlSX@c(s+&CcG;?>XVQS8fG<$7@W1gxlM_?@`ZG=w&cyRZ z^i6uS!>R62>{ym=eWxx4eHrmp z7N614+;|jrmF)fE#^a7~J*>wQgVR937t`J^?3InU5nZo28M_}#kB4R;C%-)7k}D=m zop}85aI#K}^BgYOb}|CVW^}~)zLtSR)4fmZtt(5+>BOUcSSf>>Ff){AG<;^UEO9A9 z$*`vy9e~!#878uq6993_zzbjHIEz-5TAvADnZZ;v#qj|&ym4fDl`aknhLJVgxQZeB zI>dMokMN1od{(X7XI?AZo?!coAghruGQMIhOi%`Ed9IZ zd&nBPf|YT#beHr=N1{&H4hiS9<%Xfv-sZKB8j!bQnOI7A0PoIAZTq^3`5ByYY_s+W zv)`lK#a&~)g&j!O>0$E=PYt>L4riOKo~H?SWhs7H8S`|^i2C^)l!V&)FL|1b$X_w~ zOY>ziQm#Mc5P{_lM*$aZj63vL-fI2vU+|0kTe~}(vY^NFt!PTO^Z5$h{{LWi+g^sM zIOt)ccU&rZ*YcHGyEz<}W`C#o*S?Q<`WG6+zFAX*w_N|4wL>0+0bFM*O{+BPeax0R8p&Qy zM`ZZHH{oDxVB9)ZYz)sBwy1fV4pwNJ^^bDTB+@lVB#qi_l9lRWWXPow~Jvm zvq}&Hhtv8dPaCgf)C>|*2T-Et)<-Z7z70AVfI4j~DHYh1rUvze3Yi;rHA*8)=P(>& zmoS~BevXX}mW0A`8x^E~WfL~)YJk@`Pv@9>x( zZZsvr42JuBP4j&JseT;e%M;rNuHgEo`1ThUq2N8A0MTn*7|b{U6%>HDtvO7;#4v-) z5DRby2nnHef*{38w!Pj}%Y|4fKHwH|s1qMxM4u9B$dJ4Gd?8HC)_0X{tW!v@%=<|%(N}W9&KsYypY<<{GVl6# zF38Z>`gi*A+HqVpOyte6#D`xcoTe;ABYLJto!Xp&Phzg$US@zmFm}izvYNn*$-SFN zfy6;ePf<49#5itZPs~Mr6^XM?E6(9Fu35yP5jHfcK9eC7R-_aAZK6*jk!QD!=vo;> z%zhz{KZ5HGql(niJj{x>rdH%dIl{{0L0RwmxkOK7bWvZR>S+6T49R9aCv05C+B_%j zOlr1Dl=XCNwU__F@ATh$>?iB7)cMOgg5!JSy)wRxHk6?S9cQK9QHTrAFi9~nkS~Co zi8#_}%pZucKz?#ws#Ou+ip1N=ev6G|!+eolEsO!x_#OFfxP0GOy(P=H0k1NXS8PZQ zWT7UJIuv>bzgdXiKSbt5-$uO3GM21kmhR*gEZDRZS@TMb0Xz8!W%bVu)!%0MKE(TG z#rKtAR^oBx<6KbnT3lVXOG%jSN%mB-_P-y>TeM4vm(A5 z(Z(%)T*e#>3*LJLy~}{Q$*60wb}1j>h{Re6mj1;!x7zG}1-TjA0poX^DSW37-_sD5 zh38;fk=;B;vyPo0|=?j^En00A+wio09xwP!r3;ZO5-QQtjhmd{2?m1%cNJT{|4 
z3y?HEb+Gc>!3yRt$6&QF=AQ>6wG;^tF~Ars&}f|+FgzR5*og66CW@6M&sxmyO)#2e z#(<69p)Cb5OMRR8It$lyZz9wyq;yjm`(56`GH5K?3>W9sC8v`a__KLrA2{G`JRmpl z`v<}ee21&#!47~rU5d6S1z7OIPkbi)AU`+aHhbR(EZ-8Kca$a11=b39tJ28tEy|4n zZ}Oqk_?-aWEkzThzKyKY8knz3{uNcul}2(Rd)d*?@Q4WQ1w!waR)2-!@3Qy-<;JoS z!)V(&*JvBeZIeSXa;s7|9ziDYL7Zx_B&HA_OJH%$+_rP$X`GkGTQrWkPfHeE&x$Mi!LJJi8ruw6v zHDi_F-)HX7=Lz=WJbOog{{?xJ5_35X^2Wz}&}tQ~T(g&`Flu{1c|+Ww)II05TjTJzHdp?LKcIVA~}S>dLxU-d!b5S&K2u^cztgQ|t}=UTwaA zyEZf(`#ym5HxO-4@OCdKQyg+Y^s`LW2OIOJK|YmuQh$NB$&27M$|gAJBd-`~R>a%P zvIXbmwvf_ivGjf<6mPT27T}QVzPx9^-a1r* z0ODzLk?Nnk=!odyRd&gOP;Rqjm~At>uZ?Aa7Rk#uL@cG}-+@f{-oV3_K&Gkir}wR~ zY<8S(g1K2s2|ler8KC8qCNF{Hp%Ib1^-wiszIXWMfRY7Q z<~Au$Ua$d07W0Ww8ree?@Vz`l9sY)fcnhg>%6#itq)o|!Nx7Nn@9}1A+j89WhnBua zt-Y-EH5Ey@?+d;>5Q^a--q+Tq49NPNqI~4H%a=h%Kt=E;kXT(>y%nu)ky|4dNX{In zZ%k%9TaZx~%FX8g=Ib+*z>c(Mn?#?|5q7>8XUYlM{OgC1x%^u~{&j$^X{imUI{{>5yMT^m=hY-^r- zUSivhhgB-}tA&)|j=kEz{)e}I2uniP#lOS7()j*IJk1_HPIPw|4WmLJ3df5a%YaA0^n-o-D%P-1up~%RqHQLz>K1QGUvw0NCQ|&`A+uiI}N_r zWZy&jA!EWKKv3iRPtW)&zhdp(-t|c_#>c$rM$dZlS1d|&j<3OfS%latT%QpQh%TwW z5O+=^{>B14jd*}`ffITQo^O1>Ye zMb7D#W9LepYn6>e9Ml1rMVKzEe0T>F4{PzT|wL5t)lY zewjO!z!WMhs~51wVz!~=S*eF?8*o$@5e8eJ>-mAi=^!e$D#Q`tq3lUyUa!LqQ-*|g zRUv!|I5=niUL@YmnLi_XBVY5}pV}ZUK|QrD4oW@c6A9}|_Gp|ZmCCcN7d`x;>t%WB z1=8x}+Vu)Na;@={djMTLbJFm%C^W?La%mEH!c&4Cki&}^I%+Lck+%!wHP#7=)r#f^L;5_ZXIkZiLT&=|gNPMc+87 zM?S5H006d{Tr_a+TBi&#{B8Z=dC6lu35`*d5g<5}7@dW`2(YpJTPnZ&b!Ki71B2b6 z&p}#O`#bqnkYVXFgKiMY{A} zYd}Vwbb2S-2aQg@R7si=8s-laC!$|u*P1Du0{@Vv34{{okKQoLuhZR1-4{iV#ATK! zZ;k8g4|*)Ka?)c+{k4XpCt{C$gx=l3)UyhHkx7|9%@?q36>trN14Jan-h6pRh~}Mt z+|((|l7^CqY`R1X6! 
z&;aYdr#$1!_hc$_#kDe@(VqBKuXk~NegIDXJ|_KvoF=C=gEE5d_+Z9x*b7TT6G*x7 z6DCc-b~8?SRAz_g9D-?eKr`rCRD1^&^|V!Q#yX=xzSHORjH<1ItJ1?eFZQyO+F&GG zS+m`KIsB9u2A(+usLD>UzU{*`AhW7fxs@g6wPfcXp|Y1%#8$w+Io5cc!xU2i>JfW&$Sfcpr^ zp7GP|_sz_w^!u>1%=fl@*z+rYALOxf_wLt@Xu`T$F?A0O&*rJ5 zz=L|-Dr2MV@0nvco@Vou!XbDiQSdetoVYNEWxybkjq;0iA9&pi+{bwoM)Khz(_w%X zzNn3B{o2+0T=1#A%gZmcPt6q(Xa4(h=lxTU(DfII#+H0<4uETUkYO{@wDLal9TU#wutHv}*Ik+kA^9vAevUF_wIh z?ufFN*Y@$dX&#W5c|nG*W@W_}XV0stm|CehQ z+V-y4CpUWMKjr^WORf_13HHigFrMat*Er*)q%|=N=7%4hnyF{Peo)5OvExNQ8j%xa zJRpDZZ8cdPM$s&(r#(|AFp<`@Y1L8K4+%KM+apLykG{k?ZeZamY>58o+9DMP1zcv# zpk1NxIQ|ddfg&A|y9G;&h*^pWri%6$aaVfYNd#v^tMrR>b6(;{cTj&^^C{^;8GCR{ zD$;7DDS2jmVlZlaeX8CqcD+l}>M3Ja*Sm`{Dt3ZvkD;qB5#bVYQ(S`%ihvzbVA6Sm zO1?EO`}@*nLTc+w&UUi=Z<210DV1fZjp`DP1FZ+ElZZTjL;s&EIwp)R$nVi@4j`^<;si zqWxq$s64h6Nw+TNOqx8d8TRmStDA{Q*9^j`p|^;+n+3&$X~&=9JggS7sg=&;)OKj# z3yQuqW$|y&Na!GyKG}y{L+vsawC+&!}?<9Mkme2Wpv$#Fx!bw;Aq}~U!`U+EofwWNnKGvQ~ zuxg)7{5s|ZyY;}Dc)Pb6oX%A}FGb>0^k2=^agFy*FCEI@c zGa5(*POTu#=_TEUMeFg$ACEvRM>-k}#A?_vm}-Nzl|CqmjEzd|!(XHisVWU3>G$ZR zSywiolR{(out9yK{B2LNH(Cv}No$l*PE-X}jyrB5&v(d}h4I?;BT!{DqG!;&>ebWd zdV3YBAn^KnMz7C~CtIoY8LsdIk4SscK9dW2e%C%?#^#V&@Gp>PI`2`ovatfUohRE} zd^DIb|EFc3ANta(aJh9TkZ?UXrNfa>7k6~Mjby(W@Io^&F!T-5iAfa(ySxi9(~bcc zIZAijkpLgW4TfMt^X-wyqL=7^Sjj%T;XRpiI?LgWPVie6XYSBsyFxrR7ol(&TwHjN za}*ww!c;9UYF_zHDOd=|&4rEe6|NWx#6O~AFxO4zdl72#y`UuR^gRykB-?$2-VI7j z$S}+6l{m3fO1y*;W*0#-D;Gxe33Bi9LwKs^7d9*RE+_HKlb86H5$IY`r`*cVc+*H$ z#-P}a(2>Y~#oJC#1~KI+!opIPGt|eh!IL>Jl}PJFE==~6Ae@DaG=n2xFK=UVZ#)G#I^XdysFKW?Op+(teQ>gaKEiFx z9%!x1STX^kIT_i_FesDTn1iv#;CI-pA+#ZvpVXNn5x4{Gi5KP+I8|-BAe^9GFRYm~ z(akT1gCLRaSTvXAxcnpsY!u+v>~PT9-S6Vb?{@h>)`oj6 z;KFqSy*3o)Ck{~t`09b7McDaF8oSN&NF{OjGc-;B4Jh|GFg_NNK=1= zg8v|61s5t$ZMzX@#qQ8j)C{6S4&_Jt&G%v42je>kYWKm^I3H``PXb^K<4vqzZ5Wk+ zL1J6ii|Ch-o{Jw8dPa_Ahk*9hp1M58y9^b=b8*u_GUs`a4wG$D@&%IM<1Gq5ZTGQo zx!DnAt-t7bC(-##1$Rq1`=d(7ZO|zF9ee}i9^!t0bVC^EM0CUY0E6U=PYVU-BD91C z2LOZ?C*t=IpkVea02I&$TsZ+bO((ZO^aRu<$o&GWX|XW{ft>9p2#{SELm^2N9d3Fe 
zq=`KOQbUOpW5a}`T@bdc>3Ll{^0hGYJbbl!07LP|B#yp>iO-Tb)BCC`1AyZLcxzL< z2_UAA+TlP-H&J$wchru7$XMl|Al8FNw^eXk0dDOh7$=|ly`#(CIx9z-r=K9v*;*v+ zKS%n=m>lJ4K6(zRJjgGSKq;U&C`lN(8@xAZkLm~DEK+d#o+2dr9HZ@_b1~ph?hnC- zEBM3aIf6e2k=|u=(A1gXd+X}7;8)_Sm5nw4zf{4$zEI#_l?wk;?8iZV1W&-g3?@<7 zY^?|%mA!&c`S8%;R$7c}K>)w9%@4x_j7PGIS7)9E9I(RVj z6A*5N0@Pd^%(u*e(zI~=4WO$l6>aYam3n}AWPd7|^WbS@yrvflI_)08@T#uRDloI0 zepjmLVl-qFa@h&ZYk4X~7NVGhL1~P?f_FqOjPqpat{#8& z{4Su9KL0E(a7bvjR4u}bt#sV6Zfk?t+TY1%?);U^nLxpQUKp(e{q8Jz(~>#AqJ3L` zR(Rk)K7=D&Z+=_Z7O-#I3D`DYuDeI*(gDMZQEmnh9=;q8C>RhZO-?Km{J=JkiIZcG zwj{glDBMJG*>(Vc4j`dD^W$X-?!A}~ths2=Zf~S|{yTK6)zm|`QuVw)`Q3!1?>dZr^h>-+{Y|N*~r}1O^vGF zsKDK>WV?fhp`2?wg~>O7GZJtBWy%1&6+XP#1_VC*7na60M|5<%*Ea_Q-m-(JDE{O4 zD>zBWckL4VX5KWxuRR^T$#%syShQl^4Y&%wI{`)Eciqqi@mqSA;CBbd2XnJCZj0o3 zyA{O_4-;^I00v{gIk`2|GCbE0s&^Nv4}v151+WCOjbTpgjFpHF`B&TlQ$^ESZM`c*_!mH+8Z>Bl!SU$g>a(-6Q;ROIhQ1^;<+mK1)on_IY>z!%9T zbugrUGjN5Yx+bF$&7+61Vb;&t0^UQLS03VEYJLsBH+eJ%RVCu+(`H2T2O~N>8(-oU zCI-v{a?g(~U%T#*e&2{m!iqtO*w4RE&!_PDWY6<;>iIJeIBTfq`LpWzu^4Zwujlz4 z>iN8b1ltcFt{%Gf0K$#f6D@wmywK@i4*ky5@DBw1y+T$sZbj;OCEXdxb~97qzjr#v zZIB;hr{)W)rOE=@0mV=aFat1UDO&)VO6&o11NKX`8Bk~b(t^qx1^~#4$=`gJ!!7fi z%>2Rl+valU5-0TaqFoFvqGvy2n+m2x>-LVQgW(f>0 zzG2VoVT>5QWiev8j|l4D00CW0;EzmT=8l1!w6^{Z^$GywGhq^xH)0R4{bbH5s`24= z9q5eZsD&b>;k|JDE?_I+z1W9ajBn*psOrZ+c+?yY-G|mXdusBSh!w|c%47B4qz`|` z{AfJ|U2gNuV)Z->1Hu|A!>7jSM)mw7HBN8ikB1Ms0W!ykGY~1Q$EX{J2F}C@YG_0( z3K9C@49_`0T3vpthoiRVQ*yFCU60RjLu#~(EY(*Z_}K{)`}`qmwX(kvN=B_~V0cQ6-7J&Z#y)|5dw zl7lXTa6Y%1lR2Lp3ZyU=(V_Sa-m>2P#gT)G+XU|}Z9w1WJ#tp4p8pH_%X-bD2Mr3A z%0H{hUkWqAdTpDA1Ne8S=Qn}_tV;Gene!ZBwo;sNJ-^qf=V3^x^^5x6p8fh(eUD(w ztWVVUa?It{JNT~fOy;y~jR(u*L5b$l3fKY|oeMEl*rW4iIk?S&S&8P~;gVFQ;1ZOu z#c?r9EQCQoW`1ir|6&Cp%ezxJfL#7rH=<)o628WQ6K(7R`gR)^5yBTBl`2sJ&P0h% zJtfLfg5W0uy!AVjCa^g~6?+lIY*Y@@#Rjn0nXq|L>}Sv$&`2)3*l%SD+Tkqr98`g7 z_jHBXcu%|QQPLWNzNr$AC|LbGC7#f*-n|U4z}&jzm8xV%Psw?@_DP0NWDG_WEV`@z3wTI#}a)MQ3{+A_jpP)V~Is*vZmaR zrFflrG;q+%zJZWUJjNgc4*-`LGYd{V5N)Il!(4^^W=PgvBf3Hr17%mCb12jUh3t{M 
z2gR(j6}<$|v#^3tri}w=jsmpLh{nlOq8#TaK(&So=o|v-rb-B)ixr?XC?k|HPjCPg zI2fIv06pLV)QW&!Qe!ND@)e+~96(k-yK7gIhJuj=eA#~wpcN=)<*5<^=mQ8o%Jg&q z-LC+lYbQB??pJ`?cmQ2aK<|6H_BRFSdkDJf+HeP>37C%*M#B}LH7H~gyMTZWWC>_b zNIH;t%!Yw!vMpa9{GGhMhl-$-NNMO5YCnV{Sqn^R-jFn+@1@kjPoM(mUXWcI;G|-^$byKO;VXW}Y<)TqL`Df*P=DMMVo`J+F2jg&J6gGI zbDoo_DwsGC4OgIq8lo3UR4ejJ*x>;f2qyb#JX_g#n%Y969sP9kSS_N(5RR9!MOw6N$)V=zUEET30}mjz^jx1Pj||HvnYnzSbvd z&hnw8c^TeDx~30U09$W7p8UQ!?^1APVzRv}23l63L6G~VENSi^EwOi0p5bGc%v=ET zF#A8Dr#;NRO*8xXUpve`548h*gxPO*nSHg)SBlxsLka1Qxu$RWsJ#ed6Z9)W5pyx# zHpJ|m@Y2id^H45@*^TH`3NM@CU*DR-@E3!_^msj}rqgvT9FXP-07r%wzD(VwgC9xP zGZdaTdg|>i2H*cC?mgIdEGM1BKq|if6dRN1+WDwrzKyaTzWAUvIQ8&h_<95oXk02Df8Xgn!?c5NZ-OphBCm{S39ysJ)?K2N#m7=cL-;AvGL8S`x!sTW0u0UXL zrfwM(Lu_DcAF?WUYm#NFfi|LlidL(u9c9atD)UDftBE;iRr?9cp(44#Z@r?nv z-y*ww#{7Fh!*H*OMvRw&apxLGI|9D&u4$EL1(E@5PaA=vH$q1mOQvO(g!fH1q9YM+ zfN#SPC#k-jZbTnP-NGGa@3FkX3*d2fqIsYXt}1I9?lmUV50u49dHJZ7;a$PXpBesM zxC18(%$6vRluV%mJjJJlCJ}^P1ALB}D<$@I-WtiWM7avYtKfsEGyjRHD1c)%*^)&G zNsmsFJO7qrOR@75pPcxAqau#n6_cuQ3t&kbnYsdzYDCKz68o-#YMYzgFQ}_S4`z=6 zJfJUas6H6cNfL2{&6A<2#C6Y_3a6WJ)nAX4Ap1~*tgGEBil7mjE}(R-o6$jZQ& zy}iNsSH)A5{*Rai$Q&}O2UBL%HWzdU|=^~ET0IMj`C#-tU2crW}$c+9Fc5^TU`E0X6SxnBHm;AKQF0qN1p zvRt2aB)+mglK+bC$87V=9#mo6TZ9%lA{g(bGGXxud0HkcYcy(`g(G0VIN1|#-jtC6 z{pVbu8^Lc)@3fph`aA>z*0@wi%k)2qflP=YH6J_)*tlW2SBggsVY#e;G}35mHdEb2lh&}9Ayt5TJ^BEH$YTa188KemhE%G+$V#&754)$$TRod`Z&0O=mf zt)u%%NAMk#iqH30nKQBucSRz`3*gZNV%iXDi*q(Z^UQ~!IVJLrpj9Vl!0b6l;f^yV z_PHqa&bTH*GV8;N7rE_%M&|83H7Qiqpv!dA1Y3KS9p|L0c1o=}I<@Lao~k0nh@VpN zyVp@pWWPXA1RaqcC=a`qmyU#L#3mM#R7z$_WJ5&IF$R&Ll#Sfw;AW=04){oB%5fMb zPiD#sEY3UF57gv+fz0O)caq%ziI#Tra|m#424k?LMbbF)e>*Rw6G-@}@TZ)THZxLj z{s=up%w4w1X9*tFM^wE0B@umy=~uyZFul<^6}kC5CISxut`n|CRhQq9tSKd!O6E<; zltIRR9@ZXW@3D!~2J})Rb!oQ5mnE7$GF;zH-Tx2XP5mn{le?)~<4fsl_<;8UcT-<% z-G%SmXVhr#IUxn@_wknF=FKO{2t%HCQR8+g^NE(?R8R|FC^ZWm?X2>mZAP@02I7jP zoE3XQk!@y_`k^+a*TalAe_fb23?wnnHJJa?>T2^h%YTW5ye&8U{kOE=N4oIXmQ^R- zZVlgi&-J%RWQrSeaVPe&d^FB+=z5Vy!5oGkq8haJxgJ&v+yJ9!)ky?wmN|!y)U9h9 
z>HUgt7WHI)#pl#2y6V~dZNIFMm(Y1oci&BhHFO)Q3O~4aaBl`~a2fzNYm6v+!5^)^ zLNiWUZ({D90G|bT)>>=R2l-Du??1zPt2MQK|FhWs8mkq(g8178f8Sbg&nEDx`M0;6 z5N(e=j~wTb@1a75r`|a#VP(w&@ec#}YiPG1YgTr6*H9z62*Vz+3d6gQ7OXQ_AW{d@ z>J!j4+^fupVLr(S&-V2PoBchE#RnBPB1^j6BTjAvMmdpmu~{YLa- z{z?vk@bJC$0u}mtm$SUBs-3vrMQD8>^Uv1<~}8{+}(Y-5S=V@-@)iL^*>1mc50z1_Z<1H-d3XI7Sl zXE!OY#5DlRvv8>>)`1*rTG89hRp;;`Q&?L-Lna!sMhoH{eHzUuMz_vKLr#=Qa(r$Z z%wOBhz}cj586vu1IKjU*7k2tL=5u3!l%_)HpJcLk6_agCW3tO(g;c~><91(b8c7s? zM~zkWvE;03Vddq(VNeWhW9YCHjt|12Q{uvObAY$TEbC?uY`nU5b5L8EG_xHm6DhIg z?bcaSaceld=d>KrWrWWA$jW0!BLnSKWu_Ng2*EGQPNS(@>>K6pT zx64CE(Ohi6Qhd1$5U>54`F=k0|D3_zt+8sLPVM!Svd8zi$ND1Qk3izURFx6n(gvqv zqR=ylk_6ZO2IUdn9Wqr}xYJier!Lc&f3vE_r!mnd zvwa~>tVV1--h{961&!EBe8c$QL{>BH04t=^TdR_H@pe$+5tV|L;**?)9JKpLO=CVz z6ze>)2cVlWUT<@I7+0^e`^wzZxnwKi^9jVlGCTvhXT(`)t`VJzY)vHPr4@3FuG?BV3KL5>yfWk3Lo|hMEjjK8W6AqBquYU@AA> zDUQ3x!o5JSLjkyOdF_LG9u@uf8_JuPBl1A?JG8k1H;E@^;^f?qp?Ar`2!kJj>)_kN zOb)d-mJIQQch5k!A$YgLyT=<*gKlosCq^2wJ9k$kPN72ik#P70bJC<5BR?zGTzEbi zA6Jzx1F51LBf*uE@ZBSaj0m1bb2P{cZ`wr(q0o?nM+guYW4Z zK2rD<%6$a51C+Fupg8p+^wrFkB#05c9j!1t-Hf^e%TyTbWDyS1M|qAQ+^fQf@z5jm zLhJ_=FEMVdF~3I8q73XCc*9$OvTDkKpL7Qw8+0G2qR@rm2%?rAUUeZ!k4^wXzhJ2P zLTg0PJ(w(ki2gPRGf4iL3LJ!seh1DVWwBzn-~!R*kg_Vb0IlTVtvP&D8hQhO9w#Us zM$GeK7)YEa)r7!5VtKA|BqwuT7^W2KN{t!SeITAKluptPart57nfRGNmMii5Ps4fMBxkhv;OW=;zRIV@YOlId&yLykaPg&HOvah|;bV zUGib^IYzV^>`@GftHEDko9yB;6E`5h$u?e;T6DZ_rJm!Y;rY>v(y1 zg)j2$=8|w75PcM=fIL8E4n&)Cf?qGrPE_5u-DTBri*j-2g%limeM}mDc!tdI*AXNbTd8*MbRQ z>7#|E5txin;1Z-x=;$kpkL^S%96eW2;VyO!J_U^XZ{{?{2;0LJ+YCO%aT~vs;8-G) zAvFtmcsw3fByKAJIu%8?k>`!*5EhJlmt7H;=hnAWn#_JE_x+)!5o@DBuIHQN6@Ye1 zpy)Tmvf#0?q#xvLx78Hi%M)W;mm-FIU!<;yQS~vN$UW~)CtK&5M?+MCiPn_cqtHpR zSA01+4D%fNh&y({+CbWfSs-rV4y!q5Zl!C$xJL8~FeUqZrSuu55tySaH_vQ+r6SBV z#1SKx7bpUlP+Q7(p3N+?hC9q+VfWDCZlsBdz6AawI}Z+J7oUY_D~!3OoOK(%v(2?w z9axi54=PM$r_);Oa7O88cj7sU7XCBgFt8~ z3P+)l2vE;rx60VTUt3pGf?M48X>=|x!F=~Q@uUI}RiLzhBlV&KK%Nq-j}#k*p{l`% 
zLUm+X%Q@~yYvergSx*ToaCK0J^45>|u;ZTs$$l7jyTR^dD1a=J)OGRJxv1tad`Yw}bTDGl92u{iD*s+dC9UAka$Dsb}AX|pD5*;`Dl~IZ^t;a!p+j`^vrR&{;kw)*hWFy7c z9EQ5#UgsFGcBH+w6=Jl-NE2x3zsW+gc|~&cZe@gTM+l3W#wv|b!T1(;=MjsR7nSja zImh*v>O9$E6Z}X|snCjf|6$p#55~v(Ij<39vgfggZ35Re6P+bj0iXrjf?65gU0KP@ z)Wqvyu&|iT$mbE)Px5?3ata7|m^SH8z4>%kogwX@Y0P57f1#sP03bmnX{tjHcZKbTO}63-Jz5W4~;!TcI7 zhAS{_MV`j27fd8dq@7opI4>CA5X^rkP+ixg_F&js6^YjC6eA;(d{NEJQ!5e&1K)v2 zy1Q%omsoXCQP7sn(zfhj+LrYU2He(;y?S~wYXkg1Ff>%Jkdy%H+oBbrmy9I^$ul;E zcNH4*pJxBBM6y%l#6j>N7IuX{K+Iw{skuTGRr3!QmBwyXafkt|{J$q2yyWp@6g6-Zugc7S|qGQo&t$+xU2Mr=1t&)H2;?nn97 z>_Q{q;~Y?*v(g$=wHh>1(7fT36hWtum}i()lr}jYr+Er z-9CXnb1<)9?;RvR+4_w$WAtTCL&==Q2zq&?oZ^Ntk#O3+haqyxtbnu4kvAw;AN?Y& zxM7)PEV&r-?q>5C=zgB@F9sxYBxhgEI$Z^{U|$m63xVq*XrzjGA3jU=}j<2%r z;9InzDSSzPh5XfcY@VloOy`dqXaniCX=aVp=MMH6<>DI~suSN(zMSgut~z z$~eqmfT_k}Xag{2rogBK|5)F#yDpte1&h_9OAi7Bv3fX?7nh=3(FWX3CSiRtpooUR z53zl|LY`8t}WmZBy? zU#kZ#QHVmk*?j60T8Ak3$U+~-3E0c12$y%Tl9Hlnt)eJ?_--=j4lD@*W>G_4uDm;~m3-e<+vZ31(*ACj$DfEZ+ zmF(GXCrEZPDXaLw>VU49w}UF?$|5m4)AtwPaF>7#+CztmJrp#e9YDqKt`SD`Z6FZd zH88}R1mN`dy5Mi_3!{|&A-G5rW3Z1f6&ijP6ug*L+0fLXelovpHy6M#Q1+3^r0nh~ zd=B`|oCZ`2v|5>UxBSU|S&{A(Y}Ahd)5J6{_eXx3?8w1iWhw9U?lvlf%m| zK;5g!e-hH2Vu94L7H2F=6uyZ1I2cPgZL=NZc$uLj;pJf7Z5mqWa;i}!_49eHj}J%S zX8YhQeUTlgN1HBZlQv%}$_qzvqp;U)P zz};PW5o41M(>@^0>+J*4A;x09gBRm)J-%F9+Fy5ggx`W|6Rbp1htHi8^wE;&s z_Sw7k2V+8fD4^?Uy+b(H(cI{06PyMh1)S;?pL9-37R`F!Nk z2_P6FaXMVv8$(UQy~{&A%cEOi|Irpk&u*b^luog2CWBBm-463(Ec$9!IQAdM5&eXA z*&K);WIoAdgKf^O!tEpG6+u49xw%A^pNEN62SqU!D}jqUTGh1F$PbA#p#YUdleir3 z&OwfkQ&|<{^>Z62*)(t}8THJL70H7?48-9ezP!zv((J!vW@c6teP~37q5Hx3&f2zG zPoQfEj)EQm@PhDFEqx)JDcM}d>fMB~T zzpE*2;j`|!qinsrYFz{nL_-?h)R+_e!>hY;5 zZRxXK0|m5RtSN2fv)*xkL;i|$SsO=pAa21pp}^-S+^r?X$Ok)G-PsT4ygUk8y97)W zOpMDVO7J0DD3~$G18)ke;+u7?(4RdsA3302pU{~bP?cKq{6Jg;Q3rX2@faZYQO9-0 zpeWS`aw}~Cs#^(wmd)Z5gzaIUj7JNB1YAw@&%W#fnI^_dTUx&Wd5#qKc(~xfF!;^2 zhQbDW{rHI?vF6aj@kI`%?bauNh2yKO>tF}bYb4$x?c2yUPf6k}1&nc%oX)hhkBG;L 
z(7~9PASl{LLzh4qU9FVPVKh+1tf9x^3FR3or)LPea_VNt^VLp~Nwv9(1GAp~AJGDt{RQ<$v$+1wl zW*|N!yTV69@48czT|#1!c*h9w&coifgERtEr&b<7q$=$!&3cpF=;s(M2)ir?j@V?E zu`XbB&saP10ovDoh560w-<3pIDK0nsRu026=m!P;KXOKSt+uNmebr*G{*I}_wpZ0i zvl}mB|J7Y8)XsC(ziM0uv{K`WgjRW_|KKL>D}H>Whe3V~N}9)ZSDdYUqK(?8cC;|I zqv6+qV|0cQyAMVJ=rrAkeJ$TI&%pjPzQJmQf2z4T_*m()NXWf1aUx5d&}{`5#Bgih zj7bq5%VQ8SReQ4NedP-!!>yFI5MLV>|*v_&WzylKjE z9K$xCC5B^Kw+-H7IIE;MUu_Yad9xzcAuWYiM)Z83?l?Xhq8IihZ2cL6p4e_~qy0en zr|Zb&AQj}YqyP?}i;P%nMWq?=oCte=5XeU~3j;g2pEVOT8CR`h3sD1fMaIaC@a3fI zj~umRDO>^MHcW?GS$tWkBN6Z6ci|^8^*_L#CXfF3By)4VyU0-Ko5bd?&!6Og~izJcC$?vyw?F7 zWANNoIYx9IFp`b_+4!PVNfRt2WVm~xq=V2-ca+!2NIVI*y*u3k$GdMIZAbH-)0r{f~QiVZ`Yyc!g6JQMmmUs(jF7$D9We6Ta=nH!&MqLagXiF>m z16~b{aGEpS=49{4{0`n2y9W-R=BYhA&8LgWrw>_YW3mtTx(LUN(5@B42kkG3UsrMK z&SWv`B#bNA=XZpKBA93;v<&$s7(Ws5m~CnwaqS&=?P1J9t6OIRo*LbMHf2W`v&RdV zdo`qLRUFYG^R;~RPt|9Zqp}4LZ-CUA?H{rO5YzQ97A03hXAa634{JuY9B9=eoQ?(& z^aymw%u&V?UojNTmmNfHK!NN!XZgNM=Va5Jg3xJsqhoroi zskeiQk1?vKh*4r(S_y2GRvNJizC;j!i0RM_liAsAF=8wcMe9~P(|)DsA^I7G%I3qS zc&1nobI%VR7DRN+v(POfOFy3Ca^i8KW?%x06_sh#0GL8}Lh*q{)rGDQQzbHSM6W(s&M59?R1H=o3erlX4?v*70XXY4@T0M0y00`j z;J)GxMwL%NyDbfx!nvY2*QnYI0Yk6eYoJ6i!m6fqBs(-mnmz`gnBdN0>|Z?2N-{j) z$~4wU9Hv<#*|TChwrs#5*gDU2GEZ7_j{-omp!r0!2?GbZgDvg^K0i>JvAqOQ0#JISH+1E z2~~p1YP^EQc}7)t0#D|=oW^76WP0^44GHL`QxQWS-Z#sLeFWSjcDd^Wn9UHh@=g|UKS(VgWrN%8-v^raQ^WUIK>^04ZRrAYiANI)>qEJW;ryS1^7trnRxCKct|hq*q4Sf@wwI z48L^@`lJ1Lj-o6PdqZvHWU;?#(zyf*otj@WAq=|1*X^nE=r%1VNUmu_(PTN5dxl&^V;>bS9b=V=aJ!Spc&^m6 zjGFwetXuP~B7SeT8YT^>-dm^+E*jCsTE$WxVjSs#@Cd%U(f^acWAE?-dVDmv$MQz0EwBehFQv^3B>!-j-$`Z7)-eXtA`q-F!$OsbK6pK1&;lt=y|dX4ki~o`1{yG_iP2I6E>PGVfV(!`>@K{=h$oJR8+U&cwI0Y*w&ZocqY_oXOH`7!3P_iJx-|yHFUlU|+cPNM4e^q$Q5#ua zkP5N44RON$AUej*r(D87J0nx8)n(X{hog`>A}b z+wvEyoys1xyRkBY=9GhBC-&8@$i$hElaRfXygQjoKrN*|;d|N=+&=efR6U8gOnEG~ z$PyeL@MOF0$8lkVi1uXXY`9AB6|?TjOmI>+M^XB%yVyn}YGs<+`QJZw@NeSkq)AwE zG*)>lW=uV1C%csMbiTV&S#i**SlZYoOB3n&cJopRG*XIlJHMxK)jFzbcT&`f^9aOq 
zxJY)O!O%z#d%8%T@`lqx4{uy6nzdmW8Rj(Jv(r|mvZsfG_EY(7x8=WJ+Qo3E8QX1~ z%8EQ=e{wClt@us`3CdqZo(=q-MxLQgBO-Lr&S<3Fp_{$rdHF=ttIvbClBLju#e8R) z2PdP6MtSh>tx>@~UlG7Mo;>)(u?`Pj{yKZvAP;sR>Wz%h-IN6-`p>yM=oulEA-~}f zI@hgS*Y#CJu~8>T~%TlpimE1t>|Ma?x_xtXVO zgEEr8pUMl6qEr)cq&o?Di1@vOwjyG&Ck$!15O4kl|9-Gc<3CisI0y{(Qm-NB8GG0#4(yu@vGIE-QpC_4H?V0rBi-&sfMs z0w`GIX7UWoCrF80j4;S@>Ew1Z=T#Ketra%V+H>_=!&2VU+I4KrGaQ|OzCA^3`JTXW zep3D_9rzZ%r*-KjB)9c+Nly{~NYR1+I!>9uY%9ohb&_WdU>1ofEDO@=PuQHj7?{9@ zU<&gHb@OA`DD4g`sEaL?@+tK2W@ zu3;(u+@WF5L`_4LZx(gu^4as>xs%n&wbEhpK0IS60`2Rg_$;k!5vLLFWHldJ&YjC! zVW7MWak{6ScL-mLQ~$2~-tPNjM4WmkabnoSf)O2SN)GW^53#U^vobw^=JOpmYn#Qt z3gPK|mWJ?pW&n4EMbEC+yy8XpT#T=`ojU>3=HSB!QzLxSvHaeyPa_CtSicY%55jG+ zp0EMEFNkh8`+5+r=Jzy&k4Z;ZmYM6{@*->zkw$o^KhJimxp=(H=IJ)edDO4*jc|1P z;B1U;!8)ZlWW;(3?$uBl*mq0A=`07Qo<1M~y3-Ew@MSh&7IQPsY#gyqT*RU{D^wrI zO6X}~^2C0CJnjN{b4f!$uA;J!zft&!)rVjhH8YA|aA-&w@Q9)qhzIofsSU+=b>xf9 z#XLQM%kt6|tC4@nh+QVpNYJ84VnF|w9E&$p)y!f9p)-3gz%=3aTo#V3$ik7t z+dm^-P)o@%5|9!6mfXZM)JAjzz87wiRM^L1{~%>wep-or+_>sRX*vKmh6Fdr?%5l1 zAlA2*TfE%v{jhAo*aEn>+JPIn%v8%$M0H~AP$aS>se-IAXcC#68XQ;VVb;77a;fQ= zy9+RP(d|HHtFLprt2iS_;ZDwRj^h3P8xmY8-s}DN?kL{f^%XC@1tTedO31XzeTn)y z*{%RPv*-`K5P^6bEcUrk4<8@hbtC*%gbg(!2gmTBvXX-&d^DQfJbgPylx@Shbhumj zQ+HkDVfYUFsr-XW)xoY!>S_67QKAjE{F7VxeP~sUw#T5oXFrwax|O@QXnR^7yPwJ( z-O5{C|CFclQBv9KKeCf(+#!llW(3=fv?Esra17ia7{C)SfYuAJE|7LBb`UgxOwIK$Ue z;4nu;_18%IwAqMu#2ioGVBYtrsgh#|*hv7ZVp%QdKfe`*n5&i#vsdIkEOxB|LS_4dI0Q5UT`&6)+s(&mSZMqGe3sSocfX_fz1<9)h5_1zTB*aQ?Pf$G5dH3S8&bvA*!FMr%d-P&q*0Pp!lbp}^niCF5x*V>bH+o9;e*6c4T})}_i;c_1A@ zNEcO(6-W~*d+9RAOSjbQXYGX>9eahbkG442H1WdtjPEhyFLmEPu+nJ*x!(DHk`ZeT zu~qFDvCXcC4BX_D-Dlm1vgVMlf%WP97{HHGeiZZLWPY5$kK_1}$B(1wt`?)vmf90 zJ5Cs!#-#6 z-IU7nA=}u2&-)xhp-P=2Q@F;U8%u#$9k*F@97h~=bQ}d-2_Ok5qqqSsBe>tTQE>qg zfxMq@)xEtW!QbzFp7;Mee|${ed#mcysZ*y;ojP^uRO(X}`rZk39hKGzAx`ArXj&ic zIk*KE-dgOO0iOv1DxW12opo1xL_Hkt6Ww}-@@ZMFn5iho>u~OJ?lblSouvwY=s7CC zt$njMLy7yfyR7XqG6q>6vFQ^)W4ErzV{?tF$*K!wA27DnqaIf4(9@@UF9rQ2{UH!j 
z)d%dWFbDnI%t4bYq|^sut-zjGhMsSH{!-`TVPln$yVg^+^_{W-hn}uwQq9iO#tAun zgL8c$WKl@AJXa{}6P$P=%Iq#2avpX@c*3yy@ZOqyvr)_<6H%2bQva%v|34f2#m?X5 z+JbB|5wG@6ecxbsyv(aDd?C}oeq>nAJ&X7x;9^E~Tbc68QMyW;Eh{Cj}{%yBK|E5w!ON=jbD*5KE z4gbhzuc2MFa%85Jc;IOT_=+1nx9@SBe*o2=(Nwb%ByE}DC=>0cj5gb7v0P2d`6SCy zBi^^|9r8c6cXqbDUbI^Qa{70(Ee z->^%=Wy7ZVo5VuHIK6i#Z3ewE4aC$zF)C$vovG7_m@h_F{KtOzmLX%UJ>h3Fc>UMIYiS-{YxXqmNE$!MzFRrH z23{w6tm-<*ji-^F=&{o~mkI%XXn(! zp#X|P%CsVM*cD6?V~Vv9R|nXbCY)k@iav<`KtzG(c*w+*;k*r9U!o9W&2q=bC5@Hr z_i>q0X?BI>JOn{L0x+ZL)_K0&!f&xcx*y%d&H`|3+hPFmX9HnnNV61N?K{%_J?a z76zZcKqmc4%@gsl6*z%$Kio7s53kHogj{!jf0+7qNLvQk8gv|57EH(&zR3T%fw;qrjgw^ZQTDl|1!uO$md93^GL zVRTQh)RH`FQFLqd)%&1AEhNZ+fBx%% z@%{f0@O{(%p*NrF^jKg42uSOID|K0A^Mh(ap338Ie$m*oUM$NTN@7-1;d@?FVH^fE z)@Ewt*)wZILFB~*G?=KbP(erVS?I5G4rNG6+_soPL6WV@p*ikpT%kYd2h?QWFX1!y zWJfWi#aitueIRQl@3~gZNETB4*rrK>pD8mwM5^`q#pl1brG{+=4% zjlWl;9}g>;7J1p8$5YWQEcQKmrcA(n_guZzn+E~R8~=?O%=Vb8b0KZ6^Upt{yw}tA zd|{wBBp50m&UkTa?Y`KBiu=qS)k~W`ZMFDPC;oNdS3>@Pi&D4) z9j&#gyE7FnLv*~+iZbRogLj6Gdp>9Bc#50vf}P}B@{rAUh@0=&o#Z?8F`I7(bFS+3 z?<8M;Lgk2btKEE`VLY|{Y}&o2e}1h`p7}DW#@2tO9kPdcRL6}51;asRj4Sk9^jYKa z=x+*8oBEJYQvN1IxHCtOj^e@g`Rqk1Y6GVnPC+xIy&fC7tCbxbqUhV+4e@~f&bjhp zl-Xs72%SNaWU$Pu5Fc55{Y|^_E}V(o)6|V;tP@RJw_1chWUJX5E{e*NLH8SF@X-B) z7lZD5JXmz!et~pJzbK_7*4&BH_8xfIV@)?O_gGn$tQrC0OdLYg#o6L}WCq{-{@?td z+y7`F>aPo^JZHRF{(cRy<269{DSH&VtB;zA7d^NKK)D#Kl zW@XTfpg#mKGBW|BvquR>(gN>@R!^gS>C9?B8k9Y=nmSn^=H}Dw`1>usd$@QUnvX{Y z--nD@rMvUx?zUa+Te@!7k6!!Td~0@+?;ck^{@Km<*iQ1D>yFo{ZoV6Ll5g+3-Trs; zU9gjUXBY!Ycjpi{-?2N%H|lQ|-j3aDd;L4f_v4>!zSVBN&%W=PE{FZy=KH6cZ_!Tj zJ?qW~v)p`g)Xw1_%g4R==<)FpaH6Eirp@}bB4IBvKWwtx4*d+T*z?W%^wtm`(`%7C zw~dBN4DT>|_BFHHwBCq|tc-X@IAqRO=gb$^Q>TZ=12B#^o`Svf-1oRm8c$VEn+*kY z=u>*8q$&+3+mkKmXLEFj{dYBY6@O!mQ_*p9d8zdftK%kF^rLbD9mM= z7S$c8%aQf@6nWnrzh@#RM3G;qzC4;0-C^5%jGMzaYUkzHXiTEro!#6V2X%t6$mPn7 zCD<&t-CX}2*_=nPW6=Xr<0Zh)`<7R? 
z^Tks{FOY(s*$^Q&3Rz?(odtaqTm z>$*;b0zaIyT!x{0uy~ss4vBkT`zAYHH$ezp-gvc#&~2Q+bmc|gabV>xWGx#CGh^^f zrI>&XgT=d>%iuPPCEea!42W<2GYd^v6GE_6O}4Gu8&+njVl`w}zz+aJ1Z!3}lsqz~ zu0%JipMC#pjJNvP51XU9mrcyG@EYU1EYPdM_1uLSe~l|J2Q79j^DOL=XZjK zmrc?xlU0-6WdL}qI>l4+%rRK#&?Aj`-{_M+_`)Ab%eFf9)r7y!Sn}HfBCWB`Epl6n zi@k3n{XL9E(}RU(xKiuHlJuXgy$^V|Lj0>DH|3AW*}pd<(nBM4w*P7Bn&XJHUh66cdxfx;k5*UubVT z$7b;9-331ObgiQIb7nJv6(YJ7qufOEfMw;uPC%zwL7V~l6e~~b-=q8Qnx;}fmW||T z^wu7nFBX7g-+gf4uxoR#?|y2#XyYe2A5?IW$YO$F3UYiG9gbJem-*C$N9%4@rU$SeO)Ywib$JRsyhWOVaQ^*8V7=Hob}nM=8V#Ofpd+gtc%Cf531 z>sw>S;4nL;`PO;jbEyZ`vy>}M(SgjZC0G>Q`TaDRnv#8cjQBvr~b^L_5>&Fn_CE#9Da~HR{MW0 zJK-C~m@to-MSHA6&ngZlbaY`B-YF)k>k_kWbT{|{@jG;U0oBc}9Djqs-Lo3iL8*O*S$w|;5#E>p*mW|5?C#i>XZ zxnsBr!&cMHI7hg-9piGQ+`iPvZ0?$SyxhK2W%lzvW`5b9n`=^gXTN@YvzNJVik*D+ zXD;J-$NJD9Clu$3nk~itS-N^eeHBVx)!XU0gbwk)FuGeHQLwpwl&^$n$C`Zj)ZKkw z@4gqh@2lMRBKN(;eedDEKkmNwbl>lF-;3S%x$b)}_dTk2SSA!dwz2v@robk$~9GmM;6Nzx<(%@&;VOd0kZLHp4W0yy#?*ZQM zp77pND1V|qt)1|;tM6Vpc-Lm|zA6{*OMvC!o!^xu{#gyG$adv!&M@Jf-<1a|J*O)N zxbI$97P;?US8mc2LVm9+KXKo^u6)aV_quYK`|fq+Q|^0jw`;ZD61-j9cN6Q2_df1> zvzy-6eUG^BySnd}xbJ@VeT@4~KNwbRaNqm6?^SxwVWoq4k51o>F4;#Zv8B7KH{S1> zm2AB3sa)RWvC;{dE}6}~EuzM<((7i%L+>_Ye5#oTHEIh}zcaP`K;XQG$rGL4tn{iLM)SE8AXqjY(mka2w+@L9;a>x%;R1Sy<;H zsv~-_u`H~BQfBX*yEbn}Auww`t_^hWcYQ&Q7_m`o>!57RFGcpREN z>)>4SG`|};l1Tu^ZZ|+D`=e%kr~Rj|>f^+`#>wG`OeqEGeAI@Y%L~*w^*rlx^adSi zOlvVP*hx4|v&o!3T6A>Yz=zqzr#Wmy^@}A~*)xGU4LY@0EEU$yanIDV^%W}b#Z2}* z4qbKr1!t6-oi5;DE$u!#z+T`etfiIlv3)wXElR6_=kDi!?QeP?`e>C|_dF9Y7O#zF z7ykuq?$^Dbt$lO#hS?KOPJ=2la54FTi^aWS@lEw|8Q7A#BX`{=`t@|qptsdLP4R>+ z+)pjkO)Kl|bb})X!GwmDPqM)db)T+}`!2pz)jW6mHPOu6=kDYE&8yw#LL&LRuARC3IgfVXsAwf60Z&fy8t! 
znHkx8ST{G2E8|uP#kzIlCiOs02KA=AHZvLilg+RT5SktYyte$FGT#|)i7|MR!MF*r z;OZ%ddlyjunT&KCsI7L6(g0-4qUM^udRS?R2Hmv&vvUHmhGHFXTH4eSxl3OSNP!dB zT#%RLfLymQALQ69NYi2i)or%9CswUJR;>P0H87E9ke<<@hRtq^bv?WRVr=&f#cqt{ zTBv?Tc}et|;-3EI=`bTV6XA<=A_REJ`WWrvZyvAj_>~-tIzB3fIxhXjKmVxOAxu27 zTuDihh8?J09Qh1kvdE~1oaQ~VyYt8f#;xLktD-V7$=!S*)2C>>y6;JfsQejYe3ra) z`>{k#$M)EsI$2{RYwzQ_m((~ud3GVaD_`!rV&6G`U2FVj#BbY2N3RQshkMYsFwS!D{i~tc@Z!`T#Vd(3!R~ml<3^p;Q!?0{ zkyCzQe))^s@^sHLlcoTA;c>@JntaKW3&V)n3GuUvTSxY0_?XVxMIDveKZidEre_wl z!oHg`P|HNJrm9SqT1&{CQ0#FtukDx_seYxM)7~g~!T*hA~F}w^xeoCTz%g)9Sq1HcZ zA63Cf1FPW`1m*X!GzvF{s<+sf?+Ch*ATFUSHznU&r<@+}wdlU&8_zUc)_{0xbxy~# zJXgM(82HH9^r6zHgd_mNCon_#xrA&J=L4ZqWBj}Hy*7RQGl!SiCXoE;gm`~6%LW3f zPh{P9MRDqA(+4Dd=b__HH?98I9kl(#*1|&vX%;g=pQ~iUy*$}hkr+N?9#;+!o4#Nk z6QD!(d4-(w;1}!2uwm%p&+@FP?Q-&PVSOcjd*P!+F%ya&`jlyI?d1#am;D$>)WW5n zOTS_kV^7I@Upm*Lls5aIj-gZqs!%Saa1YmEuj15@Ts}h%&&%PnWL4kWwTO^_@8vx3 zDbn4d-rPVbCV-JPr7mfSM~x|wC|C3e|kXk2`&WL@nN)qlveg; zy^^fD)0l0AUw7v}ggj4O3vw!#p5+|zx&~3sKQsGi=80{9Td$M7_>ONJ0k^xC4pGo4#+Qp3kW__di`h8b1YgErBOw(ve zczEgmaxPkyo}QSLXr_ zpQs{sl7wQZ6#&VZZzT_Xx&6)iMfvaPe>!7Ux&0cuu1^s@e$Zx2Vq_mQqVDG8@urM{ z(vz$5tIqQ2U{O6IGv!Y&hH+l`ryjC0^B_a>2G$Uhif)kU&DYjCvhsL;zn=QCo-h7> zBe@-MQqjlunTfxlc<_1I`ym-WcNq__ug-%(k4hc>k<_YM=$MDkuEHmy_S$*Ls=Y%y zpro~}JAMKTpo@u%P46vrT3_*oneC5XXZs`3B540p ze{f-OPJit0^~doX%gF5y;|wp|+t=nce$rLaI%xNlOD?=ZZSJFC0DY5H=R`B@=$m@- zgZ~BH4ad97jiry|SAFOI9o;kYC&$6V89e%>4)^d#R^4+==KC(G{@L#%jA`HWOP|!X z^)B9`|F1gJ|7}LkQfCDBa9APd-gfKh3h?|$u4o#`thR-_q$fbwP~tM?q8SBO`kSt! zO-xo5je|DmFj98a6igmN$blS=+-`cra^yn~=Tqiqu3|x_5c)^~3eNfmaD7q-M&qKe zN>@ag`e)4&7$d8+;b7%ewpLF+%Zvl96TLa)=nl0$sWebVkQ`x9mk9&)12aakf(#rs znz6W*2*AH?e}=3DA^-5tL)|Yc!zj-3-`_M2S_YEGNJ%IONnE3s{M?&u_cIln*)|k! 
za~9AZu_1SCo24fK!l`>kaVXQ`OCGWW1 zVmA3oY)#unmpWg*FLl|an5)e;dfIF}3ss7isM?*oU-EcW9P6I1E6F?a zS*-B)pSrG6D5Zr3PSYq&HAh^+0@@s+AxE;WUuMzdfZs>>QH$Hr$WXaZ-j5pmE>~sD zAi=AjbnRy)2VBC(!7T&uHXVeBKlppH3sxYSwrfV!J`uc^FJ z(ax&ZaVy(L=Hqkr3zr&E{<(5TLt*H(T$oH9&X)`?D@4|{6B_J*KZgL0y(s5Di81hVi4+~=}yD7o8+m5t5q-K zpWnncMZumHh+kXI!K{mQciJDz3v^a2*;GDH$R%$opKFR@z%oZixHYNRnSzlmkf4w( zR&A|KG&(hiEk@c6HPiFChLPCL;SY#*{X?m zk;>n%$z(KMTmcgAHR#+{UPcYJ^ljx^_-01%1TrWgkQgxaWjc0HlL0DQ8q?e#4O+I4 z?vn%0AugVQIEm^wa0#eR(@h){@XvzU1<@05(32h%y^bpfn`aA$SQ{Ri=ANw2oLWz&sbwQ(U&bEw)*<=v60_tnQ;?4=a??3hkM)Q&RQ$rX1T{P zq#>Y4%emw-P;Fa|Doagf8A2>(4vl?gEOp)jrEhkDVw6=tsMP@F25V15AMFX9)dsq` zVQ;l_i(%e+vbm;_2SiKIB`S){(%ce=SR*dU<}h;upSj9ihY46Mxl}XAGLY0=={-FA zQ)b*n#u>=%5vSv5JL9ja={d`0L(OQbb3f&dwj<0pJKFHPqLZAx`INfCNG{eh&MtcW zt$GM_Yf}|GEh;D^;r07ON9&Bc0AerH*VIiE)BrJ8cr79 zclh-~p5WKQ48NXzgV@B%{4120g$|?nTjB4YJ-wvExQ^ZCZ97$kaVWuLekO?%M-L|JdYZrbZ&d==R1B{=kCy9G8+_s1TBK3~b}>Iy z!`Q%)9Pe`oD-)ad`fT2*Ht+s6Z-tw8{lS^MW3qYgHrNb!Ljx^Y_1?P{?vv!z{At#) z$(ldhrAf^>5`g-e{pY6t2}cvsckPtEG@I2t!ItO$dRu$uGw%H?sJ0BGOnUp#V4%<)abXbNr@NwUlh#J(zZ9$d8D#g}h+VKg3E6;a9ulsV!{}1%OxvTQ=obvwz zy-(>>K9^V3a`>l-^=sxc@8d7cc{lvi=JC&t=elUm^*Q(?abCzHXg>eF&{g?MbILdV zborTGmG@=KFI9W?%6%Ul@&hOt`#4IwI-|SNG}y(b^DC0jz8_6zvpIG3S>E1dRGtSgDVEDRF*-j!JT7`7@xTiJ55^zB&{5W64@~Mkx8K z?o3-pvke(syrEk#_M*?3aF2=(=!rv%SryzYpvj{4E&LGF;Me{IC*eO=^A)SB2Cb|X zXa_1?j{fCefKmHKlfCf9K=sD(@!A`y_AdzFg13r=wtvB5oCT+@sUP&gg!oC_{0mmF zB$zXJ#f19!MXbIrDx47So$kXTvI!ve;%}xaDt5lU+bFthw@J@S4)_hVKOH4cThT>4 zM)@xgA26E7losoD9ok6k{t0z));tz?xAbGo+g0QWBnz((#ETB(G5vwZWMj-}8tZjK zFq^#o&@!_bSEUAGW1s_n?zCe7;v^0MgQS$P2tqsWVzw<@ z&H6PM|IXc*DR#c3N!CaZ-bJhHP`m{v#HAtsV;@PWxqyOh@&}kobX>K~+50Y2CjF{G z*|p!`9g`e5ZWE1?ow|D6ls=*4{pyrZ^=DJ|5oi!`7w%_+M~1$kzVZSX zqcA3a&|&KNW7YG4_!|F$*XZZD!RoidXUN=Ay*lz{y>G>YIB?lJ2?huKH@NtVZg9cR zjUFEyv^Eq!ViwyN!Nra~H*7PlNY>1uzucCr!^j57wB9kW0^QQ@_!sn`U?T-5P$vD7 z*Rk$zF1;$r^uM6jpg(k`R}n}!wW>Z*^uf3PzoFNepwf|Ezoa`|dOb$ZcTKN>6zq~- z8A0ySFHpUMsKME8Uq!+TTU`3et1*5hqR_ 
zqiwVhcmeu8)FJ&+Fa2Jd9`BGI^wOg?{n8HUgS_-{Ha*xOy_c6>YtsjHNKfHYZtyF! z>AgCnukg|fY3!B84f~Q$- zVdbhX5AchHifKE5XBP)0d14xBSCaB5iEBx*VziGM62ms)3m*s?OdDgf)5f6)xryw| zW#qHgv}Lj-FBFXQD*Lm-IpcTkdainANw_y_0_+7n+s9#g8Br#=d#L8=-C`q#`PxQQ zG7l8AjyT0fJha%%%5LGkbKu9%EQ_B~*+xb(EuG7A-hd7m_&NFi7w{vi1jWw3fqeMV zhckN(n@5(EM$iv7htG!KcfST5lT~k^TG0Van_ut72bwZ$b1nJy2vnz$JemaZ=@X$F zrWZTAqgLUPm&xJTamlYUm0qLpj)=cfH-^Ybb!RlhUliZLA#yA#X+UqA7D!A6-gU*! zQa05LJihuvLoX{1hpiM<+?+&rVi9W#M44w4JAcT@{5F}(!e{ZfB79nGc&#rseVDI4 zeiaHLUoL1HUS9~MGq5~7aelb1C4Fmv*_Gb~p8gY?evB_Xi{i(mqnhXHbV+pc6!F#Y zA?mW!1nD{%|Ek#Z%5IT+T8E$FOW!U*BzrIhUZy>nguTdv^#pe_8``GT{wDcHhG?}K z6pFVyttfo*Z~Q&q**{>?z_>EG*!idZew$9|;5x^>m?~HuDf2h|+dfqg7K*3XjQYa* z_!UF^O;7L^lv#LiOG6xe;cKRuP@Q4pQ2ZM#iDpN=B08h7fcQgXj?M@bv^CVr7E5=S z?zX2*uxzs4s@@VA8X6iYj(i-O0cqZ+mSfUuO%3(&R0brz5|#Od+t-_YzUt$lZ)zAs zGx|zhY`ZUVS6hRjWqR&hYF78e8tAtf!{qBy;cuQwZalua`I|52skPx0UrOQXEPaFV zHP+>(F79%-QNd<%0^n63B}VpV$ zgr7Su`-OZcTHnCZrSrNyt$_=P>ii8DZkFOQ)3XlY5v=~Nf7WdzS?^|rZ8jI*o2_*Z zXk?gaU&yzWHNd=X_3^J=KrWEzj8h6CrvtSFa8rYMWy34-kV!wEWUk8R(UH%@Dx9L;xtT!&Jo9Z^dSQ`FqAmNIQ_Y#oNa3hkYkz1%ouap=X zFbp5m!7JRbYroo6-C!-G7#9i>wI!S=W99SxYX>a;epUSe>ti4EjIAHnYjFfoJiIhe zePU_&5+PfULXU%>*}IHV5`nha--H^*Cd(1d%ciRPsL@C~ZG~>@t2~UXX)u@spTC)% ztagiHfR1mzfY8T!cIs`^A6iIEyf?Z;hLs;KlZScqo)$5c@@lb^vWoWEG$;iJ$R zmM+-lLX}B5bt+KJ_yB1db`FT=9{S`p))|8hfoODJE`$SaFk$rDrU>vP5n?OWE`p!Qu+9M-BZ zC!^$9qM@v{!8Z8lV0CMF*H{A$M#QAwPeL5gltV=FZ4)nh9tZD?Adu##?HEuXCxM+Vik zj%@S=hhBiUUo(Kgy&~I4Z+^$eZVkr}MmBQOhkyR0(Cqs19*KtjEV<7Lp&yk=?qQrL zLMwXBS}GR*P`r7GFWe*0eV0H2r@0xW6kHkm5Wl+}$nSbgDErnel;{g3F9FA%_5KEF zRlc{`U-HifKJ}g2$e*)9y_T!Kx?XREsxL0**xGZHsz;?lxL(foWt`1TW{8(lH}Kt+ z_t#Y-a3wvZT9q)wFjzG6DhFp&1rQ%eIEvQ-9-6`KyMks=2ZnrKgJoU(ZHpb4$s;dZ zR1YAU_c{1uJt=8lWl%Fn7KgIfO+BzKdL2S{pQhLS=7;ug$_IrDHuh7u(@X!I4hunz7a$l4pRole=$}f}z`vs*;1x;b(S*?Hm z7g93hRTZipU*>OO)vYI#j14@+rc24V3MFF{+)@YEH71F)t%=sRQR+J6@yFJZCP&qR z^kJGWrmOq^sr;K8)b;oCSU(#l(O|5#BvgG&xqp^JeZj<;VutweTwdrI?B0hp0L*Z_ 
zVFa$vc%R6GA>Rj~#Qp?^sOR1t>@;=lqbs7*Pr*U_&p`6eAKlpWdiY*crs<)Ergr?- z>1fpsB7=oUd`mWhk_wWY+mdOgFQENBzA+yrLhmJa#0s|P%d7f9gVm^hkPHJIk_@a&2{dx zbbWjsK5(0tp!qB{1efdT(|Y>Q7d}6wJ!DsZm6Gdk8c2zt&S!D_fSU7bJpu64OO;Ya z632!Tr&LCYTpj0q+<;5!+F!FZzqOq`K`0oWmwJgu`tAP()ayK`Po%;ID)UcWwB2Vy z_QqBf7+2x=@Oppq7?M5JJrsxEnuqHPy0yQ#nkP+2sY)K^s@;*^maF#X!~Y9B*RYFV zTL0J7N*+!Bvuuz?oZ_Oic7tz9PU7xjA|(~Uj#p;(yIz#6w)U3uIBA_TnvH$_x1{H; zx2ts;TRTXTA7H(mUEi(R+4?c^{m$2S+P}hN9NrDPWpMEpv|G-FCwiaUy+0mFC(P?+ z2u)9hFe_H!0?DC|ULE#gK^m-3;YI zacs+9IWyrgtLTPjHZtRBokBmI(8xJaP8G4h@-$v2qw$UjB~D}}xU4i-?f9EMrjdc6 zM{H*m?{Ct1n-J1RopmI2y-Jm$k*RAp>I%tlE}XugSY7NBM4Z-k_Ax7|!RcxoPFuFy2@e6!+RU)(vB|ZdjJOl@G2Qx9y^8 zf0J>GPY#%~$VF3?oM$UBrOu@k{{92*@-jf~`q}+CqF;|Y*wN%<$}@LDF;r;DvWBv^ zn9L~^qG#E$8gPMEQlPsDwCe}710spPF=`ou8&|$q%bEg(XDn`G)icJdCW~8{EN*hY zk;QE+z)TX?!eM{27Png1>u-|Lv0>$KXEm6J$0|*4D-@EmV}`Y=Wksg=YBZZG0}caM zGqk?PNOdF!O<7N!N+n{M<8bk>GvnJznmq`E2CW07kv=>@=YUzCX)(yECrT-8oAJ7I zuDjOq%v0iww&BmVSdK_Y zflzs=PN4>?;T6m_%%JSUd`f+WeyAy-_?q@nl@MSUO*j>eFnplGdh$UANpIAljERxQ z;bO+l)Ld)0+=EQDED>Ji^%!4dIX^>uk;U@ODoa0a)+N`L*ZLxl+4Olf{XV5vr2nLJ zuxSwYep+V)o@XS*<_{N@69PZ(+ADOROM_6_EFyr}q)8Jo$%X{8$Z5d~Q{@h;nBeSCyxT!=hcr}U*JUEk@TbX%4c%u}HH z%;IqGtUWX~6C>2d_$kM@Js9G~00N{0nELA6RSpv$$}MU-YlmkfO<3-%XzLQv z*4(kSsB`p3%KKt5Vfakh7bz?J!FQ1WI~TbXA00?eL&03EjZqxp_!o$MwuQKa)AY=Q zIOvC1D!Mq;{oBC9Y<3x#&KQs~Ri<7Bj90nfCq{1c8eJwF?Lc9tM6aNBwr z=^03r1d=D;%hDyXC(2GFKtNK}Lol2&bF8M8nRxgrv-4g*sI{TFg*7-HjsC#*@vBxT zj%QDfx-6$uY@$51hi_DAN;;4JDMLIt6ihyXS zG}^iQFVsSZ9~?-O2Abau4-6!yk8{{sX=llYfF53aPR{vPOhfY7VRpbP;JbvUx_CbFJFghN@r@-0ru(<`{ldX=o@tAm7NS z&+Eps$>Y4ahwVnCA0};=Jd>h6`YSPVuA0-WeHHW7d8z3j!F+7CT+qKf_Iup2-%43h z2(U1-EcFCmMgGy+r9&H+Q&IFwQpZ+IL$57F2PkR-cT7nLdpO# zrb%sLVB>r-ZJblMFVD7}9nu>rAWnc-or3{4%onZ-B_D`VlE@IB0m;Pm$=aJ3zEl#q z2fI=Qh0>Y(dr*ZEmvFxsK+-Q}2f!FLkojuny@BMF_d1*lfu$hJo8&W%{qb(vW`#ll zW>9DPfV}ZhcZRgmVka!dg{&vITyiYD$ei(+lfVN9A{?scNj&41Ana)fio$2aho>Hx 
zUmnbwUo-<{Unk5BB@RljaYcd&jzoV>H@@xo(X9iXaiPWfvP^_`z7U@$hR#%Ka~`-MhIICHdW&-^4R(I5Kg|e3Q?53;qdolVINZOXIF#_m*87L`@!$9!6`<3+ zdLM4_Il;LY2f>6dm^`gG{&nnw$&tO)qs6sNn}5qWd$gZD4L)XGjs8&LZ@yh%$nr_Z zw_N5*#*Y`T6z32D>Xv@W)t3$2!t~!={U_*OWYcfA>A~@OElkgH(?i^9ykc<6TvW*2 z6q}$izN#*^{<`2B>$%>yXQ)>TI`Fg)nNw^1O|o4RU-AV1jXIhdY-{0fJ2~TDlaF@z zn{j4S-{?qlPKBj11e|YONvp8dm51my$VBE8lvrMaCR+8O+ zSHy076)|wXUlWXPqA!B+m7(M@$5ZK6?3n)8Cw|riSKvpS!3QF2mcD{DaStq1=q#%s zL%@Hcn9B6!)OTxVMbzt$Q2pU|*6+C7T*0DA9aW-f8l-lf&2d*5;jV&&To$#L+dshTmi{O9Yv1PdgGPnZ3aBEp z0*Vkv?8PD3bq@kh!yb7@EQv*$hMd9F8H&F$xZOFFC+CJKV%fU0r2o2BfRId$ta;_C zUW``TA7{J61MxoPv(G5sEsw%1gyS1ITUi`i?+di`G&Qx2WGa_b9a*N|wviQ@Mrc)q zzv(?Nh<&=Nzxi<<5O~fph&8o%6#R_7tgp1Em?Wmr5~q1jlMR<~=OuCmV;c|`IyMqt z798}V=rt-}Zp6U(+&{n4X_ojzsjXGbfW&Ex9Wawi2OBPzy*R9_1@!yw!J;ly-){!#>P39-|nnLpqM8@Ejo*M;Ujyr@ukiayqKc_syY~7Y{%{YRPR`# zwOzHFfBv;}T;iB=XBbd~Z^-v;sQRO+(Z=Q#;e#Md|4?^EjfPHHY<&+}iT7B0WHh?F zzv<7iN|2|Rdv>kQW4>YFC8e|0&f&KB#(CYsYhvsB#J2Pyt{0=0?w9VJUZTnxlNZ;} zn9Fh4Ox6r0z++?lllaou`XVYGqAiBqV;>c>ro^4y{P^|HsP5exisr|_^C1ekuK-CncY4(?Q$GL=ZDL8m;I>ezu?FC z?T=*U{5gneQ98cx#*lAI`}&iTx~o-X9NjQAj$7w2Lso(H&a^^{)-7_o>7vH?22U$z{2*<{F^0xsAKDj5v(CAKc;iys7kV>suo~=B8gB8ms2Kw7!%`u50y)$eD=%RN>@8U3j1X`RG)RRI2Tx}aS}t_LzqXXLImZPO zSI^Td{gmd3$C=>SWDCV#CD?$l(1lgFXH=k9VfhmA!K4hKN1wrBq4+^8An3zvkIX=} zW#%V#!0t`oX1d<(_fR}g%0M-pce>Fc8M&}UC|;U!s7x5DRH~n89f_g%Ct{m;MQXVw zg5O>aH&8d27!E=zOf*asir1ETkhdU5G!bjRpIOvQzk$3dVTdN(72F*qW7cgg#f$yT z?JyMjF6W|n2tJ=(D80c z)d$W=bSERAqKEA}cUkE)D-hIYa28D>E%gM?X_t%a!6*{^y2@$7_h=@ zsltuhdOoE4p-M$;OOMD)pb=SSj!>5J7tdqWq#H8J{CkATOX?8wZ*ndyRKC`n4s+6X zy7RBuJ4nyT^j{YK4O#dE)nuTU(GnbAU;gH|b^b|bl&=lMF(-k7ITKcHceZ~?44CTgB9l?ME-vqh%KzivVTJhH^|FV3#cVL0cgfjG zdqo`kD2;IpA3q3IrPG&73^ZyuDos1TclvU~(%JGgEdb9|Nv$`QIga~2ipfgf7xJAY z&`m*NILMOBaMxm$kgaJm4g;ISxmvt(VfRs-aNv{IGR@X{rQzzxj0j) z(fJL3)W+afyfi`i#^kIhc%~n9&LqhmDm$EN(fr4}q`WAAx~OprQXRsU1c=jj6NSz6 zvdkGwp488&D_7yo(|5ZVaP8rfu#!5uarkHOMg0eGmO-mDTBAvW5|Y;E5~>UjOG-^kV5s{j+|lN 
zR9?w9POgP90#IvNAW?MTLh&9)N(UrIZ)-Qh5jQmQ6kJ9^@{i@_h%w0a`4u+fvoNg< z8i9T1k#Z4_{$mPt#uyKvubdj}Vw!JFxGRb(i?pX9p~S&IEGQ_LOXBQbn&}Lozr&e9 z{`fNL>W?nURNwr2(!@YX3O*wTeXU=UPnOF^Gms2Hjgxj)6_LH{aOsXhB3w-RP?W!^ zFzpc=u?0}@1v-b`M5}jR1VO5K6g`z@A7$JX7;j}E&3670CKnmP8BT}00x3CQpEI`6 z)gr|Am(mKfp;0y{@^+Qbd#<)XZC(3YiXPm& zjMkBBe+y>sTrg9P@@l;v7~+u>a#0ho_AMy%IhLDeivU17YUsYd&L^AczUta-?dg5U z(xxm@=E-7!)3+a;pVznL0MGKpP|Fwo=FcgFqH+C;VjVZ$bKM2!8DGXHNE4<2`?_vh zdrHoa61LL<$>++Wyol13FMwscGY0#BI*7f45nSQuRDn?rGJiQW51BhAs_=7`(dcXe zV-j{I@f%;-8O^4%sUuwn-2bxJ$bozQNqKNj0Is1!7a$bk%{lUtTwPFlci#A!2Gi;E zKmM;BM$u%;na+8;n2tQtqM^~W`%_+b`16r@g3+Vy+?$c7A#xI&uAwneK?KS?{Zr#( zv>hLYlI-j_#n@6PseRi<3Y+orF{Txbmzid~glA^QN?7p$5RuLShHqrNLY@W~C}sFp zlE^5jnA($#+n0_shgk0d6f_(P@@GD2ke9MWTUbSCWN_t2VC5OgI&Klg@JJZmHks$gc0pk zjT16i9v_>>IYrc%$298j$S5;3#1(l%!+_c+18O32!yEI|`3yW}>g)tB{J8VzMH)i{ zr$h#e8ht^~#qA}=n;qB9!}9=M3P7@ci0C9MuHlLZ;R-wFyJYPD0g1at-rw{U;8Vv*!Y29-G$5-1G}q($_+n~( z{^FgNe@x}IzYI$M{g>*)-w9z}v{$vHH(GFVz(0PWGvj^d!l|KT(J3doNY*{ARU+`u z13sR!&RhICkMl>ue>hmTX1let!T z1$J=SBR%7~upphK+!u&z>ayv_cD?*>7wTe^Qu zQU?qU1KKxb#ncp-lEMUU{M&S+UF)l|x^iOgaj=t`&%;5ciJOMyt>AH6Zy5@-? zag4tyPL|Xk_ygoCgcel(9==WQ0kmsujbq&JKS#B4rJL${Z(IjKDTv{r>64ukZ8_6* zzmR9 zy^qpH#AScFa`3ynzG_~>YQb}lcrCaBCibQm1`FB7UO*ZL05F7!+(u-ry!K2IBPdI1J%1HkFH2#Wu?%`hy{QdIr_h<3n_OOS4xhKS} zHbAL6&;MzxW}l<(Z-PH1-#IMO;yeY1xWnMD8OTqdIvdJZ!CnVg7FD?0wS;#uGKQ7P zIqmCnw`1oT!$@g-Q$7#Lyw4?Em&>PFb=D(DCkH!Z<>Mn;9?fTIr2)@j-LC74zE zP%(tIhn)jM5u%ZvP#NFerOwm8gvefd#z0`W*dRbf|Am8S*CAB&5^HEXyboeN^x8!o zjb=?UUe(0c$$a(=eRp1J3OG)fD{f<Tz^9bM&a79N5%Lep`+BciTf=YpxrX zt%5N{wmVgetH972fEUxE)P0o3-_e5?ty%jt_oGLCHmB${9zeeyxPSBDR*k69oXben zitQ~xrcoej<{ZPB{+{3|Gcb|tZ!fxy!mUL&2^K@`IcDpH52${&e~PB^6;mD^JH6zY zd9a(@;~t{FSoZq+#lGPSKaTxbYk!v8AFC#;8@$=+g{FmV&9)+EYt!xv*6gO5BR%I7 z?ZU&-QQ-LL(cn0KYz~eaf!12&@Rz{jY;k7CWG=r%Yg&~fa`>F0MLa-}5m4k-@HTu? 
z6=d*3k$B3Ic`?Jt&}6FW`xd1oLzAefZbj1y7y%QlIlv%!#GInNEs`7^Df;3;56M>>4U+Xnz|wR3cv!At zG&_yCSiZABRC%4h!t!a058HI0`Mg20C47G7BaDOvckmaS^=OY!CEXj=`hFcw3cTsU zP&%u9)Wm>{lLZ|pF0DNZpXEjr-Ga6u-{#id?Enmyu4*gS+ z?H?u)uYW4P!K&%?P?mR$J~?{SH&V!qz=_s03uNTTIYl#h06|DobXo?%EhAk7zwwt8 zGS@jX&vsEfS|~m~CI`hDgJPA7;=(L4U7={;;dJN@lbrLslJC))FIhzUB0rx~^cfGJ z#qqnMXEaKs?o^%1n1*gJ%a>#IP0c>1eOxU=>7GjPD$T{~0U)#%{Yd~}ng!Y1c7HBc zzD!dl$(P^oK&_H5^_g0a(^ABfFA*v>ef2X{_~02iggC}jc(_~PbGAa{2z)nqMKCtA zxM1OBj3|G@;?5#M^4qkZb+|8bDDCF~OzPDBzj^KdNJ|Ra{s!ogR-qfw+IwedEQb*f)1m) z71{7&jsjGWt^b!Qr20qm;MRXgrvA5vX6qm8)!$e3&pADZ9($Vl{kiogYPEW24_On& zdAVofG+r$~ywCF5@WUD&Tz>e+U%j?JezM08?~id?{(ID1rzE%LKT)!^=yn0h<%hAf zJ69pH<6%d%<}&4s95<)vVjigS49I;C_*>++%;MIb5~?R7ivpilXxr zx(w1h!Y03VB-z#+t391@f%vP=pu!I7@8n1&7*1P?U1iz5LIb->)wlbqlb1m8A4Ye~ zA715LJW4vUpr7jHR~-o?EB^x3dx!T_EQDSCoc1a? zsl?we!2h(1z2?{we6wGg@-9(;&Q-}|&3S@*r-5r$8V4KvB$V&N1OWNIV1bWnkUE1r z=g@+p?wK6p4>37jHaVQ5$w2Zpqha>lt{<+$zG1v*d`p(b(VCAR6CKFY@we-C_8=4w!3$~=BV4_~P(prBjz&Hti{{GAz!`M9=qu+SlWE}Lg0%S4qM+~n zP`v68^Y$@s?}g$Am+__}4%A59a(QWt9~q3FZzxX>l)_YHl-ek*E?M(63w!5q)0!|L z$%~%8htEc^S6NB28a>bmcJF3VwD8os8rNEBd#BBb*`FwyktzFMOaHq~S?5u5L!_D1 z2$8PiM?{Ko6gD@cg?4PRv}5Y=k{$v*iGLRzfA1c_LezbqlYx?0Jnk zANIV6U#EsYSz*ajhxY&G>W=M??`hj_erdmbFLQs#y1$q(Oum5oJI?)OuI0PiewzhR zVX`6~&2OdqJJHQ*+HaGFxJk9{?fkS;iwbC5K}5a@@I)wab+NG* zX(62W{YJyfCu0%C)Y|I&fK^X^e*{_Jw18POt1wIH=(83~_xkuI4k5}u5J=p(MaAPI zYW*cQF67Z)a?5h_bH`fq^A|@y{my5b@ys%Rzf;gYdSpi(W2Gc@?1&5<=G^F`vTV9% z4>pstyM)QE(wQI2Xiugs5@K@Tbv`-CXsYa=Bk$#n#;KVB_v}G`$eCyd9q|S^0*9T8 z3r~E%$9)}Ufjyn?K&j*85J`D6j|{+;Ev&pY2J2PyhypI;DvM3!%lVp*q8yNy$`>Yp zEt{@Jj<6G}mwWX>@b!7-W6k;@NG-O_l}FALm9|j_%Azs3AiTHP2Q;0AkE3&XmGpRj z^Ec2B>H2#ll(VZT;cuR!Z;7HwMknh#nIE~DR4<>hSNhPbE++lb=!IyI#5EXOeD&A- zEt#6;&>b2|O|3vq58CNRI)%}CuHhk=n2AjdcSNFeY8(6SxCu!OYSwwl#>9l;XXL+u za}MUSUycM9qRmUh>7VrEVrk6mJjosVgHV$A`Xta*t}lDtS-_uIqW8wv= zT&>Cl0^LlmJEh&Md z4aXXwzeK8lJSmnq|M-q;HLt`7VHjXDiy!U2lz^nm1fRhKV;aa06GUA)1roQJIIw|) zyCK{CHg^5VAiYHVb7JTOFUF_z+vL3 z6R4#med;TRK3-iRokcTbdrs-?H>! 
zI-?MCqUi&9otEC0zs2c2_&YYuy>JZ`>E8UUOrtP2)P}}CSKc4^q3%P1T6@IhqS%0F zsV^s*NCWe1JqmB4f!UM&S{};uP{u=r9x8ZXLX{&=qM=p~0u#_fEe{MRJg0aVtA|lM zjMKwd9uQa>IEjYIdYH(=G(AkN^OrQ=390>q>=4)cZ1+9NZ}hqHclhCyMpE3BG>IxX zEs*>p2VN1oU&ycAwO_@C7K9J*&u=n?P(~w1!U6<}LQQ2C$T2YDFDlAbAhx|Mki5b- zWo7DBpt$?B$%c)wEnoU?ke^Ck$Ko< zgT9ywt1s2y{IoGj8|$Wx%}=XVnu5@%Ty1{ZK&6$rX{GsTy45RH=%xko(>`J7rwpe9 z6Kg`Y`TiSpL0C$1PW6q;PkU5pkGp9V`DwpX8ZyU}o1C8(AuY}QbTCtX!nsPgNC~6z z6SSY54l1EIKjCLeFg%AhYcAgMhfeRQ5@q=bx&}CqfUyY!DUr*10uJYal^h(-1Cux$ z&I8gL4(FkQ2Q+8nq1I5@7@vV|Ear~Gf{t<*%@_mC~oV#+7I3w0zMoc^Ns}xED#N<}6;G)y_tq^j6;R zo6eNu>50U^`)}5dRQH*?o0(#^RR$yM(1IK5|A8*Si zQFf7JRkr*Uro2@D^b1V!SK?_vRej8ox9{)Dx!vJW+Tv`g1#+_HKL?xSFU%UN??C3G zXHW+UoYf>-eTdD@jo8Z))v>nrSbL%WMk9tf5S|<`@Nx=|ANEYZx2FBGKq5#N7W!mE z`Yr3!C7ftJCpGf`1aDojCv?99HmGj7Oa(om8*4#C8QYUr&KXHD4(Oe;G#G1{Y!Z1u zHrX%8FZ+P_vkwS6`+!6<4^GtkfE9*Vt(GVh_*dq6T$if-F*lj#a* z*@n7K>!0bZpT19&nQrS#U(_v}Nc))di=m!$_>Ct(%9r^=`A0J4Z?xrCTR)?jERE^* z`Dg@o#g={{Z+((Ce_MZ|l{$Ta{Krm7*iYkDbS*=SDRLfiFUxu;pw_|YTs+B>1RpT- zwKJy%p6Vy!yChRvo!y+l*k%z zStmad81(U70KMh79MEwC`g#l6-xQ)Ss5d6`Bh>rgx5CCk!0ar{WXy$zV!-m>@a^ts zVuQ%-pZ{+3#{>May1gmF2qwH13S=Qd1J#5E;XUaqi`ML~8vM;DR0VU2`tu?^_5D2o zg4Qe`5)j4y$HrDfYrbVU?u+a`r)UEYSW=Yr-(*c6Miah(QJkvWCbW{Pwde&Eax>2& zGp)FBj@OFKSmN0cr30R&yWA$|QlOMvnSpwTpsuUt*`xVn19hUHs{W%b$YU}fKj~=* z1IUS4kpJBckh@!u^#XZQF35ofhm`D zzin=^-hrUQx@es}J|0GvX0qfiSL#LB!$QgF9O==qirDs_`){1+SA&aonW%~uBRA$5 zWi47WgJn8BG^a#8G=dl(>7f8i6x(swy$)mXjV-&DL8AdJ2AKQI8s%$_WVyRer& zCT8mOhP9WrLB?q<29d!ngIBovhk^H_g|erv`i-v!5@$EYZw~a@h@q8}XYU5uTAAzH zInptB#pWfo9F}~=ZIBpmi3rncxt6lUf$^6f9`HZr=*B8BU%WIvu`pWm*E>zqj-kWn z6y3p#ns(s;nik}fZCVp-DDxNXs}SC%eP7U(J8$2s>gX=o7l;Y3tAU@zBU&@iBC(*K zkl4*d;=TSLv64@@^qnWTrv58_vi?(dUO)Qd!+;j0b2-AcFJYnn-_RQZTaxEd*$#Ja zq_;89p*d3Eav!7kh&v3?x107J$cu=+L1S(=_&K+|3)O?By)$^)IlV71J>8j~GE^R0 z60Mp42UG2`T}9@Hc~P|yRr?N~77YPI=gePFAp^bXOZ8D4YXizG9x!Yn#M`&# zKaSNpWZ&{=&AArx`3#^rMW?xt_Yvf)_~f5|8KF9i44Wv;v0H%Sfso-4Tk2nZRB9hy zg1#?>lKhubt$a0w5)&)@j{zXIw8$;@IhswRmS98A9@ar_S!qBgV 
zr=8Ppe^P_J-t06UgRyP!>qWGLFbfjkxjkA$kJ9zN3VQ5|P<%@+JJlAL+a9*SX}137 zdGgOMiLZ%$#`eh$9m2v*_l?k`4X+! zc$;bcHAWbH!iz}u0IZa{#T7<&|IW5wfNlGE`ib@{Ul-$5Ez6Zh6&ZM6K$D%rdzIA8 zeo&5lv*Z6nIx54NRu1ibn`zYq!$E#t)T*~>P-;4#@OfNe+LLx`HS%yt?8EEWg&O}l zm`fhENFFY+^6tE}|0YwRTcM4_4#T9>J34l4df3)Vl-6v# zr;6wB$&SS+9qr0Lwad`+3)=7#?Ns$y1FSxStm_`3lIKO7JPD1INwpB9H_%d6hTlpEJ{j^VHF*tGgn9 zArzbuM@h#!*FyZWA>wH+#C-(u-+b~f$RGbm6FdHGsi~Of82`L@;~y+i3BKB1n;$3* z;GsnyqI{w&YSU2k;GBK5=EL8awp?Xktl>p%xlaIQ^NH?j@$3PHeANzVK|QT5vP z^R#pR^($W&{Aa2m=dJEUqYz1G4bF8}kib9}B- zzMbOJFN;sEeAcLq*0kJWuvu+5N--5YuPSLS$?LO81H(4Ov>}^7|AQYcgEb1_o=-j?;3tFgp3_de>>NtO7*mayi z$FLJ-_}uy9N9KxK{v^Cx47{DmyS-GYn!!1Hl z(+3B*2#Fn2w|jjs*U*2VX{r1#X7IFgd@k5IKFF)RHSAit(s%E%Q2)QscXKA3nwt#$ zPd3dxiWkv;TQSWQ7aDDCf^F||_|Vcn$8onlbj1`dT1Ue_x(D zj<<}ywox6tQ+@3z0m69GxFV>dTyc9~SjDKLq~KA8E*UwjqcRcfOcM&szFi z!PCy^bE@)r{z$p{QV#wo)W#;0J7wfB?d-V0zY*N#6wTm8_>UC+7x4+h$V|eUIjt2` zAOs6_E~F|Z~C$Q1=zD7rEOI{JS`Bu zToAV!5OV~=ZN%vo%IFLZq5j~|Mt}kUOcQ{XLjmSd;WPsvC;%c^e_MJ$28W?Lz=0{L zAoU3321J!W%*X|?s{zr&1MwOo7b3iVwSGj!M|yDt?P`i{{hp%#r@bqIj;c!2ud-m+ zEI?XW+fo4&gphz>0-~v~RY3(JwuoyIvJpEZv68U$bQ=gkin0{#7Hy|n5tr$)7imT# zGKxuLYg)Q#V>3vP8d0$xC7Q-IA}*QlyUR;ep>xL5ea_4|^WHgC|NZX0|6Trfw|AHS z=0>x&-$@Dl9{(ct&64#2i{!n%AHl!K@?$c@$3=*I>2F|z=&ti2@W@p_`?|pI zjOO1W_#f2##gf0o;%_;H7lv<8c{zMIQK!kdHiZjKSIKl$Gu-D!N9EKb!p+g)_LbrG zvvOL3f?&2sL@~-F_o`^_K7u-ehr~_z-blFS(y3_m`uhQhvAKUaz^Yk=zR{?&{%)d#L1ILGG8Lx$_0LM{}Pi zxd&U^0lW{h%@q00Cij|X?m>dPkLLa}>qs#9aGlQgb8JD@N$$`82JRWr+^?-;zF)z= z$oB)1`$3C41-rP+*^>K3awkV~Zx-BlYwjB)_l*|!UyxceMRMOk?tPa;No}#<&ez-n zCHDx6dn-z=`CF7DltMST1JT^W1ot4#{RaI7li#e>`MxF@DZeW$q?SnTdC}be*~omq zhJTUoM^l4; z!$bS;Ct+p&r<16RGm$-0C`-ZlPs?fI=Wx1~PFG9OKWaJkN4sT?lD0w1HZT;;D5gY` zS6Wt+u;px*mtlM^MR<$lRP-G<&7o6=bjr1yu7GrRx#_e=I$dfxW%L4=46?)&f9f1A7DCWfg`dU={sWCclB4nMR1@ixy7>o9QAn@)?kbXPrCiW zvTs7hcVT78h@``QlV!hwvYJhMoLx^|Y1#j01okL(_K) z7M>fX-?|2+&e`-6FyIUb0K2{403qb>ijbMxzQ_@R5^G)gqoSA3^!n7gLWbEJVS@3! 
z8uF9z6tWN*2&-2utB+A85tcmallb+4oyWj%OEWv^_D&Sd;Vy)zd!b)7u)aKwf7X|# zg=omXKA*3=i(u--I(qUoOn}nGFgXox537^D0az^w^>4?rBvu!_Kx%XI1fK15&Z}H)mhMJWmT>1RDNAmi#xWqvCv42Jv&$0mNA#gBXW|xOWW0`-=QK z@|qzWCx3!w5`I$7#Y*$NXo16(3@mx}u(t_|#ml0#}rymLR|cLxsmg6D#7!+fw?>&`((Ug-7|bFYChJCwHqB$&?w zxP?QK`D>W@d6pOWL(h!TCSUd`=Q?gAg3|Kua#Q`xz+0F*ZhzlX-0kUo8WWzgu+9GU z=owFU*UMZ2I)EjK$W7EE%cvsPKtn7HNfDef%WPpJvi)KBZzF1sr&p|EEVBY{xe`t8FmQ zY)ehE7htm|_B@Jv5van+=b!@GRVG*6)lEhtKU)TNu~XqS3^fqYr;t0u_#-S}qfq?~ zNERydhm0sVa=*p!m}Cf|+JYfZG7LjYEK7vxlMD}741URQzl>poWO&i3OQAVjGL%^i zZplz63P%43N-vVM_c&wVPDb+tMHn18*0|>3gj7y$_b| z9rg6hQwS?8-wUPh+okU>kZff45j6eo9aZp+G3k#l6zK^605rS^f6@C{p;VA67#>XI z{Os>=aXFgKe)3>!0NY()?Fn5!n4kPzJ!M^#M^E+{+%DD|9>n+W4KQ)}bWh`ZSTBi) z0{Nz={XkW*s$Ff674+v#%hX~==WLqiq}IW%PCg_aVC%}1#)xM=HwA(e(9Fr?4` zjd%OJjUP1t4?r|hk=C=YSe9xXIR(W}| zE-E^XZg4*O6KB&r`s;mK*20%L!PlaiKJLAUDO^*oGZHlHq1DwD)2u5ex~R9>HQ|o7pVcsChBB6aRPQ zo(3xv5lXhvWcwM|yq^xzb^~d*fp)>%(>RJdEy4NYR&yQ#5}}+fbJn!XmqBTkH{obr zCs-1^7sY0U_sN*qDmbt6=>)_dlUM35Fo(c{dt)DO`|23CCc1r*3^_FJOZ0}2_{OOQ zZXk2M)Ys$qqc`xmH_)aN!ST6y2!0_ZZ|~SRrqk^i<>rWWyNGU_8(=}y@)hNAFcKxp zqyY)YDJMTvqaolp7-O{iMcTu#cm*M2w^zRm&6Oinv#6O$fOL( z{{!<>l9JMrC}w}Lc@*}7(H9zlBWEusDh{vNLKl>UfJ%;kiur3;IiE8k=b-E*q5#6J zCyoouyWx!cF0cX1d;kL-wcfezFV3bZNS-fL;J`XjD7OIzVxe1j4LY{&j`-bD!x^XZ&TeMDxeZOk z6rKjwO+-sKZmh!^9x}aE=$aMpptrdK!_S^-@Ib3MlNOC_Rz;;Eck?E;@3v47nqP?$x36h%>IE7VRakB7zTrlcw|G=C5nzLyh z{q?keRL=d9c>VrXN`is`e#>ciofIpR}4c z!eZkBM?)(#-k;;Vvlj%%JT~r2z_l}YC0}_cKQ+980& zzYYd#rVAPHICsr)n0rw*t>ohMMwNQ!+6u&HcEAUu`Kl;yC5TVTFHOdrsP-Gh^&dR* zOZjGD7FN1{vGMxj=;Zl`E6`VXk_^no1wAY2qQMChbSI=ujq_EV`tRrO+=eW zrQ2BHEM!R@r$9czg$iU53=#nMMZ(-mIph$028F@oPbK(3fyD%GDFE#p4)aw7mJ;ky zAcbIu0yr?@Fn_54PJuYghZS%U+^awa!8!oZPTv;uF5bW^i(D`3fLFIQ&b51Kouj)X zGmRzl(_y+{`~_Vvc0Pv}-Ip#XUHBl|kO%W7?Pnc8Ua(KhCnv{E&Zfuka(uYqXw7a8 zucCGWV*uB@1TR<&q3?ZPj3!rgh!OELe12o-krK8>#A<-sWHA&Jc#^|(*xW~EQYTzmPP%!6SD+}!Nm6(7^aCBE=5lC z0QXdp&ci{;ut+lOkPMT)U>+P`5Ch!ZmhVTT?^Nk~qx8K1TSeVFlIfcR+|8En+okUo 
zd{rYcOEC|C#BAwi*xbyF0q*v}A|2s}K*RL{j9g^#XG;E^n6L!@2pQXWe3-g-3}tM) z!lwpsOdPYHc7FouRRFhzwnHl!j^y^dUF@B0fg=u@LG|npG03_$1&Rh|1veQLFCey z(}dDmD8pl+ifRRfKE9qg3Z8$2ju0FGM>(Cos|sZ~dk|0H)#DEL-$tDPc|Bww&*2;!U1_yTy* zh;O_v(5b)mll$Mwh&iuy?>nMZIE4CyvNIii#btF!oOz4(Q;sirv>d;ZeulzNWV_0u zxE1>pw$hmpLu`F({2*4pb`I6qulooE${WDtfU8mr=Q;yixN>#?j)_k@*SYo~IF>d3 zXb4<>1Ec%JR0N7$eW(;!xQ(PA5Sj+=B|{y-i9?)22`0NmI#YIyhohJ;M8>yaR1)q$ z3y|4j_G6%WeC-L&ACMdTUfPj0XY${xE5W^iXE15_Az0_aHtb0}-y1Bz;mLOA+Si3; zzcf$i%7NS;*tj1S-rybh#=Ky=1M1(Q?Erdw#awz+_`PI6p%DpQ9Mkm#ZXGf|I4aE- zD06v(S9?PJFqCP94tS`wZn!c5aZkl)(G!}1?U?w~bvDn##Vj+hHwyro(|?cvlq8S` zxy}%$&yGQ@U^tPQ$s4-6aYsG;ree78M{doE^6~Zu?5E-892@Xt|GvJzH+Tz<=AqUJ8}7k-7FvVfJMocQ&r0W$?;TLTgNTHkx1&JQ(ULJ&l;0K&ynN$jNPV!ExBM_Yu1l z$QJrVj>A(i; zjNHYvS!lzZO+z78ytf$r8%S6R?jK}bLmn_NY*I82H7 zwOD$Tk|=Lqa4`vKMn!X@I8a*5PF~>sNU~6(;tQnu(jvIS1ydF^1F*RZ;#}&AY2zuF zb-YnGyeowg82Qm+5}w&QfB`02zLQuRN4c%T*ayRu6F^bALs~80Ka;*+lD=n4-|veV zL8Uw74U}bs9Nq&O-h#iF@{D^+qslN;56TNAJPlLPP6Q$JJQf!WUcO)f&Jso64V5>J zKf|kNKs?UoxoALgMFZl=*?1e;k6h7ycxG-~Ep+U8uz8aWNE;UMavHwut{Z`Y@dvJ2g#)G;S;-9`+Ilk~)UPy~!-NNcfqVSSIW@ zQ)n&@AH-^&a)pDmdtTbHDq{0M4hj|vR95&=Tw}-jdj%o7gn2_&%t7m!Jd4jT zc=j*i%j9yP`(3P8S^ZDAybpTF;po_(6h5dPs*?j%p&sfeW(wo#Wq!RAbrLFGZ+73p z*?64wz8q6PZ1iHv#}J_cE@~YNLL9I!cZkuA4-;y@%ypIR%gx{`naIMC!ex-zNjurl zQQ#@Ox^qfiXxzDI_LD?+d=wpUW0rw?4@1cdQD9L5d)jzhIL8y{;eWwf_6FypRh*4h z5#iuUrA!3TkD9n!&CT%R33*V=P!BhHk{W)O(C}FzzLeOxV-_h?m@Gzu-hHk3s_OQp zwdMtmsremmV=KC`sRkw?aR>;S1nN)XhK9VfL%2Hwn=m(dvpX?A%Lv5$L?uipe1j(j z{w!vsKN1tPr#W?Eo7LWeaRb2f-I0`cc37ZJJ@^1R&38i_-ejj z+{8c_w~yg&x(7YIUshq@c0$e<>06jGf0xs>2V2@(d%qkV@;Gcm7V{4xyuVrP;|nk$ zy((mC=;4I&8BhBO==5?n{hS_q(#yDZc^$ajh$wc6P< zlI?)Q^rrolmv6`Hex3;zjzKDD+w=v76xy`}X!08-}tx(|+ zJFXu%NUbs(lSYL+&oq3Nj)?`NW|OnI6<6vsVF4-J-65LVjVbO83x-CZf)GM>HWLlw z_E#bFnQ5Kw4ReNb6$`Z!jkMS|J_oCr-oPAJB-6cjPKF}_rT5?!0ihvLtdYTR%ydM* zF67VKxZ4qxsP&a7Q24G2=LN6uQ4xhVa569Lle|DRx`693tc)wqO&OTHL0yYcKLm~T zZ^oDWx}#|U&fvxW5S>AXSXt^{jv9^5;0++v8F0bp8O!%Q()V`hJ6rlb 
zD!x$J8Hn%U^_K4<>DzlqbToaX?<263D`*%XY-Ll6u45x;xRf9?y-lu@-hvm7#A+xWgy9Odo1;+h>8m4K`NHl83Z9{fD!g}RUJK$nLjuRAbSmM33N zU8D1R@UY153M2+xb6*wtU5&WhJGf9Ga{hD!`yF$6)O_CP=6bf<8ecC;5qdBoL#MYXn|P%$IH!3cQ*aeG+zr3hXCF z2WFT#0^dfAkqR~|3cQ4vUt*?9;2Vki6Auu0Au-17hG_^qmso5x?8FeOdvq@G0J?Vw zJe3%ql*qrp6N&MOjQk5cmiRp4Z316RjOifcU*KWH-yx=O7W@krtU0>4W9U1CGv1H@RL zHOx*lD0u(GF5(V>cM@Mjd`RH!#1|8{2>cZBCB)kV-by@*c#FUf5sxO`B=CL2V~8=h zLwp;FFD0%Kcr9@%aiPGgiPMPl1@;q6gi6;3Q;Bl~K0!Q_ zC16XymVhk*TLQKOYzf#Buq9wiz?Oh50b2sL1Z)Y|60jv;OTd_C16XymVhk*TLQKOYzf#BuqE(+Tmn=y@{fG%@cuq&+VZpI z_iZF#mzQrNt+V~w@;h55-^Rw4-?x$0*?#}O$?t3l*f#&05}=|oKDr!gx=a}~UJ-}a z#^L)ErlK=GR4B&>DyXVSbvRT!$ESrF+3;w8t|p18c#aPhvhl$+4}yO^HQM2!;x;}V z|12pL|!@l4PJ_I6gXF<;~cj7^%pPkB(18WB3M0dn&%;Q==F^`AVi| zztBBy7*xE+=X=z6hlh&e_@o#zBr1yIqr;~nIr&I;Dvsl$<)DH!GLFfRRj`In;Z7xw z#VX-YDJSGJSTRCln7~>NGW37ke`ki4P;8jXRr=N|x>?aD6>U~@ucC()J+5e%qP^ac z`95FKF^W!6bhe_46fIM9xuWY8-K^-7iZ&~{SJA_Y9#^zW(O&PW_!S+a=oCd~E4oP0 zGDVjwx?a)Eiax1mv!Z(yJ*?<)MY|O3^`44f(J_inQFOMVixe$Wbh)DI72T}plZrMg zx>wP|iXKhk$!O0PQPdvpV&Efie?nq>U-EIoNk;Q#CFYrnz&4EP^pptdH- zhGn!0pNbY~e<)GfQ_&%xams(8@((B;R%`mtkF&o?*%zvC&XwMchm?Iof()OEA^F^{ z!r!9&Q(-2bPw`{;4a%O1Hu*4q+HX_g|Jr`nFl_mr$yQ8{O(rvHF5R_mAw%c&M{@5q2#IC#SuC^arQ=f zMO8&zL6P5Iw8}_FsjATCy3&<(MtXTQ{)(%YEi0{(D7~f<27hT~ku;IrNMBN0YowQ~ zs;aH?8|kG>3(EXO%SsEDmH?Gk)u*e{TmV>6Rafe-f#;&SA_kN0uU5f^yX4+&5AV^mPQ^?VaWb!Zz3YU}Ek2nWGlTwPIBP*zz{BSS+F1^%L{lImp=msQq7R>EgZ zRk<|O!cT2=y&rjiR7w#6iURUg>MtrUEm&4vQV$UdmQugJ+F!7&w6+#5(yF4WtlF}v zs;;XjTV?TngMf_xUN**cBCo#E<)xLG>BZ48i#H-tBBB+L(pg?zQBqr1h|o<7XY0y_m8DVLR3p#jnS2n%iBi~bqFuQz>$xRUyt`R zJ*^xxzm8v1wr5P2=GWs(P4)N^Uc{rr(YOf)hNb!S_*qjue%5?C{aXH1fE&%P$IqJT z@iY0!6VK1~m2NuznymnMH1+6#yR<$_{)50wek$_i)AGw$I%Ju|`roAuie3vE9siLb zM24Yh(nSJVe_HO^;$;}3`8%>DzowPV+NY#C|Js)0dUEtsBrOR^GmdF2In?KaW;b$R zisskjdrf=33nWW?`u{DCUw;p18Ylj<kx}GpZ zKAKM=!&nJJbpBl`UQG|_ft_NA=l=-yx^1Et{T*dd298IUFU_yt!-a5*j=y7~%(tdF zWR7|?zqT3=Lo~nsKGxI|TYS&T|7`FxOf5enN5XgeTRF;g#+ Date: Thu, 24 Aug 2023 05:54:36 +0000 Subject: [PATCH 60/75] Implement 
expected withdrawals endpoint (#4390) ## Issue Addressed [#4029](https://github.com/sigp/lighthouse/issues/4029) ## Proposed Changes implement expected_withdrawals HTTP API per the spec https://github.com/ethereum/beacon-APIs/pull/304 ## Additional Info --- beacon_node/http_api/src/builder_states.rs | 72 +++++++++++++++ beacon_node/http_api/src/lib.rs | 57 ++++++++++++ beacon_node/http_api/tests/tests.rs | 101 +++++++++++++++++++++ common/eth2/src/lib.rs | 17 ++++ common/eth2/src/types.rs | 5 + 5 files changed, 252 insertions(+) create mode 100644 beacon_node/http_api/src/builder_states.rs diff --git a/beacon_node/http_api/src/builder_states.rs b/beacon_node/http_api/src/builder_states.rs new file mode 100644 index 0000000000..90203f2d60 --- /dev/null +++ b/beacon_node/http_api/src/builder_states.rs @@ -0,0 +1,72 @@ +use crate::StateId; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use safe_arith::SafeArith; +use state_processing::per_block_processing::get_expected_withdrawals; +use state_processing::state_advance::partial_state_advance; +use std::sync::Arc; +use types::{BeaconState, EthSpec, ForkName, Slot, Withdrawals}; + +const MAX_EPOCH_LOOKAHEAD: u64 = 2; + +/// Get the withdrawals computed from the specified state, that will be included in the block +/// that gets built on the specified state. +pub fn get_next_withdrawals( + chain: &Arc>, + mut state: BeaconState, + state_id: StateId, + proposal_slot: Slot, +) -> Result, warp::Rejection> { + get_next_withdrawals_sanity_checks(chain, &state, proposal_slot)?; + + // advance the state to the epoch of the proposal slot. 
+ let proposal_epoch = proposal_slot.epoch(T::EthSpec::slots_per_epoch()); + let (state_root, _, _) = state_id.root(chain)?; + if proposal_epoch != state.current_epoch() { + if let Err(e) = + partial_state_advance(&mut state, Some(state_root), proposal_slot, &chain.spec) + { + return Err(warp_utils::reject::custom_server_error(format!( + "failed to advance to the epoch of the proposal slot: {:?}", + e + ))); + } + } + + match get_expected_withdrawals(&state, &chain.spec) { + Ok(withdrawals) => Ok(withdrawals), + Err(e) => Err(warp_utils::reject::custom_server_error(format!( + "failed to get expected withdrawal: {:?}", + e + ))), + } +} + +fn get_next_withdrawals_sanity_checks( + chain: &BeaconChain, + state: &BeaconState, + proposal_slot: Slot, +) -> Result<(), warp::Rejection> { + if proposal_slot <= state.slot() { + return Err(warp_utils::reject::custom_bad_request( + "proposal slot must be greater than the pre-state slot".to_string(), + )); + } + + let fork = chain.spec.fork_name_at_slot::(proposal_slot); + if let ForkName::Base | ForkName::Altair | ForkName::Merge = fork { + return Err(warp_utils::reject::custom_bad_request( + "the specified state is a pre-capella state.".to_string(), + )); + } + + let look_ahead_limit = MAX_EPOCH_LOOKAHEAD + .safe_mul(T::EthSpec::slots_per_epoch()) + .map_err(warp_utils::reject::arith_error)?; + if proposal_slot >= state.slot() + look_ahead_limit { + return Err(warp_utils::reject::custom_bad_request(format!( + "proposal slot is greater than or equal to the look ahead limit: {look_ahead_limit}" + ))); + } + + Ok(()) +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 8e316834d0..3aa10139b0 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -10,6 +10,7 @@ mod attester_duties; mod block_id; mod block_packing_efficiency; mod block_rewards; +mod builder_states; mod database; mod metrics; mod proposer_duties; @@ -32,6 +33,7 @@ use beacon_chain::{ }; use 
beacon_processor::BeaconProcessorSend; pub use block_id::BlockId; +use builder_states::get_next_withdrawals; use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ @@ -2291,6 +2293,60 @@ pub fn serve( }, ); + /* + * builder/states + */ + + let builder_states_path = eth_v1 + .and(warp::path("builder")) + .and(warp::path("states")) + .and(chain_filter.clone()); + + // GET builder/states/{state_id}/expected_withdrawals + let get_expected_withdrawals = builder_states_path + .clone() + .and(task_spawner_filter.clone()) + .and(warp::path::param::()) + .and(warp::path("expected_withdrawals")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + .then( + |chain: Arc>, + task_spawner: TaskSpawner, + state_id: StateId, + query: api_types::ExpectedWithdrawalsQuery, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (state, execution_optimistic, finalized) = state_id.state(&chain)?; + let proposal_slot = query.proposal_slot.unwrap_or(state.slot() + 1); + let withdrawals = + get_next_withdrawals::(&chain, state, state_id, proposal_slot)?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .header("Content-Type", "application/octet-stream") + .body(withdrawals.as_ssz_bytes().into()) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => Ok(warp::reply::json( + &api_types::ExecutionOptimisticFinalizedResponse { + data: withdrawals, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }, + ) + .into_response()), + } + }) + }, + ); + /* * beacon/rewards */ @@ -4503,6 +4559,7 @@ pub fn serve( .uor(get_lighthouse_block_packing_efficiency) .uor(get_lighthouse_merge_readiness) .uor(get_events) + .uor(get_expected_withdrawals) .uor(lighthouse_log_events.boxed()) .recover(warp_utils::reject::handle_rejection), ) diff --git 
a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 3c72441c02..46cc55591c 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -28,6 +28,7 @@ use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_slot_processing; +use state_processing::state_advance::partial_state_advance; use std::convert::TryInto; use std::sync::Arc; use tokio::time::Duration; @@ -4341,6 +4342,72 @@ impl ApiTester { self } + pub async fn test_get_expected_withdrawals_invalid_state(self) -> Self { + let state_id = CoreStateId::Root(Hash256::zero()); + + let result = self.client.get_expected_withdrawals(&state_id).await; + + match result { + Err(e) => { + assert_eq!(e.status().unwrap(), 404); + } + _ => panic!("query did not fail correctly"), + } + + self + } + + pub async fn test_get_expected_withdrawals_capella(self) -> Self { + let slot = self.chain.slot().unwrap(); + let state_id = CoreStateId::Slot(slot); + + // calculate the expected withdrawals + let (mut state, _, _) = StateId(state_id).state(&self.chain).unwrap(); + let proposal_slot = state.slot() + 1; + let proposal_epoch = proposal_slot.epoch(E::slots_per_epoch()); + let (state_root, _, _) = StateId(state_id).root(&self.chain).unwrap(); + if proposal_epoch != state.current_epoch() { + let _ = partial_state_advance( + &mut state, + Some(state_root), + proposal_slot, + &self.chain.spec, + ); + } + let expected_withdrawals = get_expected_withdrawals(&state, &self.chain.spec).unwrap(); + + // fetch expected withdrawals from the client + let result = self.client.get_expected_withdrawals(&state_id).await; + match result { + Ok(withdrawal_response) => { + assert_eq!(withdrawal_response.execution_optimistic, Some(false)); + assert_eq!(withdrawal_response.finalized, Some(false)); + assert_eq!(withdrawal_response.data, expected_withdrawals.to_vec()); + } + Err(e) => { + 
println!("{:?}", e); + panic!("query failed incorrectly"); + } + } + + self + } + + pub async fn test_get_expected_withdrawals_pre_capella(self) -> Self { + let state_id = CoreStateId::Head; + + let result = self.client.get_expected_withdrawals(&state_id).await; + + match result { + Err(e) => { + assert_eq!(e.status().unwrap(), 400); + } + _ => panic!("query did not fail correctly"), + } + + self + } + pub async fn test_get_events_altair(self) -> Self { let topics = vec![EventTopic::ContributionAndProof]; let mut events_future = self @@ -5123,3 +5190,37 @@ async fn optimistic_responses() { .test_check_optimistic_responses() .await; } + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn expected_withdrawals_invalid_pre_capella() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_get_expected_withdrawals_pre_capella() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn expected_withdrawals_invalid_state() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_get_expected_withdrawals_invalid_state() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn expected_withdrawals_valid_capella() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_get_expected_withdrawals_capella() + .await; +} diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 146a832e38..74c2f3802f 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1261,6 
+1261,23 @@ impl BeaconNodeHttpClient { Ok(()) } + // GET builder/states/{state_id}/expected_withdrawals + pub async fn get_expected_withdrawals( + &self, + state_id: &StateId, + ) -> Result>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("builder") + .push("states") + .push(&state_id.to_string()) + .push("expected_withdrawals"); + + self.get(path).await + } + /// `POST validator/contribution_and_proofs` pub async fn post_validator_contribution_and_proofs( &self, diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index f451d3b8f2..28fd09c09b 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -581,6 +581,11 @@ pub struct SyncingData { pub sync_distance: Slot, } +#[derive(Serialize, Deserialize)] +pub struct ExpectedWithdrawalsQuery { + pub proposal_slot: Option, +} + #[derive(Clone, PartialEq, Debug, Deserialize)] #[serde(try_from = "String", bound = "T: FromStr")] pub struct QueryVec { From ea43b6a53c0e24a4a1111bcbb1b27fd4b33b4322 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Thu, 24 Aug 2023 05:54:37 +0000 Subject: [PATCH 61/75] Revive mplex (#4619) ## Issue Addressed N/A ## Proposed Changes In #4431 , we seem to have removed support for mplex as it is being deprecated in libp2p. See https://github.com/libp2p/specs/issues/553 . Related rust-libp2p PR https://github.com/libp2p/rust-libp2p/pull/3920 However, since this isn't part of the official [consensus specs](https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#multiplexing), we still need to support mplex. > Clients MUST support [mplex](https://github.com/libp2p/specs/tree/master/mplex) and MAY support [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). This PR adds back mplex support as before. 
--- Cargo.lock | 20 +++++++++++++++++++ beacon_node/lighthouse_network/Cargo.toml | 1 + .../lighthouse_network/src/service/utils.rs | 10 +++++++++- 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 9a8cf48336..b1779045e3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4117,6 +4117,25 @@ dependencies = [ "prometheus-client", ] +[[package]] +name = "libp2p-mplex" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93959ed08b6caf9810e067655e25f1362098797fef7c44d3103e63dcb6f0fabe" +dependencies = [ + "asynchronous-codec", + "bytes", + "futures", + "libp2p-core", + "libp2p-identity", + "log", + "nohash-hasher", + "parking_lot 0.12.1", + "rand 0.8.5", + "smallvec 1.11.0", + "unsigned-varint 0.7.1", +] + [[package]] name = "libp2p-noise" version = "0.43.0" @@ -4382,6 +4401,7 @@ dependencies = [ "hex", "lazy_static", "libp2p", + "libp2p-mplex", "lighthouse_metrics", "lighthouse_version", "lru 0.7.8", diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index f71845fed2..925d278ad6 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -44,6 +44,7 @@ prometheus-client = "0.21.0" unused_port = { path = "../../common/unused_port" } delay_map = "0.3.0" void = "1" +libp2p-mplex = "0.40.0" [dependencies.libp2p] version = "0.52" diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 21fd09b6b0..b8acc4ed6d 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -50,13 +50,21 @@ pub fn build_transport( transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone)) }; + // mplex config + let mut mplex_config = libp2p_mplex::MplexConfig::new(); + mplex_config.set_max_buffer_size(256); + 
mplex_config.set_max_buffer_behaviour(libp2p_mplex::MaxBufferBehaviour::Block); + // yamux config let mut yamux_config = yamux::Config::default(); yamux_config.set_window_update_mode(yamux::WindowUpdateMode::on_read()); let (transport, bandwidth) = transport .upgrade(core::upgrade::Version::V1) .authenticate(generate_noise_config(&local_private_key)) - .multiplex(yamux_config) + .multiplex(core::upgrade::SelectUpgrade::new( + yamux_config, + mplex_config, + )) .timeout(Duration::from_secs(10)) .boxed() .with_bandwidth_logging(); From fa6003bb5ccf1949297dea66f79849dbf073a918 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 24 Aug 2023 05:54:38 +0000 Subject: [PATCH 62/75] Use lockfile with cross and fix audit fail (#4656) ## Issue Addressed Temporary ignore for #4651. We are unaffected, and upstream will be patched in a few days. ## Proposed Changes - Ignore cargo audit failures (ublocks CI) - Use `--locked` when building with `cross`. We use `--locked` for regular builds, and I think excluding it from `cross` was just an oversight. I think for consistent builds it makes sense to use `--locked` while building. This is particularly relevant for release binaries, which otherwise will just use a random selection of dependencies that exist on build day (near impossible to recreate if we had to). --- Makefile | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index cb447e26a5..bc84cdede0 100644 --- a/Makefile +++ b/Makefile @@ -71,13 +71,13 @@ install-lcli: # optimized CPU functions that may not be available on some systems. This # results in a more portable binary with ~20% slower BLS verification. 
build-x86_64: - cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" + cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-x86_64-portable: - cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" + cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-aarch64: - cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" + cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-aarch64-portable: - cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" + cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked # Create a `.tar.gz` containing a binary for a specific target. define tarball_release_binary @@ -206,8 +206,9 @@ arbitrary-fuzz: # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: - cargo install --force cargo-audit - cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2022-0093 + # cargo install --force cargo-audit + cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2022-0093 \ + --ignore RUSTSEC-2023-0052 --ignore RUSTSEC-2023-0053 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. 
vendor: From 14924dbc955732a742d52eafcad32cb0f790f027 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Thu, 24 Aug 2023 14:33:24 -0400 Subject: [PATCH 63/75] rust 1.72 lints (#4659) --- beacon_node/beacon_chain/src/observed_attesters.rs | 6 +++--- beacon_node/genesis/src/common.rs | 2 +- beacon_node/lighthouse_network/src/discovery/mod.rs | 2 +- .../lighthouse_network/src/peer_manager/mod.rs | 3 ++- beacon_node/store/src/chunked_vector.rs | 6 ++++-- beacon_node/store/src/leveldb_store.rs | 2 +- consensus/safe_arith/src/iter.rs | 12 ++++++------ crypto/bls/src/generic_public_key_bytes.rs | 5 +---- 8 files changed, 19 insertions(+), 19 deletions(-) diff --git a/beacon_node/beacon_chain/src/observed_attesters.rs b/beacon_node/beacon_chain/src/observed_attesters.rs index 59c67bd1b9..605a134321 100644 --- a/beacon_node/beacon_chain/src/observed_attesters.rs +++ b/beacon_node/beacon_chain/src/observed_attesters.rs @@ -841,7 +841,7 @@ mod tests { let mut store = $type::default(); let max_cap = store.max_capacity(); - let to_skip = vec![1_u64, 3, 4, 5]; + let to_skip = [1_u64, 3, 4, 5]; let periods = (0..max_cap * 3) .into_iter() .filter(|i| !to_skip.contains(i)) @@ -1012,7 +1012,7 @@ mod tests { let mut store = $type::default(); let max_cap = store.max_capacity(); - let to_skip = vec![1_u64, 3, 4, 5]; + let to_skip = [1_u64, 3, 4, 5]; let periods = (0..max_cap * 3) .into_iter() .filter(|i| !to_skip.contains(i)) @@ -1121,7 +1121,7 @@ mod tests { let mut store = $type::default(); let max_cap = store.max_capacity(); - let to_skip = vec![1_u64, 3, 4, 5]; + let to_skip = [1_u64, 3, 4, 5]; let periods = (0..max_cap * 3) .into_iter() .filter(|i| !to_skip.contains(i)) diff --git a/beacon_node/genesis/src/common.rs b/beacon_node/genesis/src/common.rs index 06bf99f9f6..e48fa36204 100644 --- a/beacon_node/genesis/src/common.rs +++ b/beacon_node/genesis/src/common.rs @@ -39,7 +39,7 @@ pub fn genesis_deposits( Ok(deposit_data .into_iter() - .zip(proofs.into_iter()) + .zip(proofs) 
.map(|(data, proof)| (data, proof.into())) .map(|(data, proof)| Deposit { proof, data }) .collect()) diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 82a371d8a2..c3819f1eb9 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -647,7 +647,7 @@ impl Discovery { if subnet_queries.len() == MAX_SUBNETS_IN_QUERY || self.queued_queries.is_empty() { // This query is for searching for peers of a particular subnet // Drain subnet_queries so we can re-use it as we continue to process the queue - let grouped_queries: Vec = subnet_queries.drain(..).collect(); + let grouped_queries: Vec = std::mem::take(&mut subnet_queries); self.start_subnet_query(grouped_queries); processed = true; } diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 4f3454f403..f6158ed3a0 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -969,6 +969,7 @@ impl PeerManager { macro_rules! prune_peers { ($filter: expr) => { + let filter = $filter; for (peer_id, info) in self .network_globals .peers @@ -976,7 +977,7 @@ impl PeerManager { .worst_connected_peers() .iter() .filter(|(_, info)| { - !info.has_future_duty() && !info.is_trusted() && $filter(*info) + !info.has_future_duty() && !info.is_trusted() && filter(*info) }) { if peers_to_prune.len() diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index 73edfbb074..537614f281 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -299,7 +299,8 @@ macro_rules! field { } fn update_pattern(spec: &ChainSpec) -> UpdatePattern { - $update_pattern(spec) + let update_pattern = $update_pattern; + update_pattern(spec) } fn get_value( @@ -307,7 +308,8 @@ macro_rules! 
field { vindex: u64, spec: &ChainSpec, ) -> Result { - $get_value(state, vindex, spec) + let get_value = $get_value; + get_value(state, vindex, spec) } fn is_fixed_length() -> bool { diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index 86bd4ffacc..7aac9f72d9 100644 --- a/beacon_node/store/src/leveldb_store.rs +++ b/beacon_node/store/src/leveldb_store.rs @@ -167,7 +167,7 @@ impl KeyValueStore for LevelDB { ) }; - for (start_key, end_key) in vec![ + for (start_key, end_key) in [ endpoints(DBColumn::BeaconStateTemporary), endpoints(DBColumn::BeaconState), ] { diff --git a/consensus/safe_arith/src/iter.rs b/consensus/safe_arith/src/iter.rs index 1fc3d3a1a7..d5ee51b588 100644 --- a/consensus/safe_arith/src/iter.rs +++ b/consensus/safe_arith/src/iter.rs @@ -28,10 +28,10 @@ mod test { #[test] fn unsigned_sum_small() { - let v = vec![400u64, 401, 402, 403, 404, 405, 406]; + let arr = [400u64, 401, 402, 403, 404, 405, 406]; assert_eq!( - v.iter().copied().safe_sum().unwrap(), - v.iter().copied().sum() + arr.iter().copied().safe_sum().unwrap(), + arr.iter().copied().sum() ); } @@ -61,10 +61,10 @@ mod test { #[test] fn signed_sum_almost_overflow() { - let v = vec![i64::MIN, 1, -1i64, i64::MAX, i64::MAX, 1]; + let arr = [i64::MIN, 1, -1i64, i64::MAX, i64::MAX, 1]; assert_eq!( - v.iter().copied().safe_sum().unwrap(), - v.iter().copied().sum() + arr.iter().copied().safe_sum().unwrap(), + arr.iter().copied().sum() ); } } diff --git a/crypto/bls/src/generic_public_key_bytes.rs b/crypto/bls/src/generic_public_key_bytes.rs index 59b0ffc43f..240568b4f6 100644 --- a/crypto/bls/src/generic_public_key_bytes.rs +++ b/crypto/bls/src/generic_public_key_bytes.rs @@ -27,10 +27,7 @@ impl Copy for GenericPublicKeyBytes {} impl Clone for GenericPublicKeyBytes { fn clone(&self) -> Self { - Self { - bytes: self.bytes, - _phantom: PhantomData, - } + *self } } From c258270d6a874d407e2a465b916f97dbcb2c759a Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Mon, 28 Aug 2023 00:55:28 +0000 Subject: [PATCH 64/75] update dependencies (#4639) ## Issue Addressed updates underlying dependencies and removes the ignored `RUSTSEC`'s for `cargo audit`. Also switches `procinfo` to `procfs` on `eth2` to remove the `nom` warning, `procinfo` is unmaintained see [here](https://github.com/danburkert/procinfo-rs/issues/46). --- Cargo.lock | 1212 ++++++++++++++---------- Cargo.toml | 3 +- Makefile | 5 +- beacon_node/execution_layer/Cargo.toml | 2 +- common/eth2/Cargo.toml | 10 +- common/eth2/src/lighthouse.rs | 13 +- common/eth2_network_config/Cargo.toml | 4 +- common/logging/Cargo.toml | 4 +- common/warp_utils/src/cors.rs | 12 +- common/warp_utils/src/metrics.rs | 2 +- lighthouse/tests/beacon_node.rs | 32 +- lighthouse/tests/tls/cert.pem | 24 + lighthouse/tests/tls/key.rsa | 27 + lighthouse/tests/validator_client.rs | 15 + testing/web3signer_tests/Cargo.toml | 4 +- watch/Cargo.toml | 3 +- watch/tests/tests.rs | 38 +- 17 files changed, 887 insertions(+), 523 deletions(-) create mode 100644 lighthouse/tests/tls/cert.pem create mode 100644 lighthouse/tests/tls/key.rsa diff --git a/Cargo.lock b/Cargo.lock index b1779045e3..56d421e039 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -97,12 +97,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" dependencies = [ "cfg-if", - "cipher", + "cipher 0.3.0", "cpufeatures", "ctr", "opaque-debug", ] +[[package]] +name = "aes" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +dependencies = [ + "cfg-if", + "cipher 0.4.4", + "cpufeatures", +] + [[package]] name = "aes-gcm" version = "0.9.4" @@ -110,8 +121,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6" 
dependencies = [ "aead", - "aes", - "cipher", + "aes 0.7.5", + "cipher 0.3.0", "ctr", "ghash", "subtle", @@ -141,9 +152,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" +checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" dependencies = [ "memchr", ] @@ -194,9 +205,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.72" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" +checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "arbitrary" @@ -234,6 +245,45 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "asn1-rs" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror", + "time", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "asn1_der" version = "0.7.6" @@ -262,9 +312,9 @@ dependencies = [ 
[[package]] name = "async-lock" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" dependencies = [ "event-listener", ] @@ -277,7 +327,7 @@ checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", ] [[package]] @@ -288,18 +338,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] name = "async-trait" -version = "0.1.72" +version = "0.1.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6dde6e4ed435a4c1ee4e73592f5ba9da2151af10076cc04858746af9352d09" +checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -310,7 +360,7 @@ checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" dependencies = [ "futures", "pharos", - "rustc_version 0.4.0", + "rustc_version", ] [[package]] @@ -323,7 +373,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", ] [[package]] @@ -378,9 +428,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.19" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a1de45611fdb535bfde7b7de4fd54f4fd2b17b1737c0a59b69bf9b92074b8c" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", "axum-core", @@ -395,7 +445,7 @@ dependencies = [ "memchr", "mime", "percent-encoding", - "pin-project-lite 0.2.10", + 
"pin-project-lite 0.2.12", "rustversion", "serde", "serde_json", @@ -649,9 +699,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" [[package]] name = "bitvec" @@ -744,11 +794,10 @@ dependencies = [ [[package]] name = "bollard-stubs" -version = "1.41.0" +version = "1.42.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed2f2e73fffe9455141e170fb9c1feb0ac521ec7e7dcd47a7cab72a658490fb8" +checksum = "ed59b5c00048f48d7af971b71f800fdf23e858844a6f9e4d32ca72e9399e7864" dependencies = [ - "chrono", "serde", "serde_with", ] @@ -794,16 +843,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "buf_redux" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" -dependencies = [ - "memchr", - "safemem", -] - [[package]] name = "builder_client" version = "0.1.0" @@ -905,7 +944,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.18", + "semver", "serde", "serde_json", "thiserror", @@ -919,9 +958,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.79" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +checksum = "305fe645edc1442a0fa8b6726ba61d422798d37a52e12eaecf4b022ebbb88f01" +dependencies = [ + "jobserver", + "libc", +] [[package]] name = "cexpr" @@ -929,7 +972,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ - "nom 7.1.3", + "nom", ] [[package]] @@ -945,7 +988,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" dependencies = [ "cfg-if", - "cipher", + "cipher 0.3.0", "cpufeatures", "zeroize", ] @@ -958,7 +1001,7 @@ checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" dependencies = [ "aead", "chacha20", - "cipher", + "cipher 0.3.0", "poly1305", "zeroize", ] @@ -971,11 +1014,7 @@ checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ "android-tzdata", "iana-time-zone", - "js-sys", "num-traits", - "serde", - "time 0.1.45", - "wasm-bindgen", "winapi", ] @@ -988,6 +1027,16 @@ dependencies = [ "generic-array", ] +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", +] + [[package]] name = "clang-sys" version = "1.6.1" @@ -1066,7 +1115,7 @@ dependencies = [ "state_processing", "store", "task_executor", - "time 0.3.24", + "time", "timer", "tokio", "types", @@ -1107,9 +1156,15 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "795bc6e66a8e340f075fcf6227e417a2dc976b92b91f3cdc778bb858778b6747" +checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" + +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "convert_case" @@ -1326,7 +1381,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea" dependencies = [ - "cipher", + "cipher 0.3.0", ] [[package]] @@ -1336,7 +1391,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e" dependencies = [ "nix 0.26.2", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1354,19 +1409,32 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0-rc.1" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d4ba9852b42210c7538b75484f9daa0655e9a3ac04f693747bb0f02cf3cfe16" +checksum = "f711ade317dd348950a9910f81c5947e3d8907ebd2b83f76203ff1807e6a2bc2" dependencies = [ "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "packed_simd_2", "platforms 3.0.2", + "rustc_version", "subtle", "zeroize", ] +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.29", +] + [[package]] name = "darling" version = "0.13.4" @@ -1508,9 +1576,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7ed52955ce76b1554f509074bb357d3fb8ac9b51288a65a3fd480d1dfba946" +checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" dependencies = [ "const-oid", "pem-rfc7468", @@ -1518,10 +1586,24 @@ dependencies = [ ] [[package]] -name = "deranged" -version = "0.3.6" +name = "der-parser" +version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8810e7e2cf385b1e9b50d68264908ec367ba642c96d02edfe61c39e88e2a3c01" +checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" +dependencies = [ + "asn1-rs", + 
"displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "deranged" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7684a49fb1af197853ef7b2ee694bc1f5b4179556f1e5710e1760c5db6f5e929" [[package]] name = "derivative" @@ -1542,7 +1624,7 @@ checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -1554,7 +1636,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "rustc_version 0.4.0", + "rustc_version", "syn 1.0.109", ] @@ -1564,7 +1646,7 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7a532c1f99a0f596f6960a60d1e119e91582b24b39e2d83a190e61262c3ef0c" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.0", "byteorder", "diesel_derives", "itoa", @@ -1581,7 +1663,7 @@ dependencies = [ "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -1601,7 +1683,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" dependencies = [ - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -1681,7 +1763,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98c05fa26996c6141f78ac4fafbe297a7fa69690565ba4e0d1f2e60bde5ce501" dependencies = [ - "aes", + "aes 0.7.5", "aes-gcm", "arrayvec", "delay_map", @@ -1708,6 +1790,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "displaydoc" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.29", +] + [[package]] name = "dtoa" version = "1.0.9" @@ -1738,7 +1831,7 @@ version = "0.16.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "a4b1e0c257a9e9f25f90ff76d7a68360ed497ee519c8e428d1825ef0000799d4" dependencies = [ - "der 0.7.7", + "der 0.7.8", "digest 0.10.7", "elliptic-curve 0.13.5", "rfc6979 0.4.0", @@ -1748,18 +1841,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.5.3" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" -dependencies = [ - "signature 1.6.4", -] - -[[package]] -name = "ed25519" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fb04eee5d9d907f29e80ee6b0e78f7e2c82342c63e3580d8c4f69d9d5aad963" +checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" dependencies = [ "pkcs8 0.10.2", "signature 2.1.0", @@ -1767,26 +1851,12 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "1.0.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" dependencies = [ - "curve25519-dalek 3.2.0", - "ed25519 1.5.3", - "rand 0.7.3", - "serde", - "sha2 0.9.9", - "zeroize", -] - -[[package]] -name = "ed25519-dalek" -version = "2.0.0-pre.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bd577ba9d4bcab443cac60003d8fd32c638e7024a3ec92c200d7af5d2c397ed" -dependencies = [ - "curve25519-dalek 4.0.0-rc.1", - "ed25519 2.2.1", + "curve25519-dalek 4.0.0", + "ed25519", "rand_core 0.6.4", "serde", "sha2 0.10.7", @@ -1906,7 +1976,7 @@ checksum = "0be7b2ac146c1f99fe245c02d16af0696450d8e06c135db75e10eeb9e642c20d" dependencies = [ "base64 0.21.2", "bytes", - "ed25519-dalek 2.0.0-pre.0", + "ed25519-dalek", "hex", "k256 0.13.1", "log", @@ -1989,7 +2059,7 @@ checksum = 
"6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" dependencies = [ "errno-dragonfly", "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2075,7 +2145,7 @@ dependencies = [ "mediatype", "mime", "pretty_reqwest_error", - "procinfo", + "procfs", "proto_array", "psutil", "reqwest", @@ -2128,7 +2198,7 @@ dependencies = [ name = "eth2_keystore" version = "0.1.0" dependencies = [ - "aes", + "aes 0.7.5", "bls", "eth2_key_derivation", "hex", @@ -2637,7 +2707,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38e2275cc4e4fc009b0669731a1e5ab7ebf11f469eaede2bab9309a5b4d6057f" dependencies = [ "memoffset 0.9.0", - "rustc_version 0.4.0", + "rustc_version", ] [[package]] @@ -2675,9 +2745,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" dependencies = [ "crc32fast", "libz-sys", @@ -2811,7 +2881,7 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "waker-fn", ] @@ -2823,18 +2893,17 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] name = "futures-rustls" -version = "0.22.2" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" +checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls 0.20.8", - "webpki 0.22.0", + "rustls 0.21.6", ] [[package]] @@ -2879,7 +2948,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "pin-utils", "slab", ] @@ -3258,7 
+3327,7 @@ checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", ] [[package]] @@ -3338,9 +3407,9 @@ checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" @@ -3364,7 +3433,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "socket2 0.4.9", "tokio", "tower-service", @@ -3381,7 +3450,7 @@ dependencies = [ "futures-util", "http", "hyper", - "rustls 0.21.5", + "rustls 0.21.6", "tokio", "tokio-rustls 0.24.1", ] @@ -3594,6 +3663,15 @@ dependencies = [ "hashbrown 0.14.0", ] +[[package]] +name = "inout" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +dependencies = [ + "generic-array", +] + [[package]] name = "instant" version = "0.1.12" @@ -3632,7 +3710,7 @@ checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ "hermit-abi 0.3.2", "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -3643,7 +3721,7 @@ checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ "socket2 0.5.3", "widestring 1.0.2", - "windows-sys", + "windows-sys 0.48.0", "winreg 0.50.0", ] @@ -3699,6 +3777,15 @@ dependencies = [ "libc", ] +[[package]] +name = "jobserver" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +dependencies = [ + "libc", +] + [[package]] name = 
"js-sys" version = "0.3.64" @@ -3774,7 +3861,7 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" dependencies = [ - "spin", + "spin 0.5.2", ] [[package]] @@ -3880,12 +3967,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "libm" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" - [[package]] name = "libm" version = "0.2.7" @@ -3909,9 +3990,9 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.52.1" +version = "0.52.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38039ba2df4f3255842050845daef4a004cc1f26da03dbc645535088b51910ef" +checksum = "32d07d1502a027366d55afe187621c2d7895dc111a3df13b35fed698049681d7" dependencies = [ "bytes", "futures", @@ -3929,6 +4010,7 @@ dependencies = [ "libp2p-metrics", "libp2p-noise", "libp2p-plaintext", + "libp2p-quic", "libp2p-swarm", "libp2p-tcp", "libp2p-websocket", @@ -4006,9 +4088,9 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.45.0" +version = "0.45.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e378da62e8c9251f6e885ed173a561663f29b251e745586cf6ae6150b295c37" +checksum = "2d157562dba6017193e5285acf6b1054759e83540bfd79f75b69d6ce774c88da" dependencies = [ "asynchronous-codec", "base64 0.21.2", @@ -4060,13 +4142,13 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38d6012784fe4cc14e6d443eb415b11fc7c456dc15d9f0d90d9b70bc7ac3ec1" +checksum = "686e73aff5e23efbb99bc85340ea6fd8686986aa7b283a881ba182cfca535ca9" dependencies = [ "asn1_der", "bs58 0.5.0", - "ed25519-dalek 1.0.1", + "ed25519-dalek", "libsecp256k1", "log", "multihash 0.19.0", @@ -4103,9 +4185,9 @@ dependencies = [ 
[[package]] name = "libp2p-metrics" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3787ea81798dcc5bf1d8b40a8e8245cf894b168d04dd70aa48cb3ff2fff141d2" +checksum = "239ba7d28f8d0b5d77760dc6619c05c7e88e74ec8fbbe97f856f20a56745e620" dependencies = [ "instant", "libp2p-core", @@ -4138,12 +4220,12 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.43.0" +version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87945db2b3f977af09b62b9aa0a5f3e4870995a577ecd845cdeba94cdf6bbca7" +checksum = "71ce70757f2c0d82e9a3ef738fb10ea0723d16cec37f078f719e2c247704c1bb" dependencies = [ "bytes", - "curve25519-dalek 3.2.0", + "curve25519-dalek 4.0.0", "futures", "libp2p-core", "libp2p-identity", @@ -4178,10 +4260,33 @@ dependencies = [ ] [[package]] -name = "libp2p-swarm" -version = "0.43.2" +name = "libp2p-quic" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43106820057e0f65c77b01a3873593f66e676da4e40c70c3a809b239109f1d30" +checksum = "4cb763e88f9a043546bfebd3575f340e7dd3d6c1b2cf2629600ec8965360c63a" +dependencies = [ + "bytes", + "futures", + "futures-timer", + "if-watch", + "libp2p-core", + "libp2p-identity", + "libp2p-tls", + "log", + "parking_lot 0.12.1", + "quinn", + "rand 0.8.5", + "rustls 0.21.6", + "socket2 0.5.3", + "thiserror", + "tokio", +] + +[[package]] +name = "libp2p-swarm" +version = "0.43.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28016944851bd73526d3c146aabf0fa9bbe27c558f080f9e5447da3a1772c01a" dependencies = [ "either", "fnv", @@ -4210,7 +4315,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -4231,10 +4336,29 @@ dependencies = [ ] [[package]] -name = "libp2p-websocket" -version = "0.42.0" +name = "libp2p-tls" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "956d981ebc84abc3377e5875483c06d94ff57bc6b25f725047f9fd52592f72d4" +checksum = "8218d1d5482b122ccae396bbf38abdcb283ecc96fa54760e1dfd251f0546ac61" +dependencies = [ + "futures", + "futures-rustls", + "libp2p-core", + "libp2p-identity", + "rcgen", + "ring", + "rustls 0.21.6", + "rustls-webpki", + "thiserror", + "x509-parser", + "yasna", +] + +[[package]] +name = "libp2p-websocket" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3facf0691bab65f571bc97c6c65ffa836248ca631d631b7691ac91deb7fceb5f" dependencies = [ "either", "futures", @@ -4247,14 +4371,14 @@ dependencies = [ "rw-stream-sink", "soketto", "url", - "webpki-roots 0.23.1", + "webpki-roots 0.25.2", ] [[package]] name = "libp2p-yamux" -version = "0.44.0" +version = "0.44.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0a9b42ab6de15c6f076d8fb11dc5f48d899a10b55a2e16b12be9012a05287b0" +checksum = "8eedcb62824c4300efb9cfd4e2a6edaf3ca097b9e68b36dabe45a44469fd6a85" dependencies = [ "futures", "libp2p-core", @@ -4452,6 +4576,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linux-raw-sys" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" + [[package]] name = "linux-raw-sys" version = "0.3.8" @@ -4505,9 +4635,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "logging" @@ -4611,9 +4741,9 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" -version 
= "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67827e6ea8ee8a7c4a72227ef4fc08957040acffdb5f122733b24fa12daff41b" +checksum = "ed1202b2a6f884ae56f04cff409ab315c5ce26b5e58d7412e484f01fd52f52ef" [[package]] name = "maybe-uninit" @@ -4799,7 +4929,7 @@ checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -4828,6 +4958,24 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" +[[package]] +name = "multer" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01acbdc23469fd8fe07ab135923371d5f5a422fbf9c522158677c8eb15bc51c2" +dependencies = [ + "bytes", + "encoding_rs", + "futures-util", + "http", + "httparse", + "log", + "memchr", + "mime", + "spin 0.9.8", + "version_check", +] + [[package]] name = "multiaddr" version = "0.14.0" @@ -4913,24 +5061,6 @@ dependencies = [ "synstructure", ] -[[package]] -name = "multipart" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" -dependencies = [ - "buf_redux", - "httparse", - "log", - "mime", - "mime_guess", - "quick-error", - "rand 0.8.5", - "safemem", - "tempfile", - "twoway", -] - [[package]] name = "multistream-select" version = "0.13.0" @@ -5140,12 +5270,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" -[[package]] -name = "nom" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf51a729ecf40266a2368ad335a5fdde43471f545a967109cd62146ecf8b66ff" - [[package]] name = "nom" version = "7.1.3" @@ -5195,7 
+5319,7 @@ dependencies = [ "autocfg 0.1.8", "byteorder", "lazy_static", - "libm 0.2.7", + "libm", "num-integer", "num-iter", "num-traits", @@ -5263,6 +5387,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "oid-registry" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" +dependencies = [ + "asn1-rs", +] + [[package]] name = "once_cell" version = "1.18.0" @@ -5315,9 +5448,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.55" +version = "0.10.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" +checksum = "729b745ad4a5575dd06a3e1af1414bd330ee561c01b3899eb584baeaa8def17e" dependencies = [ "bitflags 1.3.2", "cfg-if", @@ -5336,7 +5469,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -5347,18 +5480,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.26.0+1.1.1u" +version = "111.27.0+1.1.1v" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efc62c9f12b22b8f5208c23a7200a442b2e5999f8bdf80233852122b5a4f6f37" +checksum = "06e8f197c82d7511c5b014030c9b1efeda40d7d5f99d23b4ceed3524a5e63f02" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.90" +version = "0.9.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" +checksum = "866b5f16f90776b9bb8dc1e1802ac6f0513de3a7a7465867bfbc563dc737faac" dependencies = [ "cc", "libc", @@ -5409,16 +5542,6 @@ dependencies = [ "sha2 0.10.7", ] -[[package]] -name = "packed_simd_2" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "a1914cd452d8fccd6f9db48147b29fd4ae05bea9dc5d9ad578509f72415de282" -dependencies = [ - "cfg-if", - "libm 0.1.4", -] - [[package]] name = "parity-scale-codec" version = "2.3.1" @@ -5522,7 +5645,18 @@ dependencies = [ "libc", "redox_syscall 0.3.5", "smallvec 1.11.0", - "windows-targets", + "windows-targets 0.48.3", +] + +[[package]] +name = "password-hash" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" +dependencies = [ + "base64ct", + "rand_core 0.6.4", + "subtle", ] [[package]] @@ -5549,6 +5683,18 @@ dependencies = [ "crypto-mac 0.11.1", ] +[[package]] +name = "pbkdf2" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" +dependencies = [ + "digest 0.10.7", + "hmac 0.12.1", + "password-hash", + "sha2 0.10.7", +] + [[package]] name = "peeking_take_while" version = "0.1.2" @@ -5586,7 +5732,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" dependencies = [ "futures", - "rustc_version 0.4.0", + "rustc_version", ] [[package]] @@ -5609,22 +5755,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.28", + "syn 2.0.29", ] [[package]] @@ -5635,9 +5781,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.10" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57" +checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" [[package]] name = "pin-utils" @@ -5661,7 +5807,7 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.7", + "der 0.7.8", "spki 0.7.2", ] @@ -5723,8 +5869,8 @@ dependencies = [ "concurrent-queue", "libc", "log", - "pin-project-lite 0.2.10", - "windows-sys", + "pin-project-lite 0.2.12", + "windows-sys 0.48.0", ] [[package]] @@ -5886,7 +6032,7 @@ checksum = "70550716265d1ec349c41f70dd4f964b4fd88394efe4405f0c1da679c4799a07" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -5899,15 +6045,18 @@ dependencies = [ ] [[package]] -name = "procinfo" -version = "0.4.2" +name = "procfs" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ab1427f3d2635891f842892dda177883dca0639e05fe66796a62c9d2f23b49c" +checksum = "943ca7f9f29bab5844ecd8fdb3992c5969b6622bb9609b9502fef9b4310e3f1f" dependencies = [ + "bitflags 1.3.2", "byteorder", - "libc", - "nom 2.2.1", - "rustc_version 0.2.3", + "chrono", + "flate2", + "hex", + "lazy_static", + "rustix 0.36.15", ] [[package]] @@ -5939,13 +6088,13 @@ dependencies = [ [[package]] name = "prometheus-client-derive-encode" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b6a5217beb0ad503ee7fa752d451c905113d70721b937126158f3106a48cc1" +checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = 
[ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.29", ] [[package]] @@ -6050,10 +6199,58 @@ dependencies = [ ] [[package]] -name = "quote" -version = "1.0.32" +name = "quinn" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" +checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" +dependencies = [ + "bytes", + "futures-io", + "pin-project-lite 0.2.12", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls 0.21.6", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "quinn-proto" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13f81c9a9d574310b8351f8666f5a93ac3b0069c45c28ad52c10291389a7cf9" +dependencies = [ + "bytes", + "rand 0.8.5", + "ring", + "rustc-hash", + "rustls 0.21.6", + "slab", + "thiserror", + "tinyvec", + "tracing", +] + +[[package]] +name = "quinn-udp" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" +dependencies = [ + "bytes", + "libc", + "socket2 0.5.3", + "tracing", + "windows-sys 0.48.0", +] + +[[package]] +name = "quote" +version = "1.0.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] @@ -6193,6 +6390,18 @@ dependencies = [ "num_cpus", ] +[[package]] +name = "rcgen" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" +dependencies = [ + "pem", + "ring", + "time", + "yasna", +] + [[package]] name = "redox_syscall" version = "0.2.16" @@ -6224,13 +6433,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.1" +version = "1.9.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" +checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.4", + "regex-automata 0.3.6", "regex-syntax 0.7.4", ] @@ -6245,9 +6454,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.4" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7b6d6190b7594385f61bd3911cd1be99dfddcfc365a4160cc2ab5bff4aed294" +checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" dependencies = [ "aho-corasick", "memchr", @@ -6290,8 +6499,8 @@ dependencies = [ "native-tls", "once_cell", "percent-encoding", - "pin-project-lite 0.2.10", - "rustls 0.21.5", + "pin-project-lite 0.2.12", + "rustls 0.21.6", "rustls-pemfile", "serde", "serde_json", @@ -6350,7 +6559,7 @@ dependencies = [ "cc", "libc", "once_cell", - "spin", + "spin 0.5.2", "untrusted", "web-sys", "winapi", @@ -6440,22 +6649,36 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver 0.9.0", -] - [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.18", + "semver", +] + +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", +] + +[[package]] +name = 
"rustix" +version = "0.36.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c37f1bd5ef1b5422177b7646cba67430579cfe2ace80f284fee876bca52ad941" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.1.4", + "windows-sys 0.45.0", ] [[package]] @@ -6469,33 +6692,20 @@ dependencies = [ "io-lifetimes", "libc", "linux-raw-sys 0.3.8", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] name = "rustix" -version = "0.38.4" +version = "0.38.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" +checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.0", "errno", "libc", "linux-raw-sys 0.4.5", - "windows-sys", -] - -[[package]] -name = "rustls" -version = "0.19.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" -dependencies = [ - "base64 0.13.1", - "log", - "ring", - "sct 0.6.1", - "webpki 0.21.4", + "windows-sys 0.48.0", ] [[package]] @@ -6506,20 +6716,20 @@ checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring", - "sct 0.7.0", - "webpki 0.22.0", + "sct", + "webpki", ] [[package]] name = "rustls" -version = "0.21.5" +version = "0.21.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79ea77c539259495ce8ca47f53e66ae0330a8819f67e23ac96ca02f50e7b7d36" +checksum = "1d1feddffcfcc0b33f5c6ce9a29e341e4cd59c3f78e7ee45f4a40c038b1d6cbb" dependencies = [ "log", "ring", - "rustls-webpki 0.101.2", - "sct 0.7.0", + "rustls-webpki", + "sct", ] [[package]] @@ -6533,19 +6743,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.100.1" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "rustls-webpki" -version = "0.101.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "513722fd73ad80a71f72b61009ea1b584bcfa1483ca93949c8f290298837fa59" +checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" dependencies = [ "ring", "untrusted", @@ -6578,19 +6778,13 @@ checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" name = "safe_arith" version = "0.1.0" -[[package]] -name = "safemem" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" - [[package]] name = "salsa20" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecbd2eb639fd7cab5804a0837fe373cc2172d15437e804c054a9fb885cb923b0" dependencies = [ - "cipher", + "cipher 0.3.0", ] [[package]] @@ -6632,7 +6826,7 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -6668,16 +6862,6 @@ dependencies = [ "sha2 0.9.9", ] -[[package]] -name = "sct" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "sct" version = "0.7.0" @@ -6709,7 +6893,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ "base16ct 0.2.0", - "der 0.7.7", + "der 0.7.8", "generic-array", "pkcs8 0.10.2", "subtle", @@ -6739,15 +6923,6 @@ dependencies = [ "libc", ] -[[package]] -name = "semver" -version = "0.9.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - [[package]] name = "semver" version = "1.0.18" @@ -6757,12 +6932,6 @@ dependencies = [ "serde", ] -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - [[package]] name = "send_wrapper" version = "0.6.0" @@ -6779,9 +6948,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.180" +version = "1.0.183" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea67f183f058fe88a4e3ec6e2788e003840893b91bac4559cabedd00863b3ed" +checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c" dependencies = [ "serde_derive", ] @@ -6819,20 +6988,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.180" +version = "1.0.183" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24e744d7782b686ab3b73267ef05697159cc0e5abbed3f47f9933165e5219036" +checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] name = "serde_json" -version = "1.0.104" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" +checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" dependencies = [ "itoa", "ryu", @@ -6857,7 +7026,7 @@ checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -7049,7 +7218,7 @@ dependencies = [ "num-bigint", "num-traits", "thiserror", - "time 0.3.24", + "time", ] [[package]] @@ -7181,7 +7350,7 @@ dependencies = [ "serde", 
"serde_json", "slog", - "time 0.3.24", + "time", ] [[package]] @@ -7226,14 +7395,14 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.24", + "time", ] [[package]] name = "sloggers" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e20d36cb80da75a9c5511872f15247ddad14ead8c1dd97a86b56d1be9f5d4a0e" +checksum = "7a0a4d8569a69ee56f277bffc2f6eee637b98ed468448e8a5a84fa63efe4de9d" dependencies = [ "chrono", "libc", @@ -7286,17 +7455,17 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ccba027ba85743e09d15c03296797cad56395089b832b48b5a5217880f57733" +checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155" dependencies = [ "aes-gcm", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-rc.1", + "curve25519-dalek 4.0.0", "rand_core 0.6.4", "ring", - "rustc_version 0.4.0", + "rustc_version", "sha2 0.10.7", "subtle", ] @@ -7318,7 +7487,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -7342,6 +7511,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + [[package]] name = "spki" version = "0.6.0" @@ -7359,7 +7534,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" dependencies = [ "base64ct", - "der 0.7.7", + "der 0.7.8", ] 
[[package]] @@ -7575,9 +7750,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.28" +version = "2.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" +checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" dependencies = [ "proc-macro2", "quote", @@ -7691,15 +7866,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.7.0" +version = "3.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5486094ee78b2e5038a6382ed7645bc084dc2ec433426ca4c3cb61e2007b8998" +checksum = "dc02fddf48964c42031a0b3fe0428320ecf3a73c401040fc0096f97794310651" dependencies = [ "cfg-if", "fastrand 2.0.0", "redox_syscall 0.3.5", - "rustix 0.38.4", - "windows-sys", + "rustix 0.38.8", + "windows-sys 0.48.0", ] [[package]] @@ -7741,8 +7916,7 @@ dependencies = [ [[package]] name = "testcontainers" version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e2b1567ca8a2b819ea7b28c92be35d9f76fb9edb214321dcc86eb96023d1f87" +source = "git+https://github.com/testcontainers/testcontainers-rs/?rev=0f2c9851#0f2c985160e51a200cfc847097c15b8d85ed7df1" dependencies = [ "bollard-stubs", "futures", @@ -7766,22 +7940,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.44" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90" +checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.44" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" +checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.28", + "syn 2.0.29", ] [[package]] @@ -7805,20 +7979,9 @@ dependencies = [ [[package]] name = "time" -version = "0.1.45" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" -dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", -] - -[[package]] -name = "time" -version = "0.3.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b79eabcd964882a646b3584543ccabeae7869e9ac32a46f6f22b7a5bd405308b" +checksum = "b0fdd63d58b18d663fbdf70e049f00a22c8e42be082203be7f26589213cd75ea" dependencies = [ "deranged", "itoa", @@ -7910,22 +8073,21 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.29.1" +version = "1.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" +checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" dependencies = [ - "autocfg 1.1.0", "backtrace", "bytes", "libc", "mio", "num_cpus", "parking_lot 0.12.1", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "signal-hook-registry", - "socket2 0.4.9", + "socket2 0.5.3", "tokio-macros", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -7934,7 +8096,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "tokio", ] @@ -7946,7 +8108,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -7975,7 +8137,7 @@ dependencies = [ "parking_lot 0.12.1", "percent-encoding", "phf", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "postgres-protocol", 
"postgres-types", "socket2 0.5.3", @@ -7983,17 +8145,6 @@ dependencies = [ "tokio-util 0.7.8", ] -[[package]] -name = "tokio-rustls" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" -dependencies = [ - "rustls 0.19.1", - "tokio", - "webpki 0.21.4", -] - [[package]] name = "tokio-rustls" version = "0.23.4" @@ -8002,7 +8153,7 @@ checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls 0.20.8", "tokio", - "webpki 0.22.0", + "webpki", ] [[package]] @@ -8011,7 +8162,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.5", + "rustls 0.21.6", "tokio", ] @@ -8022,24 +8173,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "tokio", "tokio-util 0.7.8", ] -[[package]] -name = "tokio-tungstenite" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" -dependencies = [ - "futures-util", - "log", - "pin-project", - "tokio", - "tungstenite 0.14.0", -] - [[package]] name = "tokio-tungstenite" version = "0.17.2" @@ -8052,10 +8190,22 @@ dependencies = [ "tokio", "tokio-rustls 0.23.4", "tungstenite 0.17.3", - "webpki 0.22.0", + "webpki", "webpki-roots 0.22.6", ] +[[package]] +name = "tokio-tungstenite" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54319c93411147bced34cb5609a80e0a8e44c5999c93903a81cd866630ec0bfd" +dependencies = [ + "futures-util", + "log", + "tokio", + "tungstenite 0.18.0", +] + [[package]] name = "tokio-util" version = 
"0.6.10" @@ -8067,7 +8217,7 @@ dependencies = [ "futures-io", "futures-sink", "log", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "slab", "tokio", ] @@ -8081,7 +8231,7 @@ dependencies = [ "bytes", "futures-core", "futures-sink", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "slab", "tokio", "tracing", @@ -8139,7 +8289,7 @@ dependencies = [ "futures-core", "futures-util", "pin-project", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "tokio", "tower-layer", "tower-service", @@ -8166,7 +8316,7 @@ checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "tracing-attributes", "tracing-core", ] @@ -8179,7 +8329,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -8334,25 +8484,6 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" -[[package]] -name = "tungstenite" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0b2d8558abd2e276b0a8df5c05a2ec762609344191e5fd23e292c910e9165b5" -dependencies = [ - "base64 0.13.1", - "byteorder", - "bytes", - "http", - "httparse", - "log", - "rand 0.8.5", - "sha-1 0.9.8", - "thiserror", - "url", - "utf-8", -] - [[package]] name = "tungstenite" version = "0.17.3" @@ -8371,16 +8502,26 @@ dependencies = [ "thiserror", "url", "utf-8", - "webpki 0.22.0", + "webpki", ] [[package]] -name = "twoway" -version = "0.1.8" +name = "tungstenite" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" +checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" dependencies = [ - "memchr", + "base64 
0.13.1", + "byteorder", + "bytes", + "http", + "httparse", + "log", + "rand 0.8.5", + "sha1", + "thiserror", + "url", + "utf-8", ] [[package]] @@ -8731,8 +8872,8 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.2" -source = "git+https://github.com/macladson/warp?rev=7e75acc368229a46a236a8c991bf251fe7fe50ef#7e75acc368229a46a236a8c991bf251fe7fe50ef" +version = "0.3.5" +source = "git+https://github.com/seanmonstar/warp.git?rev=149913fe#149913fed948bbe2149b52b9016170bcaef950ab" dependencies = [ "bytes", "futures-channel", @@ -8743,18 +8884,19 @@ dependencies = [ "log", "mime", "mime_guess", - "multipart", + "multer", "percent-encoding", "pin-project", + "rustls-pemfile", "scoped-tls", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls 0.22.0", + "tokio-rustls 0.23.4", "tokio-stream", - "tokio-tungstenite 0.15.0", - "tokio-util 0.6.10", + "tokio-tungstenite 0.18.0", + "tokio-util 0.7.8", "tower-service", "tracing", ] @@ -8783,12 +8925,6 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -[[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -8816,7 +8952,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", "wasm-bindgen-shared", ] @@ -8850,7 +8986,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8960,16 +9096,6 @@ dependencies = [ "zip", ] -[[package]] -name = "webpki" -version = "0.21.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "webpki" version = "0.22.0" @@ -8986,17 +9112,14 @@ version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" dependencies = [ - "webpki 0.22.0", + "webpki", ] [[package]] name = "webpki-roots" -version = "0.23.1" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" -dependencies = [ - "rustls-webpki 0.100.1", -] +checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" [[package]] name = "widestring" @@ -9066,7 +9189,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets", + "windows-targets 0.48.3", ] [[package]] @@ -9081,35 +9204,65 @@ dependencies = [ "winapi", ] +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets", + "windows-targets 0.48.3", ] [[package]] name = "windows-targets" -version = "0.48.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.48.0", - 
"windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.48.0", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27f51fb4c64f8b770a823c043c7fad036323e1c48f55287b7bbb7987b2fcdf3b" +dependencies = [ + "windows_aarch64_gnullvm 0.48.3", + "windows_aarch64_msvc 0.48.3", + "windows_i686_gnu 0.48.3", + "windows_i686_msvc 0.48.3", + "windows_x86_64_gnu 0.48.3", + "windows_x86_64_gnullvm 0.48.3", + "windows_x86_64_msvc 0.48.3", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fde1bb55ae4ce76a597a8566d82c57432bc69c039449d61572a7a353da28f68c" [[package]] name = "windows_aarch64_msvc" @@ -9119,9 +9272,15 @@ checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1513e8d48365a78adad7322fd6b5e4c4e99d92a69db8df2d435b25b1f1f286d4" [[package]] name = "windows_i686_gnu" @@ -9131,9 +9290,15 @@ checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" [[package]] name = "windows_i686_gnu" -version = "0.48.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60587c0265d2b842298f5858e1a5d79d146f9ee0c37be5782e92a6eb5e1d7a83" [[package]] name = "windows_i686_msvc" @@ -9143,9 +9308,15 @@ checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224fe0e0ffff5d2ea6a29f82026c8f43870038a0ffc247aa95a52b47df381ac4" [[package]] name = "windows_x86_64_gnu" @@ -9155,15 +9326,27 @@ checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"62fc52a0f50a088de499712cbc012df7ebd94e2d6eb948435449d76a6287e7ad" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2093925509d91ea3d69bcd20238f4c2ecdb1a29d3c281d026a09705d0dd35f3d" [[package]] name = "windows_x86_64_msvc" @@ -9173,15 +9356,21 @@ checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6ade45bc8bf02ae2aa34a9d54ba660a1a58204da34ba793c00d83ca3730b5f1" [[package]] name = "winnow" -version = "0.5.3" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46aab759304e4d7b2075a9aecba26228bb073ee8c50db796b2c72c676b5d807" +checksum = "d09770118a7eb1ccaf4a594a221334119a44a814fcb0d31c5b85e83e97227a97" dependencies = [ "memchr", ] @@ -9202,7 +9391,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ "cfg-if", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -9216,7 +9405,7 @@ dependencies = [ "js-sys", "log", "pharos", - "rustc_version 0.4.0", + "rustc_version", "send_wrapper", "thiserror", "wasm-bindgen", @@ -9250,6 +9439,23 @@ 
dependencies = [ "zeroize", ] +[[package]] +name = "x509-parser" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "rusticata-macros", + "thiserror", + "time", +] + [[package]] name = "xml-rs" version = "0.8.16" @@ -9276,18 +9482,28 @@ dependencies = [ [[package]] name = "yamux" -version = "0.10.2" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d9ba232399af1783a58d8eb26f6b5006fbefe2dc9ef36bd283324792d03ea5" +checksum = "0329ef377816896f014435162bb3711ea7a07729c23d0960e6f8048b21b8fe91" dependencies = [ "futures", "log", "nohash-hasher", "parking_lot 0.12.1", + "pin-project", "rand 0.8.5", "static_assertions", ] +[[package]] +name = "yasna" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time", +] + [[package]] name = "zeroize" version = "1.6.0" @@ -9305,19 +9521,55 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] name = "zip" -version = "0.5.13" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ab48844d61251bb3835145c521d88aa4031d7139e8485990f60ca911fa0815" +checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261" dependencies = [ + "aes 0.8.3", "byteorder", "bzip2", + "constant_time_eq", "crc32fast", + "crossbeam-utils", "flate2", - "thiserror", - "time 0.1.45", + "hmac 0.12.1", + "pbkdf2 0.11.0", + "sha1", + "time", + "zstd", +] + +[[package]] +name = "zstd" +version = "0.11.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "5.0.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" +dependencies = [ + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.8+zstd.1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" +dependencies = [ + "cc", + "libc", + "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index 15906a0306..9930658e65 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,7 +91,8 @@ resolver = "2" [patch] [patch.crates-io] -warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" } +# TODO: remove when 0.3.6 get's released. +warp = { git = "https://github.com/seanmonstar/warp.git", rev="149913fe" } [profile.maxperf] inherits = "release" diff --git a/Makefile b/Makefile index bc84cdede0..1e99b3dbb3 100644 --- a/Makefile +++ b/Makefile @@ -206,9 +206,8 @@ arbitrary-fuzz: # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: - # cargo install --force cargo-audit - cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2022-0093 \ - --ignore RUSTSEC-2023-0052 --ignore RUSTSEC-2023-0053 + cargo install --force cargo-audit + cargo audit --ignore RUSTSEC-2023-0052 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. 
vendor: diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 8492a5159e..f2259a4813 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -50,4 +50,4 @@ keccak-hash = "0.10.0" hash256-std-hasher = "0.15.2" triehash = "0.8.4" hash-db = "0.15.2" -pretty_reqwest_error = { path = "../../common/pretty_reqwest_error" } \ No newline at end of file +pretty_reqwest_error = { path = "../../common/pretty_reqwest_error" } diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index d8e1a375fd..8cb3de3a00 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -35,14 +35,8 @@ tokio = { version = "1.14.0", features = ["full"] } [target.'cfg(target_os = "linux")'.dependencies] psutil = { version = "3.2.2", optional = true } -procinfo = { version = "0.4.2", optional = true } +procfs = { version = "0.15.1", optional = true } [features] default = ["lighthouse"] -lighthouse = [ - "proto_array", - "psutil", - "procinfo", - "store", - "slashing_protection", -] +lighthouse = ["proto_array", "psutil", "procfs", "store", "slashing_protection"] diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 1b4bcc0e39..dfc19db492 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -95,8 +95,8 @@ pub struct ValidatorInclusionData { #[cfg(target_os = "linux")] use { - procinfo::pid, psutil::cpu::os::linux::CpuTimesExt, - psutil::memory::os::linux::VirtualMemoryExt, psutil::process::Process, + psutil::cpu::os::linux::CpuTimesExt, psutil::memory::os::linux::VirtualMemoryExt, + psutil::process::Process, }; /// Reports on the health of the Lighthouse instance. @@ -238,7 +238,7 @@ pub struct ProcessHealth { /// The pid of this process. pub pid: u32, /// The number of threads used by this pid. - pub pid_num_threads: i32, + pub pid_num_threads: i64, /// The total resident memory used by this pid. 
pub pid_mem_resident_set_size: u64, /// The total virtual memory used by this pid. @@ -262,7 +262,12 @@ impl ProcessHealth { .memory_info() .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; - let stat = pid::stat_self().map_err(|e| format!("Unable to get stat: {:?}", e))?; + let me = procfs::process::Process::myself() + .map_err(|e| format!("Unable to get process: {:?}", e))?; + let stat = me + .stat() + .map_err(|e| format!("Unable to get stat: {:?}", e))?; + let process_times = process .cpu_times() .map_err(|e| format!("Unable to get process cpu times : {:?}", e))?; diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 338a2d243b..d1fe25f1cc 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" build = "build.rs" [build-dependencies] -zip = "0.5.8" +zip = "0.6" eth2_config = { path = "../eth2_config"} [dev-dependencies] @@ -18,4 +18,4 @@ serde_yaml = "0.8.13" types = { path = "../../consensus/types"} ethereum_ssz = "0.5.0" eth2_config = { path = "../eth2_config"} -discv5 = "0.3.1" \ No newline at end of file +discv5 = "0.3.1" diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index b6179d9e78..0e158f58ff 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -17,6 +17,6 @@ sloggers = { version = "2.1.1", features = ["json"] } slog-async = "2.7.0" take_mut = "0.2.2" parking_lot = "0.12.1" -serde = "1.0.153" +serde = "1.0.153" serde_json = "1.0.94" -chrono = "0.4.23" +chrono = { version = "0.4", default-features = false, features = ["clock", "std"] } diff --git a/common/warp_utils/src/cors.rs b/common/warp_utils/src/cors.rs index 314ea9c8f7..55043dfd7d 100644 --- a/common/warp_utils/src/cors.rs +++ b/common/warp_utils/src/cors.rs @@ -10,10 +10,14 @@ pub fn set_builder_origins( default_origin: (IpAddr, u16), ) -> Result { if let Some(allow_origin) = allow_origin { - let origins = allow_origin - 
.split(',') - .map(|s| verify_cors_origin_str(s).map(|_| s)) - .collect::, _>>()?; + let mut origins = vec![]; + for origin in allow_origin.split(',') { + verify_cors_origin_str(origin)?; + if origin == "*" { + return Ok(builder.allow_any_origin()); + } + origins.push(origin) + } Ok(builder.allow_origins(origins)) } else { let origin = match default_origin.0 { diff --git a/common/warp_utils/src/metrics.rs b/common/warp_utils/src/metrics.rs index 1b9d89db91..d93b74ca95 100644 --- a/common/warp_utils/src/metrics.rs +++ b/common/warp_utils/src/metrics.rs @@ -87,7 +87,7 @@ pub fn scrape_process_health_metrics() { // This will silently fail if we are unable to observe the health. This is desired behaviour // since we don't support `Health` for all platforms. if let Ok(health) = ProcessHealth::observe() { - set_gauge(&PROCESS_NUM_THREADS, health.pid_num_threads as i64); + set_gauge(&PROCESS_NUM_THREADS, health.pid_num_threads); set_gauge(&PROCESS_RES_MEM, health.pid_mem_resident_set_size as i64); set_gauge(&PROCESS_VIRT_MEM, health.pid_mem_virtual_memory_size as i64); set_gauge(&PROCESS_SECONDS, health.pid_process_seconds_total as i64); diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 37d224dbc3..626be85f19 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -11,6 +11,7 @@ use lighthouse_network::PeerId; use std::fs::File; use std::io::{Read, Write}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; +use std::path::Path; use std::path::PathBuf; use std::process::Command; use std::str::FromStr; @@ -1460,15 +1461,20 @@ fn disable_inbound_rate_limiter_flag() { #[test] fn http_allow_origin_flag() { CommandLineTest::new() - .flag("http-allow-origin", Some("127.0.0.99")) + .flag("http", None) + .flag("http-allow-origin", Some("http://127.0.0.99")) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.http_api.allow_origin, Some("127.0.0.99".to_string())); + assert_eq!( + config.http_api.allow_origin, + 
Some("http://127.0.0.99".to_string()) + ); }); } #[test] fn http_allow_origin_all_flag() { CommandLineTest::new() + .flag("http", None) .flag("http-allow-origin", Some("*")) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.allow_origin, Some("*".to_string()))); @@ -1476,6 +1482,7 @@ fn http_allow_origin_all_flag() { #[test] fn http_allow_sync_stalled_flag() { CommandLineTest::new() + .flag("http", None) .flag("http-allow-sync-stalled", None) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.allow_sync_stalled, true)); @@ -1483,32 +1490,29 @@ fn http_allow_sync_stalled_flag() { #[test] fn http_enable_beacon_processor() { CommandLineTest::new() + .flag("http", None) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true)); CommandLineTest::new() + .flag("http", None) .flag("http-enable-beacon-processor", Some("true")) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true)); CommandLineTest::new() + .flag("http", None) .flag("http-enable-beacon-processor", Some("false")) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, false)); } #[test] fn http_tls_flags() { - let dir = TempDir::new().expect("Unable to create temporary directory"); CommandLineTest::new() + .flag("http", None) .flag("http-enable-tls", None) - .flag( - "http-tls-cert", - dir.path().join("certificate.crt").as_os_str().to_str(), - ) - .flag( - "http-tls-key", - dir.path().join("private.key").as_os_str().to_str(), - ) + .flag("http-tls-cert", Some("tests/tls/cert.pem")) + .flag("http-tls-key", Some("tests/tls/key.rsa")) .run_with_zero_port() .with_config(|config| { let tls_config = config @@ -1516,14 +1520,15 @@ fn http_tls_flags() { .tls_config .as_ref() .expect("tls_config was empty."); - assert_eq!(tls_config.cert, dir.path().join("certificate.crt")); - assert_eq!(tls_config.key, dir.path().join("private.key")); + 
assert_eq!(tls_config.cert, Path::new("tests/tls/cert.pem")); + assert_eq!(tls_config.key, Path::new("tests/tls/key.rsa")); }); } #[test] fn http_spec_fork_default() { CommandLineTest::new() + .flag("http", None) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.spec_fork_name, None)); } @@ -1531,6 +1536,7 @@ fn http_spec_fork_default() { #[test] fn http_spec_fork_override() { CommandLineTest::new() + .flag("http", None) .flag("http-spec-fork", Some("altair")) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.spec_fork_name, Some(ForkName::Altair))); diff --git a/lighthouse/tests/tls/cert.pem b/lighthouse/tests/tls/cert.pem new file mode 100644 index 0000000000..03af12ff81 --- /dev/null +++ b/lighthouse/tests/tls/cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEADCCAmigAwIBAgICAcgwDQYJKoZIhvcNAQELBQAwLDEqMCgGA1UEAwwhcG9u +eXRvd24gUlNBIGxldmVsIDIgaW50ZXJtZWRpYXRlMB4XDTE2MDgxMzE2MDcwNFoX +DTIyMDIwMzE2MDcwNFowGTEXMBUGA1UEAwwOdGVzdHNlcnZlci5jb20wggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpVhh1/FNP2qvWenbZSghari/UThwe +dynfnHG7gc3JmygkEdErWBO/CHzHgsx7biVE5b8sZYNEDKFojyoPHGWK2bQM/FTy +niJCgNCLdn6hUqqxLAml3cxGW77hAWu94THDGB1qFe+eFiAUnDmob8gNZtAzT6Ky +b/JGJdrEU0wj+Rd7wUb4kpLInNH/Jc+oz2ii2AjNbGOZXnRz7h7Kv3sO9vABByYe +LcCj3qnhejHMqVhbAT1MD6zQ2+YKBjE52MsQKU/xhUpu9KkUyLh0cxkh3zrFiKh4 +Vuvtc+n7aeOv2jJmOl1dr0XLlSHBlmoKqH6dCTSbddQLmlK7dms8vE01AgMBAAGj +gb4wgbswDAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBsAwHQYDVR0OBBYEFMeUzGYV +bXwJNQVbY1+A8YXYZY8pMEIGA1UdIwQ7MDmAFJvEsUi7+D8vp8xcWvnEdVBGkpoW +oR6kHDAaMRgwFgYDVQQDDA9wb255dG93biBSU0EgQ0GCAXswOwYDVR0RBDQwMoIO +dGVzdHNlcnZlci5jb22CFXNlY29uZC50ZXN0c2VydmVyLmNvbYIJbG9jYWxob3N0 +MA0GCSqGSIb3DQEBCwUAA4IBgQBsk5ivAaRAcNgjc7LEiWXFkMg703AqDDNx7kB1 +RDgLalLvrjOfOp2jsDfST7N1tKLBSQ9bMw9X4Jve+j7XXRUthcwuoYTeeo+Cy0/T +1Q78ctoX74E2nB958zwmtRykGrgE/6JAJDwGcgpY9kBPycGxTlCN926uGxHsDwVs +98cL6ZXptMLTR6T2XP36dAJZuOICSqmCSbFR8knc/gjUO36rXTxhwci8iDbmEVaf 
+BHpgBXGU5+SQ+QM++v6bHGf4LNQC5NZ4e4xvGax8ioYu/BRsB/T3Lx+RlItz4zdU +XuxCNcm3nhQV2ZHquRdbSdoyIxV5kJXel4wCmOhWIq7A2OBKdu5fQzIAzzLi65EN +RPAKsKB4h7hGgvciZQ7dsMrlGw0DLdJ6UrFyiR5Io7dXYT/+JP91lP5xsl6Lhg9O +FgALt7GSYRm2cZdgi9pO9rRr83Br1VjQT1vHz6yoZMXSqc4A2zcN2a2ZVq//rHvc +FZygs8miAhWPzqnpmgTj1cPiU1M= +-----END CERTIFICATE----- diff --git a/lighthouse/tests/tls/key.rsa b/lighthouse/tests/tls/key.rsa new file mode 100644 index 0000000000..b13bf5d07f --- /dev/null +++ b/lighthouse/tests/tls/key.rsa @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAqVYYdfxTT9qr1np22UoIWq4v1E4cHncp35xxu4HNyZsoJBHR +K1gTvwh8x4LMe24lROW/LGWDRAyhaI8qDxxlitm0DPxU8p4iQoDQi3Z+oVKqsSwJ +pd3MRlu+4QFrveExwxgdahXvnhYgFJw5qG/IDWbQM0+ism/yRiXaxFNMI/kXe8FG ++JKSyJzR/yXPqM9ootgIzWxjmV50c+4eyr97DvbwAQcmHi3Ao96p4XoxzKlYWwE9 +TA+s0NvmCgYxOdjLEClP8YVKbvSpFMi4dHMZId86xYioeFbr7XPp+2njr9oyZjpd +Xa9Fy5UhwZZqCqh+nQk0m3XUC5pSu3ZrPLxNNQIDAQABAoIBAFKtZJgGsK6md4vq +kyiYSufrcBLaaEQ/rkQtYCJKyC0NAlZKFLRy9oEpJbNLm4cQSkYPXn3Qunx5Jj2k +2MYz+SgIDy7f7KHgr52Ew020dzNQ52JFvBgt6NTZaqL1TKOS1fcJSSNIvouTBerK +NCSXHzfb4P+MfEVe/w1c4ilE+kH9SzdEo2jK/sRbzHIY8TX0JbmQ4SCLLayr22YG +usIxtIYcWt3MMP/G2luRnYzzBCje5MXdpAhlHLi4TB6x4h5PmBKYc57uOVNngKLd +YyrQKcszW4Nx5v0a4HG3A5EtUXNCco1+5asXOg2lYphQYVh2R+1wgu5WiDjDVu+6 +EYgjFSkCgYEA0NBk6FDoxE/4L/4iJ4zIhu9BptN8Je/uS5c6wRejNC/VqQyw7SHb +hRFNrXPvq5Y+2bI/DxtdzZLKAMXOMjDjj0XEgfOIn2aveOo3uE7zf1i+njxwQhPu +uSYA9AlBZiKGr2PCYSDPnViHOspVJjxRuAgyWM1Qf+CTC0D95aj0oz8CgYEAz5n4 +Cb3/WfUHxMJLljJ7PlVmlQpF5Hk3AOR9+vtqTtdxRjuxW6DH2uAHBDdC3OgppUN4 +CFj55kzc2HUuiHtmPtx8mK6G+otT7Lww+nLSFL4PvZ6CYxqcio5MPnoYd+pCxrXY +JFo2W7e4FkBOxb5PF5So5plg+d0z/QiA7aFP1osCgYEAtgi1rwC5qkm8prn4tFm6 +hkcVCIXc+IWNS0Bu693bXKdGr7RsmIynff1zpf4ntYGpEMaeymClCY0ppDrMYlzU +RBYiFNdlBvDRj6s/H+FTzHRk2DT/99rAhY9nzVY0OQFoQIXK8jlURGrkmI/CYy66 +XqBmo5t4zcHM7kaeEBOWEKkCgYAYnO6VaRtPNQfYwhhoFFAcUc+5t+AVeHGW/4AY +M5qlAlIBu64JaQSI5KqwS0T4H+ZgG6Gti68FKPO+DhaYQ9kZdtam23pRVhd7J8y+ +xMI3h1kiaBqZWVxZ6QkNFzizbui/2mtn0/JB6YQ/zxwHwcpqx0tHG8Qtm5ZAV7PB 
+eLCYhQKBgQDALJxU/6hMTdytEU5CLOBSMby45YD/RrfQrl2gl/vA0etPrto4RkVq +UrkDO/9W4mZORClN3knxEFSTlYi8YOboxdlynpFfhcs82wFChs+Ydp1eEsVHAqtu +T+uzn0sroycBiBfVB949LExnzGDFUkhG0i2c2InarQYLTsIyHCIDEA== +-----END RSA PRIVATE KEY----- diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 9bcfe2a1d5..062b7e7786 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -260,6 +260,7 @@ fn http_flag() { fn http_address_flag() { let addr = "127.0.0.99".parse::().unwrap(); CommandLineTest::new() + .flag("http", None) .flag("http-address", Some("127.0.0.99")) .flag("unencrypted-http-transport", None) .run() @@ -269,6 +270,7 @@ fn http_address_flag() { fn http_address_ipv6_flag() { let addr = "::1".parse::().unwrap(); CommandLineTest::new() + .flag("http", None) .flag("http-address", Some("::1")) .flag("unencrypted-http-transport", None) .run() @@ -279,6 +281,7 @@ fn http_address_ipv6_flag() { fn missing_unencrypted_http_transport_flag() { let addr = "127.0.0.99".parse::().unwrap(); CommandLineTest::new() + .flag("http", None) .flag("http-address", Some("127.0.0.99")) .run() .with_config(|config| assert_eq!(config.http_api.listen_addr, addr)); @@ -286,6 +289,7 @@ fn missing_unencrypted_http_transport_flag() { #[test] fn http_port_flag() { CommandLineTest::new() + .flag("http", None) .flag("http-port", Some("9090")) .run() .with_config(|config| assert_eq!(config.http_api.listen_port, 9090)); @@ -293,6 +297,7 @@ fn http_port_flag() { #[test] fn http_allow_origin_flag() { CommandLineTest::new() + .flag("http", None) .flag("http-allow-origin", Some("http://localhost:9009")) .run() .with_config(|config| { @@ -305,6 +310,7 @@ fn http_allow_origin_flag() { #[test] fn http_allow_origin_all_flag() { CommandLineTest::new() + .flag("http", None) .flag("http-allow-origin", Some("*")) .run() .with_config(|config| assert_eq!(config.http_api.allow_origin, Some("*".to_string()))); @@ -312,12 +318,14 @@ fn 
http_allow_origin_all_flag() { #[test] fn http_allow_keystore_export_default() { CommandLineTest::new() + .flag("http", None) .run() .with_config(|config| assert!(!config.http_api.allow_keystore_export)); } #[test] fn http_allow_keystore_export_present() { CommandLineTest::new() + .flag("http", None) .flag("http-allow-keystore-export", None) .run() .with_config(|config| assert!(config.http_api.allow_keystore_export)); @@ -325,12 +333,14 @@ fn http_allow_keystore_export_present() { #[test] fn http_store_keystore_passwords_in_secrets_dir_default() { CommandLineTest::new() + .flag("http", None) .run() .with_config(|config| assert!(!config.http_api.store_passwords_in_secrets_dir)); } #[test] fn http_store_keystore_passwords_in_secrets_dir_present() { CommandLineTest::new() + .flag("http", None) .flag("http-store-passwords-in-secrets-dir", None) .run() .with_config(|config| assert!(config.http_api.store_passwords_in_secrets_dir)); @@ -348,6 +358,7 @@ fn metrics_flag() { fn metrics_address_flag() { let addr = "127.0.0.99".parse::().unwrap(); CommandLineTest::new() + .flag("metrics", None) .flag("metrics-address", Some("127.0.0.99")) .run() .with_config(|config| assert_eq!(config.http_metrics.listen_addr, addr)); @@ -356,6 +367,7 @@ fn metrics_address_flag() { fn metrics_address_ipv6_flag() { let addr = "::1".parse::().unwrap(); CommandLineTest::new() + .flag("metrics", None) .flag("metrics-address", Some("::1")) .run() .with_config(|config| assert_eq!(config.http_metrics.listen_addr, addr)); @@ -363,6 +375,7 @@ fn metrics_address_ipv6_flag() { #[test] fn metrics_port_flag() { CommandLineTest::new() + .flag("metrics", None) .flag("metrics-port", Some("9090")) .run() .with_config(|config| assert_eq!(config.http_metrics.listen_port, 9090)); @@ -370,6 +383,7 @@ fn metrics_port_flag() { #[test] fn metrics_allow_origin_flag() { CommandLineTest::new() + .flag("metrics", None) .flag("metrics-allow-origin", Some("http://localhost:9009")) .run() .with_config(|config| { @@ -382,6 
+396,7 @@ fn metrics_allow_origin_flag() { #[test] fn metrics_allow_origin_all_flag() { CommandLineTest::new() + .flag("metrics", None) .flag("metrics-allow-origin", Some("*")) .run() .with_config(|config| assert_eq!(config.http_metrics.allow_origin, Some("*".to_string()))); diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index c0fbf66723..faad76a19c 100644 --- a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -26,6 +26,6 @@ serde_derive = "1.0.116" serde_yaml = "0.8.13" eth2_network_config = { path = "../../common/eth2_network_config" } serde_json = "1.0.58" -zip = "0.5.13" +zip = "0.6" lazy_static = "1.4.0" -parking_lot = "0.12.0" \ No newline at end of file +parking_lot = "0.12.0" diff --git a/watch/Cargo.toml b/watch/Cargo.toml index 23e2c566dc..b29df14439 100644 --- a/watch/Cargo.toml +++ b/watch/Cargo.toml @@ -41,6 +41,7 @@ tokio-postgres = "0.7.5" http_api = { path = "../beacon_node/http_api" } beacon_chain = { path = "../beacon_node/beacon_chain" } network = { path = "../beacon_node/network" } -testcontainers = "0.14.0" +# TODO: update to 0.15 when released: https://github.com/testcontainers/testcontainers-rs/issues/497 +testcontainers = { git = "https://github.com/testcontainers/testcontainers-rs/", rev = "0f2c9851" } unused_port = { path = "../common/unused_port" } task_executor = { path = "../common/task_executor" } diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs index 9032b124ab..a54386eb75 100644 --- a/watch/tests/tests.rs +++ b/watch/tests/tests.rs @@ -23,6 +23,7 @@ use watch::{ }; use log::error; +use std::collections::HashMap; use std::env; use std::net::SocketAddr; use std::time::Duration; @@ -30,7 +31,42 @@ use tokio::{runtime, task::JoinHandle}; use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls}; use unused_port::unused_tcp4_port; -use testcontainers::{clients::Cli, images::postgres::Postgres, RunnableImage}; +use testcontainers::{clients::Cli, 
core::WaitFor, Image, RunnableImage}; + +#[derive(Debug)] +pub struct Postgres(HashMap); + +impl Default for Postgres { + fn default() -> Self { + let mut env_vars = HashMap::new(); + env_vars.insert("POSTGRES_DB".to_owned(), "postgres".to_owned()); + env_vars.insert("POSTGRES_HOST_AUTH_METHOD".into(), "trust".into()); + + Self(env_vars) + } +} + +impl Image for Postgres { + type Args = (); + + fn name(&self) -> String { + "postgres".to_owned() + } + + fn tag(&self) -> String { + "11-alpine".to_owned() + } + + fn ready_conditions(&self) -> Vec { + vec![WaitFor::message_on_stderr( + "database system is ready to accept connections", + )] + } + + fn env_vars(&self) -> Box + '_> { + Box::new(self.0.iter()) + } +} type E = MainnetEthSpec; From 8e95b69a1a9a449f352ec7a3904c549accb75111 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 28 Aug 2023 00:55:31 +0000 Subject: [PATCH 65/75] Send success code for duplicate blocks on HTTP (#4655) ## Issue Addressed Closes #4473 (take 3) ## Proposed Changes - Send a 202 status code by default for duplicate blocks, instead of 400. This conveys to the caller that the block was published, but makes no guarantees about its validity. Block relays can count this as a success or a failure as they wish. - For users wanting finer-grained control over which status is returned for duplicates, a flag `--http-duplicate-block-status` can be used to adjust the behaviour. A 400 status can be supplied to restore the old (spec-compliant) behaviour, or a 200 status can be used to silence VCs that warn loudly for non-200 codes (e.g. Lighthouse prior to v4.4.0). - Update the Lighthouse VC to gracefully handle success codes other than 200. The info message isn't the nicest thing to read, but it covers all bases and isn't a nasty `ERRO`/`CRIT` that will wake anyone up. ## Additional Info I'm planning to raise a PR to `beacon-APIs` to specify that clients may return 202 for duplicate blocks. 
Really it would be nice to use some 2xx code that _isn't_ the same as the code for "published but invalid". I think unfortunately there aren't any suitable codes, and maybe the best fit is `409 CONFLICT`. Given that we need to fix this promptly for our release, I think using the 202 code temporarily with configuration strikes a nice compromise. --- beacon_node/http_api/src/lib.rs | 185 ++++++++---------- beacon_node/http_api/src/publish_blocks.rs | 47 +++-- beacon_node/http_api/src/task_spawner.rs | 40 ---- beacon_node/http_api/src/test_utils.rs | 10 +- .../tests/broadcast_validation_tests.rs | 9 +- beacon_node/http_api/tests/tests.rs | 67 ++++++- beacon_node/src/cli.rs | 9 + beacon_node/src/config.rs | 3 + common/eth2/src/types.rs | 20 ++ lighthouse/tests/beacon_node.rs | 19 ++ validator_client/src/block_service.rs | 43 ++-- 11 files changed, 271 insertions(+), 181 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 3aa10139b0..19d62c060d 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -139,6 +139,8 @@ pub struct Config { pub data_dir: PathBuf, pub sse_capacity_multiplier: usize, pub enable_beacon_processor: bool, + #[serde(with = "eth2::types::serde_status_code")] + pub duplicate_block_status_code: StatusCode, } impl Default for Config { @@ -154,6 +156,7 @@ impl Default for Config { data_dir: PathBuf::from(DEFAULT_ROOT_DIR), sse_capacity_multiplier: 1, enable_beacon_processor: true, + duplicate_block_status_code: StatusCode::ACCEPTED, } } } @@ -510,6 +513,8 @@ pub fn serve( let task_spawner_filter = warp::any().map(move || TaskSpawner::new(beacon_processor_send.clone())); + let duplicate_block_status_code = ctx.config.duplicate_block_status_code; + /* * * Start of HTTP method definitions. 
@@ -1284,11 +1289,11 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |block: Arc>, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { + move |block: Arc>, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_block( None, @@ -1297,9 +1302,9 @@ pub fn serve( &network_tx, log, BroadcastValidation::default(), + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ -1314,11 +1319,11 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |block_bytes: Bytes, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { + move |block_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { let block = SignedBeaconBlock::::from_ssz_bytes(&block_bytes, &chain.spec) @@ -1334,9 +1339,9 @@ pub fn serve( &network_tx, log, BroadcastValidation::default(), + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ -1352,12 +1357,12 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |validation_level: api_types::BroadcastValidationQuery, - block: Arc>, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { + move |validation_level: api_types::BroadcastValidationQuery, + block: Arc>, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_block( None, @@ -1366,9 +1371,9 @@ pub fn serve( &network_tx, log, validation_level.broadcast_validation, + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ 
-1384,12 +1389,12 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |validation_level: api_types::BroadcastValidationQuery, - block_bytes: Bytes, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { + move |validation_level: api_types::BroadcastValidationQuery, + block_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { let block = SignedBeaconBlock::::from_ssz_bytes(&block_bytes, &chain.spec) @@ -1405,9 +1410,9 @@ pub fn serve( &network_tx, log, validation_level.broadcast_validation, + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ -1427,11 +1432,11 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |block: SignedBeaconBlock>, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { + move |block: SignedBlindedBeaconBlock, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_blinded_block( block, @@ -1439,9 +1444,9 @@ pub fn serve( &network_tx, log, BroadcastValidation::default(), + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ -1457,13 +1462,13 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |block_bytes: Bytes, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { + move |block_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block = SignedBeaconBlock::>::from_ssz_bytes( + let block = SignedBlindedBeaconBlock::::from_ssz_bytes( &block_bytes, &chain.spec, ) @@ -1476,9 +1481,9 @@ pub 
fn serve( &network_tx, log, BroadcastValidation::default(), + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ -1494,87 +1499,63 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |validation_level: api_types::BroadcastValidationQuery, - block: SignedBeaconBlock>, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { - task_spawner.spawn_async(Priority::P0, async move { - match publish_blocks::publish_blinded_block( + move |validation_level: api_types::BroadcastValidationQuery, + block: SignedBlindedBeaconBlock, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + publish_blocks::publish_blinded_block( block, chain, &network_tx, log, validation_level.broadcast_validation, + duplicate_block_status_code, ) .await - { - Ok(()) => warp::reply().into_response(), - Err(e) => match warp_utils::reject::handle_rejection(e).await { - Ok(reply) => reply.into_response(), - Err(_) => warp::reply::with_status( - StatusCode::INTERNAL_SERVER_ERROR, - eth2::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response(), - }, - } }) }, ); - let post_beacon_blinded_blocks_v2_ssz = - eth_v2 - .and(warp::path("beacon")) - .and(warp::path("blinded_blocks")) - .and(warp::query::()) - .and(warp::path::end()) - .and(warp::body::bytes()) - .and(chain_filter.clone()) - .and(network_tx_filter.clone()) - .and(log_filter.clone()) - .then( - |validation_level: api_types::BroadcastValidationQuery, - block_bytes: Bytes, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| async move { - let block = - match SignedBeaconBlock::>::from_ssz_bytes( - &block_bytes, - &chain.spec, - ) { - Ok(data) => data, - Err(_) => { - return warp::reply::with_status( - StatusCode::BAD_REQUEST, - eth2::StatusCode::BAD_REQUEST, - ) - .into_response(); - } - }; - match 
publish_blocks::publish_blinded_block( + let post_beacon_blinded_blocks_v2_ssz = eth_v2 + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::body::bytes()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .then( + move |validation_level: api_types::BroadcastValidationQuery, + block_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let block = SignedBlindedBeaconBlock::::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; + publish_blocks::publish_blinded_block( block, chain, &network_tx, log, validation_level.broadcast_validation, + duplicate_block_status_code, ) .await - { - Ok(()) => warp::reply().into_response(), - Err(e) => match warp_utils::reject::handle_rejection(e).await { - Ok(reply) => reply.into_response(), - Err(_) => warp::reply::with_status( - StatusCode::INTERNAL_SERVER_ERROR, - eth2::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response(), - }, - } - }, - ); + }) + }, + ); let block_id_or_err = warp::path::param::().or_else(|_| async { Err(warp_utils::reject::custom_bad_request( diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 0f2f7b361c..58524f0698 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -4,7 +4,7 @@ use beacon_chain::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, IntoGossipVerifiedBlock, NotifyExecutionLayer, }; -use eth2::types::BroadcastValidation; +use eth2::types::{BroadcastValidation, ErrorMessage}; use execution_layer::ProvenancedPayload; use lighthouse_network::PubsubMessage; use network::NetworkMessage; @@ -19,7 +19,8 @@ use 
types::{ AbstractExecPayload, BeaconBlockRef, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, FullPayload, Hash256, SignedBeaconBlock, }; -use warp::Rejection; +use warp::http::StatusCode; +use warp::{reply::Response, Rejection, Reply}; pub enum ProvenancedBlock> { /// The payload was built using a local EE. @@ -47,7 +48,8 @@ pub async fn publish_block>( network_tx: &UnboundedSender>, log: Logger, validation_level: BroadcastValidation, -) -> Result<(), Rejection> { + duplicate_status_code: StatusCode, +) -> Result { let seen_timestamp = timestamp_now(); let (block, is_locally_built_block) = match provenanced_block { ProvenancedBlock::Local(block, _) => (block, true), @@ -75,10 +77,30 @@ pub async fn publish_block>( }; /* if we can form a `GossipVerifiedBlock`, we've passed our basic gossip checks */ - let gossip_verified_block = block.into_gossip_verified_block(&chain).map_err(|e| { - warn!(log, "Not publishing block, not gossip verified"; "slot" => beacon_block.slot(), "error" => ?e); - warp_utils::reject::custom_bad_request(e.to_string()) - })?; + let gossip_verified_block = match block.into_gossip_verified_block(&chain) { + Ok(b) => b, + Err(BlockError::BlockIsAlreadyKnown) => { + // Allow the status code for duplicate blocks to be overridden based on config. 
+ return Ok(warp::reply::with_status( + warp::reply::json(&ErrorMessage { + code: duplicate_status_code.as_u16(), + message: "duplicate block".to_string(), + stacktraces: vec![], + }), + duplicate_status_code, + ) + .into_response()); + } + Err(e) => { + warn!( + log, + "Not publishing block - not gossip verified"; + "slot" => beacon_block.slot(), + "error" => ?e + ); + return Err(warp_utils::reject::custom_bad_request(e.to_string())); + } + }; let block_root = block_root.unwrap_or(gossip_verified_block.block_root); @@ -167,8 +189,7 @@ pub async fn publish_block>( &log, ) } - - Ok(()) + Ok(warp::reply().into_response()) } Err(BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) => { Err(warp_utils::reject::custom_server_error( @@ -178,10 +199,6 @@ pub async fn publish_block>( Err(BlockError::Slashable) => Err(warp_utils::reject::custom_bad_request( "proposal for this slot and proposer has already been seen".to_string(), )), - Err(BlockError::BlockIsAlreadyKnown) => { - info!(log, "Block from HTTP API already known"; "block" => ?block_root); - Ok(()) - } Err(e) => { if let BroadcastValidation::Gossip = validation_level { Err(warp_utils::reject::broadcast_without_import(format!("{e}"))) @@ -208,7 +225,8 @@ pub async fn publish_blinded_block( network_tx: &UnboundedSender>, log: Logger, validation_level: BroadcastValidation, -) -> Result<(), Rejection> { + duplicate_status_code: StatusCode, +) -> Result { let block_root = block.canonical_root(); let full_block: ProvenancedBlock>> = reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; @@ -219,6 +237,7 @@ pub async fn publish_blinded_block( network_tx, log, validation_level, + duplicate_status_code, ) .await } diff --git a/beacon_node/http_api/src/task_spawner.rs b/beacon_node/http_api/src/task_spawner.rs index 503faff717..8768e057da 100644 --- a/beacon_node/http_api/src/task_spawner.rs +++ b/beacon_node/http_api/src/task_spawner.rs @@ -159,46 +159,6 @@ impl TaskSpawner { .and_then(|x| x) 
} } - - /// Executes an async task which always returns a `Response`. - pub async fn spawn_async( - self, - priority: Priority, - func: impl Future + Send + Sync + 'static, - ) -> Response { - if let Some(beacon_processor_send) = &self.beacon_processor_send { - // Create a wrapper future that will execute `func` and send the - // result to a channel held by this thread. - let (tx, rx) = oneshot::channel(); - let process_fn = async move { - // Await the future, collect the return value. - let func_result = func.await; - // Send the result down the channel. Ignore any failures; the - // send can only fail if the receiver is dropped. - let _ = tx.send(func_result); - }; - - // Send the function to the beacon processor for execution at some arbitrary time. - let result = send_to_beacon_processor( - beacon_processor_send, - priority, - BlockingOrAsync::Async(Box::pin(process_fn)), - rx, - ) - .await; - convert_rejection(result).await - } else { - // There is no beacon processor so spawn a task directly on the - // tokio executor. - tokio::task::spawn(func).await.unwrap_or_else(|e| { - warp::reply::with_status( - warp::reply::json(&format!("Tokio did not execute task: {e:?}")), - eth2::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response() - }) - } - } } /// Send a task to the beacon processor and await execution. 
diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index b1478f3f29..33834d58ca 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -23,7 +23,7 @@ use network::{NetworkReceivers, NetworkSenders}; use sensitive_url::SensitiveUrl; use slog::Logger; use std::future::Future; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; use store::MemoryStore; @@ -220,15 +220,9 @@ pub async fn create_api_server_on_port( let ctx = Arc::new(Context { config: Config { enabled: true, - listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), listen_port: port, - allow_origin: None, - tls_config: None, - allow_sync_stalled: false, data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), - spec_fork_name: None, - sse_capacity_multiplier: 1, - enable_beacon_processor: true, + ..Config::default() }, chain: Some(chain), network_senders: Some(network_senders), diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 0082589000..96ff37d81a 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -364,13 +364,14 @@ pub async fn consensus_partial_pass_only_consensus() { /* submit `block_b` which should induce equivocation */ let channel = tokio::sync::mpsc::unbounded_channel(); - let publication_result: Result<(), Rejection> = publish_block( + let publication_result = publish_block( None, ProvenancedBlock::local(gossip_block_b.unwrap()), tester.harness.chain.clone(), &channel.0, test_logger, validation_level.unwrap(), + StatusCode::ACCEPTED, ) .await; @@ -641,13 +642,14 @@ pub async fn equivocation_consensus_late_equivocation() { let channel = tokio::sync::mpsc::unbounded_channel(); - let publication_result: Result<(), Rejection> = publish_block( + let publication_result = publish_block( 
None, ProvenancedBlock::local(gossip_block_b.unwrap()), tester.harness.chain, &channel.0, test_logger, validation_level.unwrap(), + StatusCode::ACCEPTED, ) .await; @@ -1294,12 +1296,13 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { let channel = tokio::sync::mpsc::unbounded_channel(); - let publication_result: Result<(), Rejection> = publish_blinded_block( + let publication_result = publish_blinded_block( block_b, tester.harness.chain, &channel.0, test_logger, validation_level.unwrap(), + StatusCode::ACCEPTED, ) .await; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 46cc55591c..adaf1a0f2d 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -8,7 +8,7 @@ use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *}, - BeaconNodeHttpClient, Error, Timeouts, + BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; use execution_layer::test_utils::TestingBuilder; use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI; @@ -1318,6 +1318,63 @@ impl ApiTester { self } + pub async fn test_post_beacon_blocks_duplicate(self) -> Self { + let block = self + .harness + .make_block( + self.harness.get_current_state(), + self.harness.get_current_slot(), + ) + .await + .0; + + assert!(self.client.post_beacon_blocks(&block).await.is_ok()); + + let blinded_block = block.clone_as_blinded(); + + // Test all the POST methods in sequence, they should all behave the same. 
+ let responses = vec![ + self.client.post_beacon_blocks(&block).await.unwrap_err(), + self.client + .post_beacon_blocks_v2(&block, None) + .await + .unwrap_err(), + self.client + .post_beacon_blocks_ssz(&block) + .await + .unwrap_err(), + self.client + .post_beacon_blocks_v2_ssz(&block, None) + .await + .unwrap_err(), + self.client + .post_beacon_blinded_blocks(&blinded_block) + .await + .unwrap_err(), + self.client + .post_beacon_blinded_blocks_v2(&blinded_block, None) + .await + .unwrap_err(), + self.client + .post_beacon_blinded_blocks_ssz(&blinded_block) + .await + .unwrap_err(), + self.client + .post_beacon_blinded_blocks_v2_ssz(&blinded_block, None) + .await + .unwrap_err(), + ]; + for (i, response) in responses.into_iter().enumerate() { + assert_eq!( + response.status().unwrap(), + StatusCode::ACCEPTED, + "response {i}" + ); + } + + self + } + pub async fn test_beacon_blocks(self) -> Self { for block_id in self.interesting_block_ids() { let expected = block_id @@ -4651,6 +4708,14 @@ async fn post_beacon_blocks_invalid() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_beacon_blocks_duplicate() { + ApiTester::new() + .await + .test_post_beacon_blocks_duplicate() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn beacon_pools_post_attestations_valid() { ApiTester::new() diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 974dabbf0c..ed25748133 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -391,6 +391,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Multiplier to apply to the length of HTTP server-sent-event (SSE) channels. 
\ Increasing this value can prevent messages from being dropped.") ) + .arg( + Arg::with_name("http-duplicate-block-status") + .long("http-duplicate-block-status") + .takes_value(true) + .default_value("202") + .value_name("STATUS_CODE") + .help("Status code to send when a block that is already known is POSTed to the \ + HTTP API.") + ) .arg( Arg::with_name("http-enable-beacon-processor") .long("http-enable-beacon-processor") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index d2e92b48f3..b03a8cb3e5 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -155,6 +155,9 @@ pub fn get_config( client_config.http_api.enable_beacon_processor = parse_required(cli_args, "http-enable-beacon-processor")?; + client_config.http_api.duplicate_block_status_code = + parse_required(cli_args, "http-duplicate-block-status")?; + if let Some(cache_size) = clap_utils::parse_optional(cli_args, "shuffling-cache-size")? { client_config.chain.shuffling_cache_size = cache_size; } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 28fd09c09b..822f881799 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1316,6 +1316,26 @@ pub struct BroadcastValidationQuery { pub broadcast_validation: BroadcastValidation, } +pub mod serde_status_code { + use crate::StatusCode; + use serde::{de::Error, Deserialize, Serialize}; + + pub fn serialize(status_code: &StatusCode, ser: S) -> Result + where + S: serde::Serializer, + { + status_code.as_u16().serialize(ser) + } + + pub fn deserialize<'de, D>(de: D) -> Result + where + D: serde::de::Deserializer<'de>, + { + let status_code = u16::deserialize(de)?; + StatusCode::try_from(status_code).map_err(D::Error::custom) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 626be85f19..5069b0261f 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -2387,3 +2387,22 @@ fn 
http_sse_capacity_multiplier_override() { .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.sse_capacity_multiplier, 10)); } + +#[test] +fn http_duplicate_block_status_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.http_api.duplicate_block_status_code.as_u16(), 202) + }); +} + +#[test] +fn http_duplicate_block_status_override() { + CommandLineTest::new() + .flag("http-duplicate-block-status", Some("301")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.http_api.duplicate_block_status_code.as_u16(), 301) + }); +} diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 2a09455b6f..094b85bf81 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -10,7 +10,8 @@ use crate::{ validator_store::{Error as ValidatorStoreError, ValidatorStore}, }; use environment::RuntimeContext; -use eth2::BeaconNodeHttpClient; +use eth2::{BeaconNodeHttpClient, StatusCode}; +use slog::Logger; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::fmt::Debug; @@ -593,12 +594,7 @@ impl BlockService { beacon_node .post_beacon_blocks(&signed_block) .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })? + .or_else(|e| handle_block_post_error(e, slot, log))? } BlockType::Blinded => { let _post_timer = metrics::start_timer_vec( @@ -608,12 +604,7 @@ impl BlockService { beacon_node .post_beacon_blinded_blocks(&signed_block) .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })? + .or_else(|e| handle_block_post_error(e, slot, log))? 
} } Ok::<_, BlockError>(()) @@ -634,3 +625,29 @@ impl BlockService { Ok(()) } } + +fn handle_block_post_error(err: eth2::Error, slot: Slot, log: &Logger) -> Result<(), BlockError> { + // Handle non-200 success codes. + if let Some(status) = err.status() { + if status == StatusCode::ACCEPTED { + info!( + log, + "Block is already known to BN or might be invalid"; + "slot" => slot, + "status_code" => status.as_u16(), + ); + return Ok(()); + } else if status.is_success() { + debug!( + log, + "Block published with non-standard success code"; + "slot" => slot, + "status_code" => status.as_u16(), + ); + return Ok(()); + } + } + Err(BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {err:?}", + ))) +} From 9c24cd4ad4ab6eaa7d5725d4a300c271a1a2c25e Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 28 Aug 2023 00:55:32 +0000 Subject: [PATCH 66/75] Do not log slot clock error prior to genesis (#4657) ## Issue Addressed #4654 ## Proposed Changes Only log error if we're unable to read slot clock after genesis. I thought about simply down grading the `error` to a `warn`, but feel like it's still unnecessary noise before genesis, and it would be good to retain error log if we're pass genesis. But I'd be ok with just downgrading the log level, too. --- .../src/subnet_service/attestation_subnets.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index b4f52df39d..1cae6299e1 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -302,9 +302,16 @@ impl AttestationService { /// Gets the long lived subnets the node should be subscribed to during the current epoch and /// the remaining duration for which they remain valid. 
fn recompute_long_lived_subnets_inner(&mut self) -> Result { - let current_epoch = self.beacon_chain.epoch().map_err( - |e| error!(self.log, "Failed to get the current epoch from clock"; "err" => ?e), - )?; + let current_epoch = self.beacon_chain.epoch().map_err(|e| { + if !self + .beacon_chain + .slot_clock + .is_prior_to_genesis() + .unwrap_or(false) + { + error!(self.log, "Failed to get the current epoch from clock"; "err" => ?e) + } + })?; let (subnets, next_subscription_epoch) = SubnetId::compute_subnets_for_epoch::( self.node_id.raw().into(), From 55e02e7c3f92e46900b6c7df6e0eaab971718027 Mon Sep 17 00:00:00 2001 From: Mac L Date: Mon, 28 Aug 2023 00:55:33 +0000 Subject: [PATCH 67/75] Show `--gui` flag in help text (#4660) ## Issue Addressed N/A ## Proposed Changes Remove the `hidden(true)` modifier on the `--gui` flag so it shows up when running `lighthouse bn --help` ## Additional Info We need to include this now that Siren has had its first stable release. --- beacon_node/src/cli.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index ed25748133..837625e12a 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1134,7 +1134,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("gui") .long("gui") - .hidden(true) .help("Enable the graphical user interface and all its requirements. \ This enables --http and --validator-monitor-auto and enables SSE logging.") .takes_value(false) From e056c279aa7f476baeaad6690ac86001d42af597 Mon Sep 17 00:00:00 2001 From: Mac L Date: Mon, 28 Aug 2023 00:55:34 +0000 Subject: [PATCH 68/75] Increase `web3signer_tests` timeouts (#4662) ## Issue Addressed `web3signer_tests` can sometimes timeout. ## Proposed Changes Increase the `web3signer_tests` timeout from 20s to 30s ## Additional Info Previously I believed the consistent CI failures were due to this, but it ended up being something different. 
See below: --- The timing of this makes it very likely it is related to the [latest release of `web3-signer`](https://github.com/Consensys/web3signer/releases/tag/23.8.1). I now believe this is due to an out of date Java runtime on our runners. A newer version of Java became a requirement with the new `web3-signer` release. However, I was getting timeouts locally, which implies that the margin before timeout is quite small at 20s so bumping it up to 30s could be a good idea regardless. --- testing/web3signer_tests/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index dd17ae23b1..463de0c8b3 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -51,7 +51,7 @@ mod tests { /// If the we are unable to reach the Web3Signer HTTP API within this time out then we will /// assume it failed to start. - const UPCHECK_TIMEOUT: Duration = Duration::from_secs(20); + const UPCHECK_TIMEOUT: Duration = Duration::from_secs(30); /// Set to `false` to send the Web3Signer logs to the console during tests. Logs are useful when /// debugging. From d61f5071849723f56c4cdd02e8a4f0c6100d1543 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 28 Aug 2023 05:34:27 +0000 Subject: [PATCH 69/75] Add Holesky (#4653) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Issue Addressed NA ## Proposed Changes Add the Holesky network config as per https://github.com/eth-clients/holesky/tree/36e4ff2d5138dcb2eb614f0f60fdb060b2adc1e2/custom_config_data. Since the genesis state is ~190MB, I've opted to *not* include it in the binary and instead download it at runtime (see #4564 for context). To download this file we have: - A hard-coded URL for a SigP-hosted S3 bucket with the Holesky genesis state. 
Assuming this download works correctly, users will be none the wiser that the state wasn't included in the binary (apart from some additional logs) - If the user provides a `--checkpoint-sync-url` flag, then LH will download the genesis state from that server rather than our S3 bucket. - If the user provides a `--genesis-state-url` flag, then LH will download the genesis state from that server regardless of the S3 bucket or `--checkpoint-sync-url` flag. - Whenever a genesis state is downloaded it is checked against a checksum baked into the binary. - A genesis state will never be downloaded if it's already included in the binary. - There is a `--genesis-state-url-timeout` flag to tweak the timeout for downloading the genesis state file. ## Log Output Example of log output when a state is downloaded: ```bash Aug 23 05:40:13.424 INFO Logging to file path: "/Users/paul/.lighthouse/holesky/beacon/logs/beacon.log" Aug 23 05:40:13.425 INFO Lighthouse started version: Lighthouse/v4.3.0-bd9931f+ Aug 23 05:40:13.425 INFO Configured for network name: holesky Aug 23 05:40:13.426 INFO Data directory initialised datadir: /Users/paul/.lighthouse/holesky Aug 23 05:40:13.427 INFO Deposit contract address: 0x4242424242424242424242424242424242424242, deploy_block: 0 Aug 23 05:40:13.427 INFO Downloading genesis state info: this may take some time on testnets with large validator counts, timeout: 60s, server: https://sigp-public-genesis-states.s3.ap-southeast-2.amazonaws.com/ Aug 23 05:40:29.895 INFO Starting from known genesis state service: beacon ``` Example of log output when there are no URLs specified: ``` Aug 23 06:29:51.645 INFO Logging to file path: "/Users/paul/.lighthouse/goerli/beacon/logs/beacon.log" Aug 23 06:29:51.646 INFO Lighthouse started version: Lighthouse/v4.3.0-666a39c+ Aug 23 06:29:51.646 INFO Configured for network name: goerli Aug 23 06:29:51.647 INFO Data directory initialised datadir: /Users/paul/.lighthouse/goerli Aug 23 06:29:51.647 INFO Deposit contract 
address: 0xff50ed3d0ec03ac01d4c79aad74928bff48a7b2b, deploy_block: 4367322 The genesis state is not present in the binary and there are no known download URLs. Please use --checkpoint-sync-url or --genesis-state-url. ``` ## Additional Info I tested the `--genesis-state-url` flag with all 9 Goerli checkpoint sync servers on https://eth-clients.github.io/checkpoint-sync-endpoints/ and they all worked 🎉 My IDE eagerly formatted some `Cargo.toml`. I've disabled it but I don't see the value in spending time reverting the changes that are already there. I also added the `GenesisStateBytes` enum to avoid an unnecessary clone on the genesis state bytes baked into the binary. This is not a huge deal on Mainnet, but will become more relevant when testing with big genesis states. When we do a fresh checkpoint sync we're downloading the genesis state to check the `genesis_validators_root` against the finalised state we receive. This is not *entirely* pointless, since we verify the checksum when we download the genesis state so we are actually guaranteeing that the finalised state is on the same network. There might be a smarter/less-download-y way to go about this, but I've run out of cycles to figure that out. Perhaps we can grab it in the next release? 
--- Cargo.lock | 8 + account_manager/Cargo.toml | 10 +- account_manager/src/validator/exit.rs | 19 +- .../src/validator/slashing_protection.rs | 24 +- beacon_node/Cargo.toml | 18 +- beacon_node/client/src/builder.rs | 41 ++- beacon_node/client/src/config.rs | 15 +- beacon_node/src/config.rs | 40 ++- boot_node/src/config.rs | 16 +- common/eth2_config/src/lib.rs | 90 ++++-- common/eth2_network_config/Cargo.toml | 13 +- common/eth2_network_config/build.rs | 9 +- .../holesky/boot_enr.yaml | 8 + .../holesky/config.yaml | 117 ++++++++ .../holesky/deploy_block.txt | 1 + common/eth2_network_config/src/lib.rs | 275 ++++++++++++++++-- common/pretty_reqwest_error/src/lib.rs | 6 + lcli/src/eth1_genesis.rs | 2 +- lcli/src/interop_genesis.rs | 2 +- lcli/src/new_testnet.rs | 5 +- lighthouse/src/main.rs | 24 ++ lighthouse/tests/beacon_node.rs | 25 ++ 22 files changed, 651 insertions(+), 117 deletions(-) create mode 100644 common/eth2_network_config/built_in_network_configs/holesky/boot_enr.yaml create mode 100644 common/eth2_network_config/built_in_network_configs/holesky/config.yaml create mode 100644 common/eth2_network_config/built_in_network_configs/holesky/deploy_block.txt diff --git a/Cargo.lock b/Cargo.lock index 56d421e039..75c15ba781 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -33,6 +33,7 @@ dependencies = [ "serde", "serde_json", "slashing_protection", + "slog", "slot_clock", "tempfile", "tokio", @@ -2223,9 +2224,16 @@ dependencies = [ "discv5", "eth2_config", "ethereum_ssz", + "logging", + "pretty_reqwest_error", + "reqwest", + "sensitive_url", "serde_yaml", + "sha2 0.10.7", + "slog", "tempfile", "types", + "url", "zip", ] diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 7d90cbb427..238e4a77e0 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -1,7 +1,10 @@ [package] name = "account_manager" version = "0.3.5" -authors = ["Paul Hauner ", "Luke Anderson "] +authors = [ + "Paul Hauner ", + "Luke Anderson ", +] edition = 
"2021" [dependencies] @@ -19,13 +22,14 @@ tokio = { version = "1.14.0", features = ["full"] } eth2_keystore = { path = "../crypto/eth2_keystore" } account_utils = { path = "../common/account_utils" } slashing_protection = { path = "../validator_client/slashing_protection" } -eth2 = {path = "../common/eth2"} -safe_arith = {path = "../consensus/safe_arith"} +eth2 = { path = "../common/eth2" } +safe_arith = { path = "../consensus/safe_arith" } slot_clock = { path = "../common/slot_clock" } filesystem = { path = "../common/filesystem" } sensitive_url = { path = "../common/sensitive_url" } serde = { version = "1.0.116", features = ["derive"] } serde_json = "1.0.58" +slog = { version = "2.5.2" } [dev-dependencies] tempfile = "3.1.0" diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 5755a355f3..1ff61a7c01 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -10,6 +10,7 @@ use eth2_keystore::Keystore; use eth2_network_config::Eth2NetworkConfig; use safe_arith::SafeArith; use sensitive_url::SensitiveUrl; +use slog::Logger; use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::path::{Path, PathBuf}; use std::time::Duration; @@ -78,6 +79,12 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< let password_file_path: Option = clap_utils::parse_optional(matches, PASSWORD_FILE_FLAG)?; + let genesis_state_url: Option = + clap_utils::parse_optional(matches, "genesis-state-url")?; + let genesis_state_url_timeout = + clap_utils::parse_required(matches, "genesis-state-url-timeout") + .map(Duration::from_secs)?; + let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); let no_wait = matches.is_present(NO_WAIT); let no_confirmation = matches.is_present(NO_CONFIRMATION); @@ -104,6 +111,9 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< ð2_network_config, no_wait, no_confirmation, + genesis_state_url, + genesis_state_url_timeout, + 
env.core_context().log(), ))?; Ok(()) @@ -120,13 +130,14 @@ async fn publish_voluntary_exit( eth2_network_config: &Eth2NetworkConfig, no_wait: bool, no_confirmation: bool, + genesis_state_url: Option, + genesis_state_url_timeout: Duration, + log: &Logger, ) -> Result<(), String> { let genesis_data = get_geneisis_data(client).await?; let testnet_genesis_root = eth2_network_config - .beacon_state::() - .as_ref() - .expect("network should have valid genesis state") - .genesis_validators_root(); + .genesis_validators_root::(genesis_state_url.as_deref(), genesis_state_url_timeout, log)? + .ok_or("Genesis state is unknown")?; // Verify that the beacon node and validator being exited are on the same network. if genesis_data.genesis_validators_root != testnet_genesis_root { diff --git a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs index f25bbd8159..570f29b4ad 100644 --- a/account_manager/src/validator/slashing_protection.rs +++ b/account_manager/src/validator/slashing_protection.rs @@ -7,7 +7,8 @@ use slashing_protection::{ use std::fs::File; use std::path::PathBuf; use std::str::FromStr; -use types::{BeaconState, Epoch, EthSpec, PublicKeyBytes, Slot}; +use std::time::Duration; +use types::{Epoch, EthSpec, PublicKeyBytes, Slot}; pub const CMD: &str = "slashing-protection"; pub const IMPORT_CMD: &str = "import"; @@ -82,19 +83,24 @@ pub fn cli_run( ) -> Result<(), String> { let slashing_protection_db_path = validator_base_dir.join(SLASHING_PROTECTION_FILENAME); + let genesis_state_url: Option = + clap_utils::parse_optional(matches, "genesis-state-url")?; + let genesis_state_url_timeout = + clap_utils::parse_required(matches, "genesis-state-url-timeout") + .map(Duration::from_secs)?; + + let context = env.core_context(); let eth2_network_config = env .eth2_network_config .ok_or("Unable to get testnet configuration from the environment")?; let genesis_validators_root = eth2_network_config - .beacon_state::() - 
.map(|state: BeaconState| state.genesis_validators_root()) - .map_err(|e| { - format!( - "Unable to get genesis state, has genesis occurred? Detail: {:?}", - e - ) - })?; + .genesis_validators_root::( + genesis_state_url.as_deref(), + genesis_state_url_timeout, + context.log(), + )? + .ok_or_else(|| "Unable to get genesis state, has genesis occurred?".to_string())?; match matches.subcommand() { (IMPORT_CMD, Some(matches)) => { diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index f55c724dc3..a0e46705b7 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,7 +1,10 @@ [package] name = "beacon_node" version = "4.3.0" -authors = ["Paul Hauner ", "Age Manning ", + "Age Manning { + ClientGenesis::GenesisState => { info!( context.log(), "Starting from known genesis state"; ); - let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) - .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; + let genesis_state = genesis_state(&runtime_context, &config, log)?; builder.genesis_state(genesis_state).map(|v| (v, None))? } ClientGenesis::WeakSubjSszBytes { anchor_state_bytes, anchor_block_bytes, - genesis_state_bytes, } => { info!(context.log(), "Starting checkpoint sync"); if config.chain.genesis_backfill { @@ -279,17 +276,13 @@ where .map_err(|e| format!("Unable to parse weak subj state SSZ: {:?}", e))?; let anchor_block = SignedBeaconBlock::from_ssz_bytes(&anchor_block_bytes, &spec) .map_err(|e| format!("Unable to parse weak subj block SSZ: {:?}", e))?; - let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) - .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; + let genesis_state = genesis_state(&runtime_context, &config, log)?; builder .weak_subjectivity_state(anchor_state, anchor_block, genesis_state) .map(|v| (v, None))? 
} - ClientGenesis::CheckpointSyncUrl { - genesis_state_bytes, - url, - } => { + ClientGenesis::CheckpointSyncUrl { url } => { info!( context.log(), "Starting checkpoint sync"; @@ -384,8 +377,7 @@ where debug!(context.log(), "Downloaded finalized block"); - let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) - .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; + let genesis_state = genesis_state(&runtime_context, &config, log)?; info!( context.log(), @@ -1089,3 +1081,22 @@ where Ok(self) } } + +/// Obtain the genesis state from the `eth2_network_config` in `context`. +fn genesis_state( + context: &RuntimeContext, + config: &ClientConfig, + log: &Logger, +) -> Result, String> { + let eth2_network_config = context + .eth2_network_config + .as_ref() + .ok_or("An eth2_network_config is required to obtain the genesis state")?; + eth2_network_config + .genesis_state::( + config.genesis_state_url.as_deref(), + config.genesis_state_url_timeout, + log, + )? + .ok_or_else(|| "Genesis state is unknown".to_string()) +} diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 283efe9c3f..adaf027984 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -7,6 +7,7 @@ use sensitive_url::SensitiveUrl; use serde_derive::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; +use std::time::Duration; use types::{Graffiti, PublicKeyBytes}; /// Default directory name for the freezer database under the top-level data dir. const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db"; @@ -25,18 +26,13 @@ pub enum ClientGenesis { /// contract. #[default] DepositContract, - /// Loads the genesis state from SSZ-encoded `BeaconState` bytes. - /// - /// We include the bytes instead of the `BeaconState` because the `EthSpec` type - /// parameter would be very annoying. - SszBytes { genesis_state_bytes: Vec }, + /// Loads the genesis state from the genesis state in the `Eth2NetworkConfig`. 
+ GenesisState, WeakSubjSszBytes { - genesis_state_bytes: Vec, anchor_state_bytes: Vec, anchor_block_bytes: Vec, }, CheckpointSyncUrl { - genesis_state_bytes: Vec, url: SensitiveUrl, }, } @@ -81,6 +77,8 @@ pub struct Config { pub slasher: Option, pub logger_config: LoggerConfig, pub beacon_processor: BeaconProcessorConfig, + pub genesis_state_url: Option, + pub genesis_state_url_timeout: Duration, } impl Default for Config { @@ -108,6 +106,9 @@ impl Default for Config { validator_monitor_individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, logger_config: LoggerConfig::default(), beacon_processor: <_>::default(), + genesis_state_url: <_>::default(), + // This default value should always be overwritten by the CLI default value. + genesis_state_url_timeout: Duration::from_secs(60), } } } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index b03a8cb3e5..70495777e2 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -471,9 +471,30 @@ pub fn get_config( client_config.chain.checkpoint_sync_url_timeout = clap_utils::parse_required::(cli_args, "checkpoint-sync-url-timeout")?; - client_config.genesis = if let Some(genesis_state_bytes) = - eth2_network_config.genesis_state_bytes.clone() - { + client_config.genesis_state_url_timeout = + clap_utils::parse_required(cli_args, "genesis-state-url-timeout") + .map(Duration::from_secs)?; + + let genesis_state_url_opt = + clap_utils::parse_optional::(cli_args, "genesis-state-url")?; + let checkpoint_sync_url_opt = + clap_utils::parse_optional::(cli_args, "checkpoint-sync-url")?; + + // If the `--genesis-state-url` is defined, use that to download the + // genesis state bytes. If it's not defined, try `--checkpoint-sync-url`. 
+ client_config.genesis_state_url = if let Some(genesis_state_url) = genesis_state_url_opt { + Some(genesis_state_url) + } else if let Some(checkpoint_sync_url) = checkpoint_sync_url_opt { + // If the checkpoint sync URL is going to be used to download the + // genesis state, adopt the timeout from the checkpoint sync URL too. + client_config.genesis_state_url_timeout = + Duration::from_secs(client_config.chain.checkpoint_sync_url_timeout); + Some(checkpoint_sync_url) + } else { + None + }; + + client_config.genesis = if eth2_network_config.genesis_state_is_known() { // Set up weak subjectivity sync, or start from the hardcoded genesis state. if let (Some(initial_state_path), Some(initial_block_path)) = ( cli_args.value_of("checkpoint-state"), @@ -495,7 +516,6 @@ pub fn get_config( let anchor_block_bytes = read(initial_block_path)?; ClientGenesis::WeakSubjSszBytes { - genesis_state_bytes, anchor_state_bytes, anchor_block_bytes, } @@ -503,17 +523,9 @@ pub fn get_config( let url = SensitiveUrl::parse(remote_bn_url) .map_err(|e| format!("Invalid checkpoint sync URL: {:?}", e))?; - ClientGenesis::CheckpointSyncUrl { - genesis_state_bytes, - url, - } + ClientGenesis::CheckpointSyncUrl { url } } else { - // Note: re-serializing the genesis state is not so efficient, however it avoids adding - // trait bounds to the `ClientGenesis` enum. This would have significant flow-on - // effects. 
- ClientGenesis::SszBytes { - genesis_state_bytes, - } + ClientGenesis::GenesisState } } else { if cli_args.is_present("checkpoint-state") || cli_args.is_present("checkpoint-sync-url") { diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index d006156bf9..779269921a 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -10,6 +10,7 @@ use lighthouse_network::{ use serde_derive::{Deserialize, Serialize}; use ssz::Encode; use std::net::{SocketAddrV4, SocketAddrV6}; +use std::time::Duration; use std::{marker::PhantomData, path::PathBuf}; use types::EthSpec; @@ -90,8 +91,19 @@ impl BootNodeConfig { let enr_fork = { let spec = eth2_network_config.chain_spec::()?; - if eth2_network_config.beacon_state_is_known() { - let genesis_state = eth2_network_config.beacon_state::()?; + let genesis_state_url: Option = + clap_utils::parse_optional(matches, "genesis-state-url")?; + let genesis_state_url_timeout = + clap_utils::parse_required(matches, "genesis-state-url-timeout") + .map(Duration::from_secs)?; + + if eth2_network_config.genesis_state_is_known() { + let genesis_state = eth2_network_config + .genesis_state::(genesis_state_url.as_deref(), genesis_state_url_timeout, &logger)? + .ok_or_else(|| { + "The genesis state for this network is not known, this is an unsupported mode" + .to_string() + })?; slog::info!(logger, "Genesis state found"; "root" => genesis_state.canonical_root().to_string()); let enr_fork = spec.enr_fork_id::( diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index 7e5506667f..bb14d06756 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -23,6 +23,16 @@ pub const PREDEFINED_NETWORKS_DIR: &str = predefined_networks_dir!(); pub const GENESIS_FILE_NAME: &str = "genesis.ssz"; pub const GENESIS_ZIP_FILE_NAME: &str = "genesis.ssz.zip"; +const HOLESKY_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url { + urls: &[ + // This is an AWS S3 bucket hosted by Sigma Prime. 
See Paul Hauner for + // more details. + "https://sigp-public-genesis-states.s3.ap-southeast-2.amazonaws.com/holesky/", + ], + checksum: "0x76631cd0b9ddc5b2c766b496e23f16759ce1181446a4efb40e5540cd15b78a07", + genesis_validators_root: "0x9143aa7c615a7f7115e2b6aac319c03529df8242ae705fba9df39b79c59fa8b1", +}; + /// The core configuration of a Lighthouse beacon node. #[derive(Debug, Clone)] pub struct Eth2Config { @@ -62,6 +72,32 @@ impl Eth2Config { } } +/// Describes how a genesis state may be obtained. +#[derive(Copy, Clone, Debug, PartialEq)] +pub enum GenesisStateSource { + /// The genesis state for this network is not yet known. + Unknown, + /// The genesis state for this network is included in the binary via + /// `include_bytes!` or by loading from a testnet dir. + IncludedBytes, + /// The genesis state for this network should be downloaded from a URL. + Url { + /// URLs to try to download the file from, in order. + urls: &'static [&'static str], + /// The SHA256 of the genesis state bytes. This is *not* a hash tree + /// root to simplify the types (i.e., to avoid getting EthSpec + /// involved). + /// + /// The format should be 0x-prefixed ASCII bytes. + checksum: &'static str, + /// The `genesis_validators_root` of the genesis state. Used to avoid + /// downloading the state for simple signing operations. + /// + /// The format should be 0x-prefixed ASCII bytes. + genesis_validators_root: &'static str, + }, +} + /// A directory that can be built by downloading files via HTTP. 
/// /// Used by the `eth2_network_config` crate to initialize the network directories during build and @@ -70,7 +106,7 @@ impl Eth2Config { pub struct Eth2NetArchiveAndDirectory<'a> { pub name: &'a str, pub config_dir: &'a str, - pub genesis_is_known: bool, + pub genesis_state_source: GenesisStateSource, } impl<'a> Eth2NetArchiveAndDirectory<'a> { @@ -89,15 +125,11 @@ impl<'a> Eth2NetArchiveAndDirectory<'a> { } } -/// Indicates that the `genesis.ssz.zip` file is present on the filesystem. This means that the -/// deposit ceremony has concluded and the final genesis `BeaconState` is known. -const GENESIS_STATE_IS_KNOWN: bool = true; - -#[derive(Copy, Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq)] pub struct HardcodedNet { pub name: &'static str, pub config_dir: &'static str, - pub genesis_is_known: bool, + pub genesis_state_source: GenesisStateSource, pub config: &'static [u8], pub deploy_block: &'static [u8], pub boot_enr: &'static [u8], @@ -109,7 +141,7 @@ pub struct HardcodedNet { /// It also defines a `include__file!` macro which provides a wrapper around /// `std::include_bytes`, allowing the inclusion of bytes from the specific testnet directory. macro_rules! define_archive { - ($name_ident: ident, $config_dir: tt, $genesis_is_known: ident) => { + ($name_ident: ident, $config_dir: tt, $genesis_state_source: path) => { paste! { #[macro_use] pub mod $name_ident { @@ -118,7 +150,7 @@ macro_rules! define_archive { pub const ETH2_NET_DIR: Eth2NetArchiveAndDirectory = Eth2NetArchiveAndDirectory { name: stringify!($name_ident), config_dir: $config_dir, - genesis_is_known: $genesis_is_known, + genesis_state_source: $genesis_state_source, }; /// A wrapper around `std::include_bytes` which includes a file from a specific network @@ -151,7 +183,7 @@ macro_rules! 
define_net { $this_crate::HardcodedNet { name: ETH2_NET_DIR.name, config_dir: ETH2_NET_DIR.config_dir, - genesis_is_known: ETH2_NET_DIR.genesis_is_known, + genesis_state_source: ETH2_NET_DIR.genesis_state_source, config: $this_crate::$include_file!($this_crate, "../", "config.yaml"), deploy_block: $this_crate::$include_file!($this_crate, "../", "deploy_block.txt"), boot_enr: $this_crate::$include_file!($this_crate, "../", "boot_enr.yaml"), @@ -199,9 +231,9 @@ macro_rules! define_nets { /// `build.rs` which will unzip the genesis states. Then, that `eth2_network_configs` crate can /// perform the final step of using `std::include_bytes` to bake the files (bytes) into the binary. macro_rules! define_hardcoded_nets { - ($(($name_ident: ident, $config_dir: tt, $genesis_is_known: ident)),+) => { + ($(($name_ident: ident, $config_dir: tt, $genesis_state_source: path)),+) => { $( - define_archive!($name_ident, $config_dir, $genesis_is_known); + define_archive!($name_ident, $config_dir, $genesis_state_source); )+ pub const ETH2_NET_DIRS: &[Eth2NetArchiveAndDirectory<'static>] = &[$($name_ident::ETH2_NET_DIR,)+]; @@ -242,9 +274,8 @@ define_hardcoded_nets!( // The name of the directory in the `eth2_network_config/built_in_network_configs` // directory where the configuration files are located for this network. "mainnet", - // Set to `true` if the genesis state can be found in the `built_in_network_configs` - // directory. - GENESIS_STATE_IS_KNOWN + // Describes how the genesis state can be obtained. + GenesisStateSource::IncludedBytes ), ( // Network name (must be unique among all networks). @@ -252,9 +283,8 @@ define_hardcoded_nets!( // The name of the directory in the `eth2_network_config/built_in_network_configs` // directory where the configuration files are located for this network. "prater", - // Set to `true` if the genesis state can be found in the `built_in_network_configs` - // directory. 
- GENESIS_STATE_IS_KNOWN + // Describes how the genesis state can be obtained. + GenesisStateSource::IncludedBytes ), ( // Network name (must be unique among all networks). @@ -264,9 +294,8 @@ define_hardcoded_nets!( // // The Goerli network is effectively an alias to Prater. "prater", - // Set to `true` if the genesis state can be found in the `built_in_network_configs` - // directory. - GENESIS_STATE_IS_KNOWN + // Describes how the genesis state can be obtained. + GenesisStateSource::IncludedBytes ), ( // Network name (must be unique among all networks). @@ -274,9 +303,8 @@ define_hardcoded_nets!( // The name of the directory in the `eth2_network_config/built_in_network_configs` // directory where the configuration files are located for this network. "gnosis", - // Set to `true` if the genesis state can be found in the `built_in_network_configs` - // directory. - GENESIS_STATE_IS_KNOWN + // Describes how the genesis state can be obtained. + GenesisStateSource::IncludedBytes ), ( // Network name (must be unique among all networks). @@ -284,8 +312,16 @@ define_hardcoded_nets!( // The name of the directory in the `eth2_network_config/built_in_network_configs` // directory where the configuration files are located for this network. "sepolia", - // Set to `true` if the genesis state can be found in the `built_in_network_configs` - // directory. - GENESIS_STATE_IS_KNOWN + // Describes how the genesis state can be obtained. + GenesisStateSource::IncludedBytes + ), + ( + // Network name (must be unique among all networks). + holesky, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "holesky", + // Describes how the genesis state can be obtained. 
+ HOLESKY_GENESIS_STATE_SOURCE ) ); diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index d1fe25f1cc..e73f64d5a8 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -8,14 +8,21 @@ build = "build.rs" [build-dependencies] zip = "0.6" -eth2_config = { path = "../eth2_config"} +eth2_config = { path = "../eth2_config" } [dev-dependencies] tempfile = "3.1.0" [dependencies] serde_yaml = "0.8.13" -types = { path = "../../consensus/types"} +types = { path = "../../consensus/types" } ethereum_ssz = "0.5.0" -eth2_config = { path = "../eth2_config"} +eth2_config = { path = "../eth2_config" } discv5 = "0.3.1" +reqwest = { version = "0.11.0", features = ["blocking"] } +pretty_reqwest_error = { path = "../pretty_reqwest_error" } +sha2 = "0.10" +url = "2.2.2" +sensitive_url = { path = "../sensitive_url" } +slog = "2.5.2" +logging = { path = "../logging" } diff --git a/common/eth2_network_config/build.rs b/common/eth2_network_config/build.rs index fa45fafa4e..3165930f4a 100644 --- a/common/eth2_network_config/build.rs +++ b/common/eth2_network_config/build.rs @@ -1,5 +1,7 @@ //! Extracts zipped genesis states on first run. 
-use eth2_config::{Eth2NetArchiveAndDirectory, ETH2_NET_DIRS, GENESIS_FILE_NAME}; +use eth2_config::{ + Eth2NetArchiveAndDirectory, GenesisStateSource, ETH2_NET_DIRS, GENESIS_FILE_NAME, +}; use std::fs::File; use std::io; use zip::ZipArchive; @@ -26,7 +28,7 @@ fn uncompress_state(network: &Eth2NetArchiveAndDirectory<'static>) -> Result<(), return Ok(()); } - if network.genesis_is_known { + if network.genesis_state_source == GenesisStateSource::IncludedBytes { // Extract genesis state from genesis.ssz.zip let archive_path = network.genesis_state_archive(); let archive_file = File::open(&archive_path) @@ -46,7 +48,8 @@ fn uncompress_state(network: &Eth2NetArchiveAndDirectory<'static>) -> Result<(), io::copy(&mut file, &mut outfile) .map_err(|e| format!("Error writing file {:?}: {}", genesis_ssz_path, e))?; } else { - // Create empty genesis.ssz if genesis is unknown + // Create empty genesis.ssz if genesis is unknown or to be downloaded via URL. + // This is a bit of a hack to make `include_bytes!` easier to deal with. 
File::create(genesis_ssz_path) .map_err(|e| format!("Failed to create {}: {}", GENESIS_FILE_NAME, e))?; } diff --git a/common/eth2_network_config/built_in_network_configs/holesky/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/holesky/boot_enr.yaml new file mode 100644 index 0000000000..616d41d672 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/holesky/boot_enr.yaml @@ -0,0 +1,8 @@ +# EF +- enr:-Iq4QJk4WqRkjsX5c2CXtOra6HnxN-BMXnWhmhEQO9Bn9iABTJGdjUOurM7Btj1ouKaFkvTRoju5vz2GPmVON2dffQKGAX53x8JigmlkgnY0gmlwhLKAlv6Jc2VjcDI1NmsxoQK6S-Cii_KmfFdUJL2TANL3ksaKUnNXvTCv1tLwXs0QgIN1ZHCCIyk +- enr:-KG4QF6d6vMSboSujAXTI4vYqArccm0eIlXfcxf2Lx_VE1q6IkQo_2D5LAO3ZSBVUs0w5rrVDmABJZuMzISe_pZundADhGV0aDKQqX6DZjABcAAAAQAAAAAAAIJpZIJ2NIJpcISygIjpiXNlY3AyNTZrMaEDF3aSa7QSCvdqLpANNd8GML4PLEZVg45fKQwMWhDZjd2DdGNwgiMog3VkcIIjKA +- enr:-Ly4QJLXSSAj3ggPBIcodvBU6IyfpU_yW7E9J-5syoJorBuvcYj_Fokcjr303bQoTdWXADf8po0ssh75Mr5wVGzZZsMBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCpfoNmMAFwAAABAAAAAAAAgmlkgnY0gmlwhJK-DYCJc2VjcDI1NmsxoQJrIlXIQDvQ6t9yDySqJYDXgZgLXzTvq8W7OI51jfmxJohzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +# Teku +- enr:-LK4QMlzEff6d-M0A1pSFG5lJ2c56i_I-ZftdojZbW3ehkGNM4pkQuHQqzVvF1BG9aDjIakjnmO23mCBFFZ2w5zOsugEh2F0dG5ldHOIAAAAAAYAAACEZXRoMpCpfoNmMAFwAAABAAAAAAAAgmlkgnY0gmlwhKyuI_mJc2VjcDI1NmsxoQIH1kQRCZW-4AIVyAeXj5o49m_IqNFKRHp6tSpfXMUrSYN0Y3CCIyiDdWRwgiMo +# Sigma Prime +- enr:-Le4QI88slOwzz66Ksq8Vnz324DPb1BzSiY-WYPvnoJIl-lceW9bmSJnwDzgNbCjp5wsBigg76x4tValvGgQPxxSjrMBhGV0aDKQqX6DZjABcAAAAQAAAAAAAIJpZIJ2NIJpcIQ5gR6Wg2lwNpAgAUHQBwEQAAAAAAAAADR-iXNlY3AyNTZrMaEDPMSNdcL92uNIyCsS177Z6KTXlbZakQqxv3aQcWawNXeDdWRwgiMohHVkcDaCI4I diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml new file mode 100644 index 0000000000..a6bfd87ade --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -0,0 +1,117 @@ +# Extends the mainnet preset 
+PRESET_BASE: 'mainnet' +CONFIG_NAME: holesky + +# Genesis +# --------------------------------------------------------------- +# `2**14` (= 16,384) +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 +# Sep-15-2023 13:55:00 +UTC +MIN_GENESIS_TIME: 1694786100 +GENESIS_FORK_VERSION: 0x00017000 +# Genesis delay 5 mins +GENESIS_DELAY: 300 + + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x10017000 +ALTAIR_FORK_EPOCH: 0 +# Merge +BELLATRIX_FORK_VERSION: 0x20017000 +BELLATRIX_FORK_EPOCH: 0 +TERMINAL_TOTAL_DIFFICULTY: 0 +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Capella +CAPELLA_FORK_VERSION: 0x30017000 +CAPELLA_FORK_EPOCH: 256 + +# DENEB +DENEB_FORK_VERSION: 0x40017000 +DENEB_FORK_EPOCH: 18446744073709551615 + +# Time parameters +# --------------------------------------------------------------- +# 12 seconds +SECONDS_PER_SLOT: 12 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 14 +# 2**8 (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs ~27 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 + + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 28,000,000,000 Gwei to ensure quicker ejection +EJECTION_BALANCE: 28000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 65536 + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 + +# Deposit contract +# --------------------------------------------------------------- 
+DEPOSIT_CHAIN_ID: 17000 +DEPOSIT_NETWORK_ID: 17000 +DEPOSIT_CONTRACT_ADDRESS: 0x4242424242424242424242424242424242424242 + +# Networking +# --------------------------------------------------------------- +# `10 * 2**20` (= 10485760, 10 MiB) +GOSSIP_MAX_SIZE: 10485760 +# `2**10` (= 1024) +MAX_REQUEST_BLOCKS: 1024 +# `2**8` (= 256) +EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 +# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +# `10 * 2**20` (=10485760, 10 MiB) +MAX_CHUNK_SIZE: 10485760 +# 5s +TTFB_TIMEOUT: 5 +# 10s +RESP_TIMEOUT: 10 +ATTESTATION_PROPAGATION_SLOT_RANGE: 32 +# 500ms +MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +# 2 subnets per node +SUBNETS_PER_NODE: 2 +# 2**6 (= 64) +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS +ATTESTATION_SUBNET_PREFIX_BITS: 6 + +# Deneb +# `2**7` (=128) +MAX_REQUEST_BLOCKS_DENEB: 128 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK +MAX_REQUEST_BLOB_SIDECARS: 768 +# `2**12` (= 4096 epochs, ~18 days) +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 +# `6` +BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 diff --git a/common/eth2_network_config/built_in_network_configs/holesky/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/holesky/deploy_block.txt new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/holesky/deploy_block.txt @@ -0,0 +1 @@ +0 diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 7274bbf029..1206b8b8ca 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -13,10 +13,20 @@ use discv5::enr::{CombinedKey, Enr}; use eth2_config::{instantiate_hardcoded_nets, HardcodedNet}; +use
pretty_reqwest_error::PrettyReqwestError; +use reqwest::blocking::Client; +use sensitive_url::SensitiveUrl; +use sha2::{Digest, Sha256}; +use slog::{info, warn, Logger}; use std::fs::{create_dir_all, File}; use std::io::{Read, Write}; use std::path::PathBuf; -use types::{BeaconState, ChainSpec, Config, EthSpec, EthSpecId}; +use std::str::FromStr; +use std::time::Duration; +use types::{BeaconState, ChainSpec, Config, EthSpec, EthSpecId, Hash256}; +use url::Url; + +pub use eth2_config::GenesisStateSource; pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt"; pub const BOOT_ENR_FILE: &str = "boot_enr.yaml"; @@ -32,6 +42,35 @@ instantiate_hardcoded_nets!(eth2_config); pub const DEFAULT_HARDCODED_NETWORK: &str = "mainnet"; +/// A simple slice-or-vec enum to avoid cloning the beacon state bytes in the +/// binary whilst also supporting loading them from a file at runtime. +#[derive(Clone, PartialEq, Debug)] +pub enum GenesisStateBytes { + Slice(&'static [u8]), + Vec(Vec<u8>), +} + +impl AsRef<[u8]> for GenesisStateBytes { + fn as_ref(&self) -> &[u8] { + match self { + GenesisStateBytes::Slice(slice) => slice, + GenesisStateBytes::Vec(vec) => vec.as_ref(), + } + } +} + +impl From<&'static [u8]> for GenesisStateBytes { + fn from(slice: &'static [u8]) -> Self { + GenesisStateBytes::Slice(slice) + } +} + +impl From<Vec<u8>> for GenesisStateBytes { + fn from(vec: Vec<u8>) -> Self { + GenesisStateBytes::Vec(vec) + } +} + /// Specifies an Eth2 network. /// /// See the crate-level documentation for more details. @@ -41,7 +80,8 @@ pub struct Eth2NetworkConfig { /// value to be the block number where the first deposit occurs. 
pub deposit_contract_deploy_block: u64, pub boot_enr: Option<Vec<Enr<CombinedKey>>>, - pub genesis_state_bytes: Option<Vec<u8>>, + pub genesis_state_source: GenesisStateSource, + pub genesis_state_bytes: Option<GenesisStateBytes>, pub config: Config, } @@ -65,8 +105,10 @@ impl Eth2NetworkConfig { serde_yaml::from_reader(net.boot_enr) .map_err(|e| format!("Unable to parse boot enr: {:?}", e))?, ), - genesis_state_bytes: Some(net.genesis_state_bytes.to_vec()) - .filter(|bytes| !bytes.is_empty()), + genesis_state_source: net.genesis_state_source, + genesis_state_bytes: Some(net.genesis_state_bytes) + .filter(|bytes| !bytes.is_empty()) + .map(Into::into), config: serde_yaml::from_reader(net.config) .map_err(|e| format!("Unable to parse yaml config: {:?}", e))?, }) @@ -81,8 +123,37 @@ impl Eth2NetworkConfig { } /// Returns `true` if this configuration contains a `BeaconState`. - pub fn beacon_state_is_known(&self) -> bool { - self.genesis_state_bytes.is_some() + pub fn genesis_state_is_known(&self) -> bool { + self.genesis_state_source != GenesisStateSource::Unknown + } + + /// The `genesis_validators_root` of the genesis state. May download the + /// genesis state if the value is not already available. + pub fn genesis_validators_root<E: EthSpec>( + &self, + genesis_state_url: Option<&str>, + timeout: Duration, + log: &Logger, + ) -> Result<Option<Hash256>, String> { + if let GenesisStateSource::Url { + genesis_validators_root, + .. + } = self.genesis_state_source + { + Hash256::from_str(genesis_validators_root) + .map(Option::Some) + .map_err(|e| { + format!( + "Unable to parse genesis state genesis_validators_root: {:?}", + e + ) + }) + } else { + self.genesis_state::<E>(genesis_state_url, timeout, log)? + .map(|state| state.genesis_validators_root()) + .map(Result::Ok) + .transpose() + } } /// Construct a consolidated `ChainSpec` from the YAML config. 
@@ -96,15 +167,65 @@ impl Eth2NetworkConfig { } /// Attempts to deserialize `self.beacon_state`, returning an error if it's missing or invalid. - pub fn beacon_state<E: EthSpec>(&self) -> Result<BeaconState<E>, String> { + /// + /// If the genesis state is configured to be downloaded from a URL, then the + /// `genesis_state_url` will override the built-in list of download URLs. + pub fn genesis_state<E: EthSpec>( + &self, + genesis_state_url: Option<&str>, + timeout: Duration, + log: &Logger, + ) -> Result<Option<BeaconState<E>>, String> { let spec = self.chain_spec::<E>()?; - let genesis_state_bytes = self - .genesis_state_bytes - .as_ref() - .ok_or("Genesis state is unknown")?; + match &self.genesis_state_source { + GenesisStateSource::Unknown => Ok(None), + GenesisStateSource::IncludedBytes => { + let state = self + .genesis_state_bytes + .as_ref() + .map(|bytes| { + BeaconState::from_ssz_bytes(bytes.as_ref(), &spec).map_err(|e| { + format!("Built-in genesis state SSZ bytes are invalid: {:?}", e) + }) + }) + .ok_or("Genesis state bytes missing from Eth2NetworkConfig")??; + Ok(Some(state)) + } + GenesisStateSource::Url { + urls: built_in_urls, + checksum, + genesis_validators_root, + } => { + let checksum = Hash256::from_str(checksum).map_err(|e| { + format!("Unable to parse genesis state bytes checksum: {:?}", e) + })?; + let bytes = if let Some(specified_url) = genesis_state_url { + download_genesis_state(&[specified_url], timeout, checksum, log) + } else { + download_genesis_state(built_in_urls, timeout, checksum, log) + }?; + let state = BeaconState::from_ssz_bytes(bytes.as_ref(), &spec).map_err(|e| { + format!("Downloaded genesis state SSZ bytes are invalid: {:?}", e) + })?; - BeaconState::from_ssz_bytes(genesis_state_bytes, &spec) - .map_err(|e| format!("Genesis state SSZ bytes are invalid: {:?}", e)) + let genesis_validators_root = + Hash256::from_str(genesis_validators_root).map_err(|e| { + format!( + "Unable to parse genesis state 
genesis_validators_root: {:?}", + e + ) + })?; + if state.genesis_validators_root() != genesis_validators_root { + return Err(format!( + "Downloaded genesis validators root {:?} does not match expected {:?}", + state.genesis_validators_root(), + genesis_validators_root + )); + } + + Ok(Some(state)) + } + } } /// Write the files to the directory. @@ -162,7 +283,7 @@ impl Eth2NetworkConfig { File::create(&file) .map_err(|e| format!("Unable to create {:?}: {:?}", file, e)) .and_then(|mut file| { - file.write_all(genesis_state_bytes) + file.write_all(genesis_state_bytes.as_ref()) .map_err(|e| format!("Unable to write {:?}: {:?}", file, e)) })?; } @@ -198,7 +319,7 @@ impl Eth2NetworkConfig { // The genesis state is a special case because it uses SSZ, not YAML. let genesis_file_path = base_dir.join(GENESIS_STATE_FILE); - let genesis_state_bytes = if genesis_file_path.exists() { + let (genesis_state_bytes, genesis_state_source) = if genesis_file_path.exists() { let mut bytes = vec![]; File::open(&genesis_file_path) .map_err(|e| format!("Unable to open {:?}: {:?}", genesis_file_path, e)) @@ -207,20 +328,105 @@ impl Eth2NetworkConfig { .map_err(|e| format!("Unable to read {:?}: {:?}", file, e)) })?; - Some(bytes).filter(|bytes| !bytes.is_empty()) + let state = Some(bytes).filter(|bytes| !bytes.is_empty()); + let genesis_state_source = if state.is_some() { + GenesisStateSource::IncludedBytes + } else { + GenesisStateSource::Unknown + }; + (state, genesis_state_source) } else { - None + (None, GenesisStateSource::Unknown) }; Ok(Self { deposit_contract_deploy_block, boot_enr, - genesis_state_bytes, + genesis_state_source, + genesis_state_bytes: genesis_state_bytes.map(Into::into), config, }) } } +/// Try to download a genesis state from each of the `urls` in the order they +/// are defined. Return `Ok` if any url returns a response that matches the +/// given `checksum`. 
+fn download_genesis_state( + urls: &[&str], + timeout: Duration, + checksum: Hash256, + log: &Logger, +) -> Result<Vec<u8>, String> { + if urls.is_empty() { + return Err( + "The genesis state is not present in the binary and there are no known download URLs. \ + Please use --checkpoint-sync-url or --genesis-state-url." + .to_string(), + ); + } + + let mut errors = vec![]; + for url in urls { + // URLs are always expected to be the base URL of a server that supports + // the beacon-API. + let url = parse_state_download_url(url)?; + let redacted_url = SensitiveUrl::new(url.clone()) + .map(|url| url.to_string()) + .unwrap_or_else(|_| "<REDACTED>".to_string()); + + info!( + log, + "Downloading genesis state"; + "server" => &redacted_url, + "timeout" => ?timeout, + "info" => "this may take some time on testnets with large validator counts" + ); + + let client = Client::new(); + let response = client + .get(url) + .header("Accept", "application/octet-stream") + .timeout(timeout) + .send() + .and_then(|r| r.error_for_status().and_then(|r| r.bytes())); + + match response { + Ok(bytes) => { + // Check the server response against our local checksum. + if Sha256::digest(bytes.as_ref())[..] == checksum[..] { + return Ok(bytes.into()); + } else { + warn!( + log, + "Genesis state download failed"; + "server" => &redacted_url, + "timeout" => ?timeout, + ); + errors.push(format!( + "Response from {} did not match local checksum", + redacted_url + )) + } + } + Err(e) => errors.push(PrettyReqwestError::from(e).to_string()), + } + } + Err(format!( + "Unable to download a genesis state from {} source(s): {}", + errors.len(), + errors.join(",") + )) +} + +/// Parses the `url` and joins the necessary state download path. +fn parse_state_download_url(url: &str) -> Result<Url, String> { + Url::parse(url) + .map_err(|e| format!("Invalid genesis state URL: {:?}", e))? 
+ .join("eth/v2/debug/beacon/states/genesis") + .map_err(|e| format!("Failed to append genesis state path to URL: {:?}", e)) +} + #[cfg(test)] mod tests { use super::*; @@ -260,7 +466,9 @@ mod tests { #[test] fn mainnet_genesis_state() { let config = Eth2NetworkConfig::from_hardcoded_net(&MAINNET).unwrap(); - config.beacon_state::<E>().expect("beacon state can decode"); + config + .genesis_state::<E>(None, Duration::from_secs(1), &logging::test_logger()) + .expect("beacon state can decode"); } #[test] @@ -285,10 +493,25 @@ mod tests { assert_eq!( config.genesis_state_bytes.is_some(), - net.genesis_is_known, + net.genesis_state_source == GenesisStateSource::IncludedBytes, "{:?}", net.name ); + + if let GenesisStateSource::Url { + urls, + checksum, + genesis_validators_root, + } = net.genesis_state_source + { + Hash256::from_str(checksum).expect("the checksum must be a valid 32-byte value"); + Hash256::from_str(genesis_validators_root) + .expect("the GVR must be a valid 32-byte value"); + for url in urls { + parse_state_download_url(url).expect("url must be valid"); + } + } + assert_eq!(config.config.config_name, Some(net.config_dir.to_string())); } } @@ -324,10 +547,20 @@ mod tests { let base_dir = temp_dir.path().join("my_testnet"); let deposit_contract_deploy_block = 42; + let genesis_state_source = if genesis_state.is_some() { + GenesisStateSource::IncludedBytes + } else { + GenesisStateSource::Unknown + }; + let testnet: Eth2NetworkConfig = Eth2NetworkConfig { deposit_contract_deploy_block, boot_enr, - genesis_state_bytes: genesis_state.as_ref().map(Encode::as_ssz_bytes), + genesis_state_source, + genesis_state_bytes: genesis_state + .as_ref() + .map(Encode::as_ssz_bytes) + .map(Into::into), config, }; diff --git a/common/pretty_reqwest_error/src/lib.rs b/common/pretty_reqwest_error/src/lib.rs index 4c605f38ae..0aaee5965e 100644 --- a/common/pretty_reqwest_error/src/lib.rs +++ b/common/pretty_reqwest_error/src/lib.rs @@ -55,6 +55,12 @@ impl fmt::Debug for 
PrettyReqwestError { } } +impl fmt::Display for PrettyReqwestError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self) + } +} + impl From<reqwest::Error> for PrettyReqwestError { fn from(inner: reqwest::Error) -> Self { Self(inner) diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs index 34144cd86d..bddd4baad8 100644 --- a/lcli/src/eth1_genesis.rs +++ b/lcli/src/eth1_genesis.rs @@ -49,7 +49,7 @@ pub fn run<T: EthSpec>( .wait_for_genesis_state::<T>(ETH1_GENESIS_UPDATE_INTERVAL, spec) .await .map(move |genesis_state| { - eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes()); + eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes().into()); eth2_network_config.force_write_to_file(testnet_dir) }) .map_err(|e| format!("Failed to find genesis: {}", e))?; diff --git a/lcli/src/interop_genesis.rs b/lcli/src/interop_genesis.rs index 57a5ba0098..1a0b81fcb7 100644 --- a/lcli/src/interop_genesis.rs +++ b/lcli/src/interop_genesis.rs @@ -42,7 +42,7 @@ pub fn run<T: EthSpec>(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), &spec, )?; - eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes()); + eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes().into()); eth2_network_config.force_write_to_file(testnet_dir)?; Ok(()) diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 01a44cabef..973993f979 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -1,7 +1,7 @@ use account_utils::eth2_keystore::keypair_from_secret; use clap::ArgMatches; use clap_utils::{parse_optional, parse_required, parse_ssz_optional}; -use eth2_network_config::Eth2NetworkConfig; +use eth2_network_config::{Eth2NetworkConfig, GenesisStateSource}; use eth2_wallet::bip39::Seed; use eth2_wallet::bip39::{Language, Mnemonic}; use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType}; @@ -190,7 +190,8 @@ pub fn run<T: 
EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul let testnet = Eth2NetworkConfig { deposit_contract_deploy_block, boot_enr: Some(vec![]), - genesis_state_bytes, + genesis_state_bytes: genesis_state_bytes.map(Into::into), + genesis_state_source: GenesisStateSource::IncludedBytes, config: Config::from_chain_spec::<T>(&spec), }; diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index d8b522307c..6384fc53cd 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -324,6 +324,30 @@ fn main() { .takes_value(true) .global(true) ) + .arg( + Arg::with_name("genesis-state-url") + .long("genesis-state-url") + .value_name("URL") + .help( + "A URL of a beacon-API compatible server from which to download the genesis state. \ + Checkpoint sync server URLs can generally be used with this flag. \ + If not supplied, a default URL or the --checkpoint-sync-url may be used. \ + If the genesis state is already included in this binary then this value will be ignored.", + ) + .takes_value(true) + .global(true), + ) + .arg( + Arg::with_name("genesis-state-url-timeout") + .long("genesis-state-url-timeout") + .value_name("SECONDS") + .help( + "The timeout in seconds for the request to --genesis-state-url.", + ) + .takes_value(true) + .default_value("180") + .global(true), + ) .subcommand(beacon_node::cli_app()) .subcommand(boot_node::cli_app()) .subcommand(validator_client::cli_app()) diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 5069b0261f..05b4358509 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -2406,3 +2406,28 @@ fn http_duplicate_block_status_override() { assert_eq!(config.http_api.duplicate_block_status_code.as_u16(), 301) }); } + +#[test] +fn genesis_state_url_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.genesis_state_url, None); + assert_eq!(config.genesis_state_url_timeout, Duration::from_secs(180)); + 
}); +} + +#[test] +fn genesis_state_url_value() { + CommandLineTest::new() + .flag("genesis-state-url", Some("http://genesis.com")) + .flag("genesis-state-url-timeout", Some("42")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.genesis_state_url.as_deref(), + Some("http://genesis.com") + ); + assert_eq!(config.genesis_state_url_timeout, Duration::from_secs(42)); + }); +} From f284e0e26445f776e470ad694a947cb10adc5686 Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Mon, 28 Aug 2023 05:34:28 +0000 Subject: [PATCH 70/75] Fix bug in block root storage (#4663) ## Issue Addressed Fix a bug in the storage of the linear block roots array in the freezer DB. Previously this array was always written as part of state storage (or block backfill). With state pruning enabled by #4610, these states were no longer being written and as a result neither were the block roots. The impact is quite low, we would just log an error when trying to forwards-iterate the block roots, which for validating nodes only happens when they try to look up blocks for peers: > Aug 25 03:42:36.980 ERRO Missing chunk in forwards iterator chunk index: 49726, service: freezer_db Any node checkpoint synced off `unstable` is affected and has a corrupt database. If you see the log above, you need to re-sync with the fix. Nodes that haven't checkpoint synced recently should _not_ be corrupted, even if they ran the buggy version. ## Proposed Changes - Use a `ChunkWriter` to write the block roots when states are not being stored. - Tweak the usage of `get_latest_restore_point` so that it doesn't return a nonsense value when state pruning is enabled. - Tweak the guarantee on the block roots array so that block roots are assumed available up to the split slot (exclusive). This is a bit nicer than relying on anything to do with the latest restore point, which is a nonsensical concept when there aren't any restore points. 
## Additional Info I'm looking forward to deleting the chunked vector code for good when we merge tree-states :grin: --- Cargo.lock | 1 + beacon_node/beacon_chain/src/builder.rs | 15 ++++ beacon_node/beacon_chain/tests/store_tests.rs | 2 +- beacon_node/store/src/chunked_iter.rs | 8 +- beacon_node/store/src/forwards_iter.rs | 54 ++++++++++-- beacon_node/store/src/hot_cold_store.rs | 84 +++++++++++++++---- watch/Cargo.toml | 1 + watch/tests/tests.rs | 23 +++-- 8 files changed, 145 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 75c15ba781..27d7b655e3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9051,6 +9051,7 @@ dependencies = [ "http_api", "hyper", "log", + "logging", "network", "r2d2", "rand 0.7.3", diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index bb629d6624..54739f2b8a 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -481,6 +481,21 @@ where let (_, updated_builder) = self.set_genesis_state(genesis_state)?; self = updated_builder; + // Fill in the linear block roots between the checkpoint block's slot and the aligned + // state's slot. All slots less than the block's slot will be handled by block backfill, + // while states greater or equal to the checkpoint state will be handled by `migrate_db`. + let block_root_batch = store + .store_frozen_block_root_at_skip_slots( + weak_subj_block.slot(), + weak_subj_state.slot(), + weak_subj_block_root, + ) + .map_err(|e| format!("Error writing frozen block roots: {e:?}"))?; + store + .cold_db + .do_atomically(block_root_batch) + .map_err(|e| format!("Error writing frozen block roots: {e:?}"))?; + // Write the state and block non-atomically, it doesn't matter if they're forgotten // about on a crash restart. 
store diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index bf68657b4f..ab54af42c7 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -421,7 +421,7 @@ async fn forwards_iter_block_and_state_roots_until() { // The last restore point slot is the point at which the hybrid forwards iterator behaviour // changes. - let last_restore_point_slot = store.get_latest_restore_point_slot(); + let last_restore_point_slot = store.get_latest_restore_point_slot().unwrap(); assert!(last_restore_point_slot > 0); let chain = &harness.chain; diff --git a/beacon_node/store/src/chunked_iter.rs b/beacon_node/store/src/chunked_iter.rs index 8ef0b6d201..b3322b5225 100644 --- a/beacon_node/store/src/chunked_iter.rs +++ b/beacon_node/store/src/chunked_iter.rs @@ -30,16 +30,16 @@ where /// Create a new iterator which can yield elements from `start_vindex` up to the last /// index stored by the restore point at `last_restore_point_slot`. /// - /// The `last_restore_point` slot should be the slot of a recent restore point as obtained from - /// `HotColdDB::get_latest_restore_point_slot`. We pass it as a parameter so that the caller can + /// The `freezer_upper_limit` slot should be the slot of a recent restore point as obtained from + /// `Root::freezer_upper_limit`. We pass it as a parameter so that the caller can /// maintain a stable view of the database (see `HybridForwardsBlockRootsIterator`). pub fn new( store: &'a HotColdDB<E, Hot, Cold>, start_vindex: usize, - last_restore_point_slot: Slot, + freezer_upper_limit: Slot, spec: &ChainSpec, ) -> Self { - let (_, end_vindex) = F::start_and_end_vindex(last_restore_point_slot, spec); + let (_, end_vindex) = F::start_and_end_vindex(freezer_upper_limit, spec); // Set the next chunk to the one containing `start_vindex`. 
let next_cindex = start_vindex / F::chunk_size(); diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs index 353be6bf05..125b73a458 100644 --- a/beacon_node/store/src/forwards_iter.rs +++ b/beacon_node/store/src/forwards_iter.rs @@ -19,6 +19,14 @@ pub trait Root<E: EthSpec>: Field<E, Value = Hash256> { end_state: BeaconState<E>, end_root: Hash256, ) -> Result<SimpleForwardsIterator>; + + /// The first slot for which this field is *no longer* stored in the freezer database. + /// + /// If `None`, then this field is not stored in the freezer database at all due to pruning + /// configuration. + fn freezer_upper_limit<Hot: ItemStore<E>, Cold: ItemStore<E>>( + store: &HotColdDB<E, Hot, Cold>, + ) -> Option<Slot>; } impl<E: EthSpec> Root<E> for BlockRoots { @@ -39,6 +47,13 @@ impl<E: EthSpec> Root<E> for BlockRoots { )?; Ok(SimpleForwardsIterator { values }) } + + fn freezer_upper_limit<Hot: ItemStore<E>, Cold: ItemStore<E>>( + store: &HotColdDB<E, Hot, Cold>, + ) -> Option<Slot> { + // Block roots are stored for all slots up to the split slot (exclusive). + Some(store.get_split_slot()) + } } impl<E: EthSpec> Root<E> for StateRoots { @@ -59,6 +74,15 @@ impl<E: EthSpec> Root<E> for StateRoots { )?; Ok(SimpleForwardsIterator { values }) } + + fn freezer_upper_limit<Hot: ItemStore<E>, Cold: ItemStore<E>>( + store: &HotColdDB<E, Hot, Cold>, + ) -> Option<Slot> { + // State roots are stored for all slots up to the latest restore point (exclusive). + // There may not be a latest restore point if state pruning is enabled, in which + // case this function will return `None`. + store.get_latest_restore_point_slot() + } } /// Forwards root iterator that makes use of a flat field table in the freezer DB. 
@@ -118,6 +142,7 @@ impl Iterator for SimpleForwardsIterator { pub enum HybridForwardsIterator<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>> { PreFinalization { iter: Box<FrozenForwardsIterator<'a, E, F, Hot, Cold>>, + end_slot: Option<Slot>, /// Data required by the `PostFinalization` iterator when we get to it. continuation_data: Option<Box<(BeaconState<E>, Hash256)>>, }, @@ -129,6 +154,7 @@ pub enum HybridForwardsIterator<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, C PostFinalization { iter: SimpleForwardsIterator, }, + Finished, } impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>> @@ -138,8 +164,8 @@ impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>> /// /// The `get_state` closure should return a beacon state and final block/state root to backtrack /// from in the case where the iterated range does not lie entirely within the frozen portion of - /// the database. If an `end_slot` is provided and it is before the database's latest restore - /// point slot then the `get_state` closure will not be called at all. + /// the database. If an `end_slot` is provided and it is before the database's freezer upper + /// limit for the field then the `get_state` closure will not be called at all. /// /// It is OK for `get_state` to hold a lock while this function is evaluated, as the returned /// iterator is as lazy as possible and won't do any work apart from calling `get_state`. @@ -155,13 +181,15 @@ impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>> ) -> Result<Self> { use HybridForwardsIterator::*; - let latest_restore_point_slot = store.get_latest_restore_point_slot(); + // First slot at which this field is *not* available in the freezer. i.e. all slots less + // than this slot have their data available in the freezer. 
+ let freezer_upper_limit = F::freezer_upper_limit(store).unwrap_or(Slot::new(0)); - let result = if start_slot < latest_restore_point_slot { + let result = if start_slot < freezer_upper_limit { let iter = Box::new(FrozenForwardsIterator::new( store, start_slot, - latest_restore_point_slot, + freezer_upper_limit, spec, )); @@ -169,13 +197,14 @@ impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>> // `end_slot`. If it tries to continue further a `NoContinuationData` error will be // returned. let continuation_data = - if end_slot.map_or(false, |end_slot| end_slot < latest_restore_point_slot) { + if end_slot.map_or(false, |end_slot| end_slot < freezer_upper_limit) { None } else { Some(Box::new(get_state())) }; PreFinalization { iter, + end_slot, continuation_data, } } else { @@ -195,6 +224,7 @@ impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>> match self { PreFinalization { iter, + end_slot, continuation_data, } => { match iter.next() { @@ -203,10 +233,17 @@ impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>> // to a post-finalization iterator beginning from the last slot // of the pre iterator. None => { + // If the iterator has an end slot (inclusive) which has already been + // covered by the (exclusive) frozen forwards iterator, then we're done! 
+ let iter_end_slot = Slot::from(iter.inner.end_vindex); + if end_slot.map_or(false, |end_slot| iter_end_slot == end_slot + 1) { + *self = Finished; + return Ok(None); + } + let continuation_data = continuation_data.take(); let store = iter.inner.store; - let start_slot = Slot::from(iter.inner.end_vindex); - + let start_slot = iter_end_slot; *self = PostFinalizationLazy { continuation_data, store, @@ -230,6 +267,7 @@ impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>> self.do_next() } PostFinalization { iter } => iter.next().transpose(), + Finished => Ok(None), } } } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 47839ed3e9..87f8e0ffc3 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -18,7 +18,7 @@ use crate::metadata::{ }; use crate::metrics; use crate::{ - get_key_for_col, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, + get_key_for_col, ChunkWriter, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, PartialBeaconState, StoreItem, StoreOp, }; use itertools::process_results; @@ -963,6 +963,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold> ops.push(op); // 2. Store updated vector entries. + // Block roots need to be written here as well as by the `ChunkWriter` in `migrate_db` + // because states may require older block roots, and the writer only stores block roots + // between the previous split point and the new split point. let db = &self.cold_db; store_updated_vector(BlockRoots, db, state, &self.spec, ops)?; store_updated_vector(StateRoots, db, state, &self.spec, ops)?; @@ -1243,10 +1246,21 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold> }; } - /// Fetch the slot of the most recently stored restore point. 
- pub fn get_latest_restore_point_slot(&self) -> Slot { - (self.get_split_slot() - 1) / self.config.slots_per_restore_point - * self.config.slots_per_restore_point + /// Fetch the slot of the most recently stored restore point (if any). + pub fn get_latest_restore_point_slot(&self) -> Option<Slot> { + let split_slot = self.get_split_slot(); + let anchor = self.get_anchor_info(); + + // There are no restore points stored if the state upper limit lies in the hot database. + // It hasn't been reached yet, and may never be. + if anchor.map_or(false, |a| a.state_upper_limit >= split_slot) { + None + } else { + Some( + (split_slot - 1) / self.config.slots_per_restore_point + * self.config.slots_per_restore_point, + ) + } } /// Load the database schema version from disk. @@ -1585,6 +1599,25 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold> ) } + /// Update the linear array of frozen block roots with the block root for several skipped slots. + /// + /// Write the block root at all slots from `start_slot` (inclusive) to `end_slot` (exclusive). + pub fn store_frozen_block_root_at_skip_slots( + &self, + start_slot: Slot, + end_slot: Slot, + block_root: Hash256, + ) -> Result<Vec<KeyValueStoreOp>, Error> { + let mut ops = vec![]; + let mut block_root_writer = + ChunkWriter::<BlockRoots, _, _>::new(&self.cold_db, start_slot.as_usize())?; + for slot in start_slot.as_usize()..end_slot.as_usize() { + block_root_writer.set(slot, block_root, &mut ops)?; + } + block_root_writer.write(&mut ops)?; + Ok(ops) + } + /// Try to prune all execution payloads, returning early if there is no need to prune. 
pub fn try_prune_execution_payloads(&self, force: bool) -> Result<(), Error> { let split = self.get_split_info(); @@ -1725,7 +1758,14 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>( return Err(HotColdDBError::FreezeSlotUnaligned(finalized_state.slot()).into()); } - let mut hot_db_ops: Vec<StoreOp<E>> = Vec::new(); + let mut hot_db_ops = vec![]; + let mut cold_db_ops = vec![]; + + // Chunk writer for the linear block roots in the freezer DB. + // Start at the new upper limit because we iterate backwards. + let new_frozen_block_root_upper_limit = finalized_state.slot().as_usize().saturating_sub(1); + let mut block_root_writer = + ChunkWriter::<BlockRoots, _, _>::new(&store.cold_db, new_frozen_block_root_upper_limit)?; // 1. Copy all of the states between the new finalized state and the split slot, from the hot DB // to the cold DB. Delete the execution payloads of these now-finalized blocks. @@ -1750,6 +1790,9 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>( // Delete the old summary, and the full state if we lie on an epoch boundary. hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot))); + // Store the block root for this slot in the linear array of frozen block roots. + block_root_writer.set(slot.as_usize(), block_root, &mut cold_db_ops)?; + // Do not try to store states if a restore point is yet to be stored, or will never be // stored (see `STATE_UPPER_LIMIT_NO_RETAIN`). Make an exception for the genesis state // which always needs to be copied from the hot DB to the freezer and should not be deleted. 
@@ -1759,29 +1802,34 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>( .map_or(false, |anchor| slot < anchor.state_upper_limit) { debug!(store.log, "Pruning finalized state"; "slot" => slot); + continue; } - let mut cold_db_ops: Vec<KeyValueStoreOp> = Vec::new(); - - if slot % store.config.slots_per_restore_point == 0 { - let state: BeaconState<E> = get_full_state(&store.hot_db, &state_root, &store.spec)? - .ok_or(HotColdDBError::MissingStateToFreeze(state_root))?; - - store.store_cold_state(&state_root, &state, &mut cold_db_ops)?; - } - // Store a pointer from this state root to its slot, so we can later reconstruct states // from their state root alone. let cold_state_summary = ColdStateSummary { slot }; let op = cold_state_summary.as_kv_store_op(state_root); cold_db_ops.push(op); - // There are data dependencies between calls to `store_cold_state()` that prevent us from - // doing one big call to `store.cold_db.do_atomically()` at end of the loop. - store.cold_db.do_atomically(cold_db_ops)?; + if slot % store.config.slots_per_restore_point == 0 { + let state: BeaconState<E> = get_full_state(&store.hot_db, &state_root, &store.spec)? + .ok_or(HotColdDBError::MissingStateToFreeze(state_root))?; + + store.store_cold_state(&state_root, &state, &mut cold_db_ops)?; + + // Commit the batch of cold DB ops whenever a full state is written. Each state stored + // may read the linear fields of previous states stored. + store + .cold_db + .do_atomically(std::mem::take(&mut cold_db_ops))?; + } } + // Finish writing the block roots and commit the remaining cold DB ops. + block_root_writer.write(&mut cold_db_ops)?; + store.cold_db.do_atomically(cold_db_ops)?; + // Warning: Critical section. We have to take care not to put any of the two databases in an // inconsistent state if the OS process dies at any point during the freezeing // procedure. 
diff --git a/watch/Cargo.toml b/watch/Cargo.toml index b29df14439..3dc3b7c190 100644 --- a/watch/Cargo.toml +++ b/watch/Cargo.toml @@ -45,3 +45,4 @@ network = { path = "../beacon_node/network" } testcontainers = { git = "https://github.com/testcontainers/testcontainers-rs/", rev = "0f2c9851" } unused_port = { path = "../common/unused_port" } task_executor = { path = "../common/task_executor" } +logging = { path = "../common/logging" } diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs index a54386eb75..dc0b8af6e3 100644 --- a/watch/tests/tests.rs +++ b/watch/tests/tests.rs @@ -7,12 +7,21 @@ use beacon_chain::{ }; use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; use http_api::test_utils::{create_api_server, ApiServer}; +use log::error; +use logging::test_logger; use network::NetworkReceivers; - use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; +use std::collections::HashMap; +use std::env; +use std::net::SocketAddr; +use std::time::Duration; +use testcontainers::{clients::Cli, core::WaitFor, Image, RunnableImage}; use tokio::sync::oneshot; +use tokio::{runtime, task::JoinHandle}; +use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls}; use types::{Hash256, MainnetEthSpec, Slot}; +use unused_port::unused_tcp4_port; use url::Url; use watch::{ client::WatchHttpClient, @@ -22,17 +31,6 @@ use watch::{ updater::{handler::*, run_updater, Config as UpdaterConfig, WatchSpec}, }; -use log::error; -use std::collections::HashMap; -use std::env; -use std::net::SocketAddr; -use std::time::Duration; -use tokio::{runtime, task::JoinHandle}; -use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls}; -use unused_port::unused_tcp4_port; - -use testcontainers::{clients::Cli, core::WaitFor, Image, RunnableImage}; - #[derive(Debug)] pub struct Postgres(HashMap<String, String>); @@ -132,6 +130,7 @@ impl TesterBuilder { reconstruct_historic_states: true, ..ChainConfig::default() }) + .logger(test_logger()) 
.deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .build(); From 0c23c86849401ea38456e8b1b9a9b0ea4b826ea7 Mon Sep 17 00:00:00 2001 From: Philippe Schommers <philippe@schommers.be> Date: Tue, 29 Aug 2023 05:56:30 +0000 Subject: [PATCH 71/75] feat: add chiado (#4530) ## Issue Addressed N/A ## Proposed Changes Adds the Chiado (Gnosis testnet) network to the builtin one. ## Additional Info It's a fairly trivial change all things considered as the preset already exists, so shouldn't be hard to maintain. It compiles and seems to work, but I'm sure I missed something? Co-authored-by: Paul Hauner <paul@paulhauner.com> --- book/src/merge-migration.md | 3 +- book/src/run_a_node.md | 3 +- common/eth2_config/src/lib.rs | 17 ++ .../chiado/boot_enr.yaml | 8 + .../chiado/config.yaml | 154 ++++++++++++++++++ .../chiado/deploy_block.txt | 1 + common/eth2_network_config/src/lib.rs | 4 +- 7 files changed, 186 insertions(+), 4 deletions(-) create mode 100644 common/eth2_network_config/built_in_network_configs/chiado/boot_enr.yaml create mode 100644 common/eth2_network_config/built_in_network_configs/chiado/config.yaml create mode 100644 common/eth2_network_config/built_in_network_configs/chiado/deploy_block.txt diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index acca0bbeb3..bab520b569 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -21,7 +21,7 @@ engine to a merge-ready version. ## When? -All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**, **Gnosis**) have successfully undergone the Bellatrix fork and transitioned to a post-merge Network. Your node must have a merge-ready configuration to continue operating. Table below lists the date at which Bellatrix and The Merge occurred: +All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**, **Chiado**, **Gnosis**) have successfully undergone the Bellatrix fork and transitioned to a post-merge Network. 
Your node must have a merge-ready configuration to continue operating. Table below lists the date at which Bellatrix and The Merge occurred: <div align="center"> @@ -31,6 +31,7 @@ All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln | Sepolia | 20<sup>th</sup> June 2022 | 6<sup>th</sup> July 2022 | | | Goerli | 4<sup>th</sup> August 2022 | 10<sup>th</sup> August 2022 | Previously named `Prater`| | Mainnet | 6<sup>th</sup> September 2022 | 15<sup>th</sup> September 2022 | +| Chiado | 10<sup>th</sup> October 2022 | 4<sup>th</sup> November 2022 | | Gnosis| 30<sup>th</sup> November 2022 | 8<sup>th</sup> December 2022 </div> diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md index a31aedf785..1ea1427335 100644 --- a/book/src/run_a_node.md +++ b/book/src/run_a_node.md @@ -58,7 +58,8 @@ Notable flags: - `lighthouse --network mainnet`: Mainnet. - `lighthouse --network goerli`: Goerli (testnet). - `lighthouse --network sepolia`: Sepolia (testnet). - - `lighthouse --network gnosis`: Gnosis chain + - `lighthouse --network chiado`: Chiado (testnet). + - `lighthouse --network gnosis`: Gnosis chain. > Note: Using the correct `--network` flag is very important; using the wrong flag can result in penalties, slashings or lost deposits. As a rule of thumb, *always* diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index bb14d06756..6487151a92 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -33,6 +33,13 @@ const HOLESKY_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url genesis_validators_root: "0x9143aa7c615a7f7115e2b6aac319c03529df8242ae705fba9df39b79c59fa8b1", }; +const CHIADO_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url { + // No default checkpoint sources are provided. 
+ urls: &[], + checksum: "0xd4a039454c7429f1dfaa7e11e397ef3d0f50d2d5e4c0e4dc04919d153aa13af1", + genesis_validators_root: "0x9d642dac73058fbf39c0ae41ab1e34e4d889043cb199851ded7095bc99eb4c1e", +}; + /// The core configuration of a Lighthouse beacon node. #[derive(Debug, Clone)] pub struct Eth2Config { @@ -306,6 +313,16 @@ define_hardcoded_nets!( // Describes how the genesis state can be obtained. GenesisStateSource::IncludedBytes ), + ( + // Network name (must be unique among all networks). + chiado, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "chiado", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + CHIADO_GENESIS_STATE_SOURCE + ), ( // Network name (must be unique among all networks). sepolia, diff --git a/common/eth2_network_config/built_in_network_configs/chiado/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/chiado/boot_enr.yaml new file mode 100644 index 0000000000..96baffde6f --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/chiado/boot_enr.yaml @@ -0,0 +1,8 @@ +# chiado-teku-0 +- "enr:-Ly4QLYLNqrjvSxD3lpAPBUNlxa6cIbe79JqLZLFcZZjWoCjZcw-85agLUErHiygG2weRSCLnd5V460qTbLbwJQsfZkoh2F0dG5ldHOI__________-EZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhKq7mu-Jc2VjcDI1NmsxoQP900YAYa9kdvzlSKGjVo-F3XVzATjOYp3BsjLjSophO4hzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA" +# chiado-teku-1 +- "enr:-Ly4QCGeYvTCNOGKi0mKRUd45rLj96b4pH98qG7B9TCUGXGpHZALtaL2-XfjASQyhbCqENccI4PGXVqYTIehNT9KJMQgh2F0dG5ldHOI__________-EZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhIuQrVSJc2VjcDI1NmsxoQP9iDchx2PGl3JyJ29B9fhLCvVMN6n23pPAIIeFV-sHOIhzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA" +#GnosisDAO Bootnode: 3.71.132.231 +- 
"enr:-Ly4QAtr21x5Ps7HYhdZkIBRBgcBkvlIfEel1YNjtFWf4cV3au2LgBGICz9PtEs9-p2HUl_eME8m1WImxTxSB3AkCMwBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhANHhOeJc2VjcDI1NmsxoQNLp1QPV8-pyMCohOtj6xGtSBM_GtVTqzlbvNsCF4ezkYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA" +#GnosisDAO Bootnode: 3.69.35.13 +- "enr:-Ly4QLgn8Bx6faigkKUGZQvd1HDToV2FAxZIiENK-lczruzQb90qJK-4E65ADly0s4__dQOW7IkLMW7ZAyJy2vtiLy8Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhANFIw2Jc2VjcDI1NmsxoQMa-fWEy9UJHfOl_lix3wdY5qust78sHAqZnWwEiyqKgYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA" diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml new file mode 100644 index 0000000000..47b285a654 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -0,0 +1,154 @@ +# Extends the mainnet preset +PRESET_BASE: gnosis +# needs to exist because of Prysm. Otherwise it conflicts with mainnet genesis +CONFIG_NAME: chiado + +# Genesis +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 6000 +# 10 October 2022 10:00:00 GMT+0000 +MIN_GENESIS_TIME: 1665396000 +GENESIS_DELAY: 300 + +# Projected time: 2022-11-04T15:00:00.000Z, block: 680928 +TERMINAL_TOTAL_DIFFICULTY: 231707791542740786049188744689299064356246512 + +# Deposit contract +# --------------------------------------------------------------- +# NOTE: Don't use a value too high, or Teku rejects it (4294906129 NOK) +DEPOSIT_CHAIN_ID: 10200 +DEPOSIT_NETWORK_ID: 10200 +DEPOSIT_CONTRACT_ADDRESS: 0xb97036A26259B7147018913bD58a774cf91acf25 + +# Misc +# --------------------------------------------------------------- +# 2**6 (= 64) +MAX_COMMITTEES_PER_SLOT: 64 +# 2**7 (= 128) +TARGET_COMMITTEE_SIZE: 128 +# 2**11 (= 2,048) +MAX_VALIDATORS_PER_COMMITTEE: 2048 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**12 (= 4096) +CHURN_LIMIT_QUOTIENT: 4096 +# See issue 563 +SHUFFLE_ROUND_COUNT: 90 +# 4 +HYSTERESIS_QUOTIENT: 4 +# 1 (minus 0.25) 
+HYSTERESIS_DOWNWARD_MULTIPLIER: 1 +# 5 (plus 1.25) +HYSTERESIS_UPWARD_MULTIPLIER: 5 +# Validator +# --------------------------------------------------------------- +# 2**10 (= 1024) ~1.4 hour +ETH1_FOLLOW_DISTANCE: 1024 +# 2**4 (= 16) +TARGET_AGGREGATORS_PER_COMMITTEE: 16 +# 2**0 (= 1) +RANDOM_SUBNETS_PER_VALIDATOR: 1 +# 2**8 (= 256) +EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256 +# 6 (estimate from xDai mainnet) +SECONDS_PER_ETH1_BLOCK: 6 + +# Gwei values +# --------------------------------------------------------------- +# 2**0 * 10**9 (= 1,000,000,000) Gwei +MIN_DEPOSIT_AMOUNT: 1000000000 +# 2**5 * 10**9 (= 32,000,000,000) Gwei +MAX_EFFECTIVE_BALANCE: 32000000000 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**0 * 10**9 (= 1,000,000,000) Gwei +EFFECTIVE_BALANCE_INCREMENT: 1000000000 +# Initial values +# --------------------------------------------------------------- +# GBC area code +GENESIS_FORK_VERSION: 0x0000006f +BLS_WITHDRAWAL_PREFIX: 0x00 +# Time parameters +# --------------------------------------------------------------- +# 5 seconds +SECONDS_PER_SLOT: 5 +# 2**0 (= 1) slots 12 seconds +MIN_ATTESTATION_INCLUSION_DELAY: 1 +# 2**4 (= 16) slots 1.87 minutes +SLOTS_PER_EPOCH: 16 +# 2**0 (= 1) epochs 1.87 minutes +MIN_SEED_LOOKAHEAD: 1 +# 2**2 (= 4) epochs 7.47 minutes +MAX_SEED_LOOKAHEAD: 4 +# 2**6 (= 64) epochs ~2 hours +EPOCHS_PER_ETH1_VOTING_PERIOD: 64 +# 2**13 (= 8,192) slots ~15.9 hours +SLOTS_PER_HISTORICAL_ROOT: 8192 +# 2**8 (= 256) epochs ~8 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs ~8 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**2 (= 4) epochs 7.47 minutes +MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 + +# State vector lengths +# --------------------------------------------------------------- +# 2**16 (= 65,536) epochs ~85 days +EPOCHS_PER_HISTORICAL_VECTOR: 65536 +# 2**13 (= 8,192) epochs ~10.6 days +EPOCHS_PER_SLASHINGS_VECTOR: 8192 +# 2**24 (= 16,777,216) historical roots, ~15,243 years 
+HISTORICAL_ROOTS_LIMIT: 16777216 +# 2**40 (= 1,099,511,627,776) validator spots +VALIDATOR_REGISTRY_LIMIT: 1099511627776 +# Reward and penalty quotients +# --------------------------------------------------------------- +# 25 +BASE_REWARD_FACTOR: 25 +# 2**9 (= 512) +WHISTLEBLOWER_REWARD_QUOTIENT: 512 +# 2**3 (= 8) +PROPOSER_REWARD_QUOTIENT: 8 +# 2**26 (= 67,108,864) +INACTIVITY_PENALTY_QUOTIENT: 67108864 +# 2**7 (= 128) (lower safety margin at Phase 0 genesis) +MIN_SLASHING_PENALTY_QUOTIENT: 128 +# 1 (lower safety margin at Phase 0 genesis) +PROPORTIONAL_SLASHING_MULTIPLIER: 1 +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_PROPOSER_SLASHINGS: 16 +# 2**1 (= 2) +MAX_ATTESTER_SLASHINGS: 2 +# 2**7 (= 128) +MAX_ATTESTATIONS: 128 +# 2**4 (= 16) +MAX_DEPOSITS: 16 +# 2**4 (= 16) +MAX_VOLUNTARY_EXITS: 16 +# Signature domains +# --------------------------------------------------------------- +DOMAIN_BEACON_PROPOSER: 0x00000000 +DOMAIN_BEACON_ATTESTER: 0x01000000 +DOMAIN_RANDAO: 0x02000000 +DOMAIN_DEPOSIT: 0x03000000 +DOMAIN_VOLUNTARY_EXIT: 0x04000000 +DOMAIN_SELECTION_PROOF: 0x05000000 +DOMAIN_AGGREGATE_AND_PROOF: 0x06000000 +DOMAIN_SYNC_COMMITTEE: 0x07000000 +DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF: 0x08000000 +DOMAIN_CONTRIBUTION_AND_PROOF: 0x09000000 + +# Altair +ALTAIR_FORK_VERSION: 0x0100006f +ALTAIR_FORK_EPOCH: 90 # Mon Oct 10 2022 12:00:00 GMT+0000 +# Bellatrix +BELLATRIX_FORK_VERSION: 0x0200006f +BELLATRIX_FORK_EPOCH: 180 # Mon Oct 10 2022 14:00:00 GMT+0000 +# Capella +CAPELLA_FORK_VERSION: 0x0300006f +CAPELLA_FORK_EPOCH: 244224 # Wed May 24 2023 13:12:00 GMT+0000 + +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 diff --git a/common/eth2_network_config/built_in_network_configs/chiado/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/chiado/deploy_block.txt new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ 
b/common/eth2_network_config/built_in_network_configs/chiado/deploy_block.txt @@ -0,0 +1 @@ +0 diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 1206b8b8ca..769f656e85 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -482,10 +482,10 @@ mod tests { fn hard_coded_nets_work() { for net in HARDCODED_NETS { let config = Eth2NetworkConfig::from_hardcoded_net(net) - .unwrap_or_else(|_| panic!("{:?}", net.name)); + .unwrap_or_else(|e| panic!("{:?}: {:?}", net.name, e)); // Ensure we can parse the YAML config to a chain spec. - if net.name == types::GNOSIS { + if config.config.preset_base == types::GNOSIS { config.chain_spec::<GnosisEthSpec>().unwrap(); } else { config.chain_spec::<MainnetEthSpec>().unwrap(); From 41ac9b6a08725e66b617427caa68bc413bb7f4cd Mon Sep 17 00:00:00 2001 From: Jimmy Chen <jchen.tc@gmail.com> Date: Wed, 30 Aug 2023 21:41:35 +1000 Subject: [PATCH 72/75] Pin foundry toolchain version to fix stuck CI jobs --- .github/workflows/test-suite.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 91a0b73453..5819f80484 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -67,6 +67,8 @@ jobs: run: rustup update stable - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run tests in release run: make test-release release-tests-windows: @@ -88,6 +90,8 @@ jobs: npm config set msvs_version 2019 - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Install make run: choco install -y make - uses: KyleMayes/install-llvm-action@v1 @@ -143,6 +147,8 @@ jobs: run: rustup update stable - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 + with: + version: 
nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run tests in debug run: make test-debug state-transition-vectors-ubuntu: @@ -189,6 +195,8 @@ jobs: run: rustup update stable - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run the beacon chain sim that starts from an eth1 contract run: cargo run --release --bin simulator eth1-sim merge-transition-ubuntu: @@ -201,6 +209,8 @@ jobs: run: rustup update stable - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run the beacon chain sim and go through the merge transition run: cargo run --release --bin simulator eth1-sim --post-merge no-eth1-simulator-ubuntu: @@ -223,6 +233,8 @@ jobs: run: rustup update stable - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run the syncing simulator run: cargo run --release --bin simulator syncing-sim doppelganger-protection-test: From e99ba3a14e5f85011ffed081c9c4cb1dabb772fe Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Thu, 31 Aug 2023 02:12:35 +0000 Subject: [PATCH 73/75] Release v4.4.0 (#4673) ## Issue Addressed NA ## Proposed Changes Bump versions from `v4.3.0` to `v4.4.0`. 
## Additional Info NA --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 27d7b655e3..9afe6fe183 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -606,7 +606,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "4.3.0" +version = "4.4.0" dependencies = [ "beacon_chain", "clap", @@ -805,7 +805,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "4.3.0" +version = "4.4.0" dependencies = [ "beacon_node", "clap", @@ -3880,7 +3880,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "4.3.0" +version = "4.4.0" dependencies = [ "account_utils", "beacon_chain", @@ -4467,7 +4467,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "4.3.0" +version = "4.4.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index a0e46705b7..e7d24e8ab4 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "4.3.0" +version = "4.4.0" authors = [ "Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com", diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index c3dd3bd193..3db521c0e2 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "4.3.0" +version = "4.4.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index e874432fbc..65f57531d3 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git 
"--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v4.3.0-", - fallback = "Lighthouse/v4.3.0" + prefix = "Lighthouse/v4.4.0-", + fallback = "Lighthouse/v4.4.0" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index f9d0a6a31c..8b838ac6f8 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "4.3.0" +version = "4.4.0" authors = ["Paul Hauner <paul@paulhauner.com>"] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 8003236f2d..d836c4d96d 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "4.3.0" +version = "4.4.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" autotests = false From 74eb26764371ba86d32ed7fe2b2fe6a83562b053 Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Thu, 31 Aug 2023 11:18:00 +0000 Subject: [PATCH 74/75] Remove double-locking deadlock from HTTP API (#4687) ## Issue Addressed Fix a deadlock introduced in #4236 which was caught during the v4.4.0 release testing cycle (with thanks to @paulhauner and `gdb`). ## Proposed Changes Avoid re-locking the fork choice read lock when querying a state by root in the HTTP API. This avoids a deadlock due to the lock already being held. ## Additional Info The [RwLock docs](https://docs.rs/lock_api/latest/lock_api/struct.RwLock.html#method.read) explicitly advise against re-locking: > Note that attempts to recursively acquire a read lock on a RwLock when the current thread already holds one may result in a deadlock. 
--- beacon_node/http_api/src/state_id.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 5e86053771..1a76333e2d 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -89,9 +89,7 @@ impl StateId { } else { // This block is either old and finalized, or recent and unfinalized, so // it's safe to fallback to the optimistic status of the finalized block. - chain - .canonical_head - .fork_choice_read_lock() + fork_choice .is_optimistic_or_invalid_block(&hot_summary.latest_block_root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)? From 2841f60686d642fcc0785c884d43e34e47a800dc Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Mon, 4 Sep 2023 02:56:52 +0000 Subject: [PATCH 75/75] Release v4.4.1 (#4690) ## Proposed Changes New release to replace the cancelled v4.4.0 release. This release includes the bugfix #4687 which avoids a deadlock that was present in v4.4.0. ## Additional Info Awaiting testing over the weekend this will be merged Monday September 4th. 
--- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9afe6fe183..ad7b6fe14c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -606,7 +606,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "4.4.0" +version = "4.4.1" dependencies = [ "beacon_chain", "clap", @@ -805,7 +805,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "4.4.0" +version = "4.4.1" dependencies = [ "beacon_node", "clap", @@ -3880,7 +3880,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "4.4.0" +version = "4.4.1" dependencies = [ "account_utils", "beacon_chain", @@ -4467,7 +4467,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "4.4.0" +version = "4.4.1" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index e7d24e8ab4..3f4c93fa4d 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "4.4.0" +version = "4.4.1" authors = [ "Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com", diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 3db521c0e2..fcbe8f84d2 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "4.4.0" +version = "4.4.1" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 65f57531d3..567714e5ca 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git 
"--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v4.4.0-", - fallback = "Lighthouse/v4.4.0" + prefix = "Lighthouse/v4.4.1-", + fallback = "Lighthouse/v4.4.1" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 8b838ac6f8..704e0a657e 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "4.4.0" +version = "4.4.1" authors = ["Paul Hauner <paul@paulhauner.com>"] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index d836c4d96d..012276f4ce 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "4.4.0" +version = "4.4.1" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" autotests = false