From d0efb6b18af9a274e4059b6bc953f470f00c18b1 Mon Sep 17 00:00:00 2001 From: pinkiebell <40266861+pinkiebell@users.noreply.github.com> Date: Wed, 19 Oct 2022 22:55:49 +0000 Subject: [PATCH 01/37] beacon_node: add --disable-deposit-contract-sync flag (#3597) Overrides any previous option that enables the eth1 service. Useful for operating a `light` beacon node. Co-authored-by: Michael Sproul --- beacon_node/client/src/builder.rs | 4 +++- beacon_node/src/cli.rs | 8 ++++++++ beacon_node/src/config.rs | 5 +++++ lighthouse/tests/beacon_node.rs | 34 +++++++++++++++++++++++++++++++ 4 files changed, 50 insertions(+), 1 deletion(-) diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index a46d91ad1e..efd91cfdf6 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -457,7 +457,9 @@ where ClientGenesis::FromStore => builder.resume_from_db().map(|v| (v, None))?, }; - self.eth1_service = eth1_service_option; + if config.sync_eth1_chain { + self.eth1_service = eth1_service_option; + } self.beacon_chain_builder = Some(beacon_chain_builder); Ok(self) } diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 0b7518b957..81a7c6bbeb 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -852,4 +852,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { failure caused by the execution layer.") .takes_value(false) ) + .arg( + Arg::with_name("disable-deposit-contract-sync") + .long("disable-deposit-contract-sync") + .help("Explictly disables syncing of deposit logs from the execution node. \ + This overrides any previous option that depends on it. 
\ + Useful if you intend to run a non-validating beacon node.") + .takes_value(false) + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 7666134b41..3b94c31290 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -668,6 +668,11 @@ pub fn get_config( client_config.chain.enable_lock_timeouts = false; } + // Note: This overrides any previous flags that enable this option. + if cli_args.is_present("disable-deposit-contract-sync") { + client_config.sync_eth1_chain = false; + } + if let Some(timeout) = clap_utils::parse_optional(cli_args, "fork-choice-before-proposal-timeout")? { diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 34041a82c8..b1498f109d 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1527,3 +1527,37 @@ fn enabled_disable_log_timestamp_flag() { assert!(config.logger_config.disable_log_timestamp); }); } + +#[test] +fn sync_eth1_chain_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.sync_eth1_chain, false)); +} + +#[test] +fn sync_eth1_chain_execution_endpoints_flag() { + let dir = TempDir::new().expect("Unable to create temporary directory"); + CommandLineTest::new() + .flag("execution-endpoints", Some("http://localhost:8551/")) + .flag( + "execution-jwt", + dir.path().join("jwt-file").as_os_str().to_str(), + ) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.sync_eth1_chain, true)); +} + +#[test] +fn sync_eth1_chain_disable_deposit_contract_sync_flag() { + let dir = TempDir::new().expect("Unable to create temporary directory"); + CommandLineTest::new() + .flag("disable-deposit-contract-sync", None) + .flag("execution-endpoints", Some("http://localhost:8551/")) + .flag( + "execution-jwt", + dir.path().join("jwt-file").as_os_str().to_str(), + ) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.sync_eth1_chain, false)); +} From 
dbb93cd0d268513017d75936d9097732cf2d5ee3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 19 Oct 2022 22:55:50 +0000 Subject: [PATCH 02/37] bors: require slasher and syncing sim tests (#3645) ## Issue Addressed I noticed that [this build](https://github.com/sigp/lighthouse/actions/runs/3269950873/jobs/5378036501) wasn't marked failed by Bors when the `syncing-simulator-ubuntu` job failed. This is because that job is absent from the `bors.toml` config. ## Proposed Changes Add missing jobs to Bors config so that they are required: - `syncing-simulator-ubuntu` - `slasher-tests` - `disallowed-from-async-lint` The `disallowed-from-async-lint` was previously allowed to fail because it was considered beta, but I think it's stable enough now we may as well require it. --- bors.toml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bors.toml b/bors.toml index 0ff5d6231b..6edf55bfa3 100644 --- a/bors.toml +++ b/bors.toml @@ -20,7 +20,10 @@ status = [ "doppelganger-protection-test", "execution-engine-integration-ubuntu", "cargo-vendor", - "check-msrv" + "check-msrv", + "slasher-tests", + "syncing-simulator-ubuntu", + "disallowed-from-async-lint" ] use_squash_merge = true timeout_sec = 10800 From 3a5888e53d03e4cdd8f52639ffcee06ea24fb1c0 Mon Sep 17 00:00:00 2001 From: Divma Date: Mon, 24 Oct 2022 21:39:30 +0000 Subject: [PATCH 03/37] Ban and unban peers at the swarm level (#3653) ## Issue Addressed I missed this from https://github.com/sigp/lighthouse/pull/3491. peers were being banned at the behaviour level only. The identify errors are explained by this as well ## Proposed Changes Add banning and unbanning ## Additional Info Befor,e having tests that catch this was hard because the swarm was outside the behaviour. 
We could now have tests that prevent something like this in the future --- beacon_node/lighthouse_network/src/service/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 53d29ccb21..97d96d171d 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1342,10 +1342,12 @@ impl Network { Some(NetworkEvent::PeerDisconnected(peer_id)) } PeerManagerEvent::Banned(peer_id, associated_ips) => { + self.swarm.ban_peer_id(peer_id); self.discovery_mut().ban_peer(&peer_id, associated_ips); Some(NetworkEvent::PeerBanned(peer_id)) } PeerManagerEvent::UnBanned(peer_id, associated_ips) => { + self.swarm.unban_peer_id(peer_id); self.discovery_mut().unban_peer(&peer_id, associated_ips); Some(NetworkEvent::PeerUnbanned(peer_id)) } From fcfd02aeec435203269b03865e3ccc23e5f51e6d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 25 Oct 2022 06:36:51 +0000 Subject: [PATCH 04/37] Release v3.2.0 (#3647) ## Issue Addressed NA ## Proposed Changes Bump version to `v3.2.0` ## Additional Info - ~~Blocked on #3597~~ - ~~Blocked on #3645~~ - ~~Blocked on #3653~~ - ~~Requires additional testing~~ --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 34c932307d..4616ee8a4b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -439,7 +439,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "3.1.2" +version = "3.2.0" dependencies = [ "beacon_chain", "clap", @@ -597,7 +597,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "3.1.2" +version = "3.2.0" dependencies = [ "beacon_node", "clap", @@ -3105,7 +3105,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" 
[[package]] name = "lcli" -version = "3.1.2" +version = "3.2.0" dependencies = [ "account_utils", "beacon_chain", @@ -3605,7 +3605,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "3.1.2" +version = "3.2.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index d48dd6cac0..e4441dcbd6 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "3.1.2" +version = "3.2.0" authors = ["Paul Hauner ", "Age Manning "] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 3f406e88fc..5d5f7ce723 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v3.1.2-", - fallback = "Lighthouse/v3.1.2" + prefix = "Lighthouse/v3.2.0-", + fallback = "Lighthouse/v3.2.0" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 6b9bb33838..ad9e9f49a1 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "3.1.2" +version = "3.2.0" authors = ["Paul Hauner "] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 94bb62b1af..3a40d5a541 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "3.1.2" +version = "3.2.0" authors = ["Sigma Prime "] edition = "2021" autotests = false From 77eabc5401223d7de06a55204d69e68a92e0a54d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 26 Oct 2022 06:50:04 +0000 Subject: [PATCH 05/37] Revert "Optimise HTTP validator lookups" (#3658) ## Issue Addressed This reverts commit ca9dc8e0947a0ec83f31830aaabc1ffbd3c14c9c (PR #3559) with some modifications. ## Proposed Changes Unfortunately that PR introduced a performance regression in fork choice. The optimisation _intended_ to build the exit and pubkey caches on the head state _only if_ they were not already built. However, due to the head state always being cloned without these caches, we ended up building them every time the head changed, leading to a ~70ms+ penalty on mainnet. https://github.com/sigp/lighthouse/blob/fcfd02aeec435203269b03865e3ccc23e5f51e6d/beacon_node/beacon_chain/src/canonical_head.rs#L633-L636 I believe this is a severe enough regression to justify immediately releasing v3.2.1 with this change. ## Additional Info I didn't fully revert #3559, because there were some unrelated deletions of dead code in that PR which I figured we may as well keep. An alternative would be to clone the extra caches, but this likely still imposes some cost, so in the interest of applying a conservative fix quickly, I think reversion is the best approach. 
The optimisation from #3559 was not even optimising a particularly significant path, it was mostly for VCs running larger numbers of inactive keys. We can re-do it in the `tree-states` world where cache clones are cheap. --- .../beacon_chain/src/canonical_head.rs | 6 +++--- beacon_node/http_api/src/lib.rs | 21 ++----------------- consensus/types/src/beacon_state.rs | 15 ------------- 3 files changed, 5 insertions(+), 37 deletions(-) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 53e0fbaac9..c9bd6db0e6 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -654,11 +654,11 @@ impl BeaconChain { }) }) .and_then(|mut snapshot| { - // Regardless of where we got the state from, attempt to build all the - // caches except the tree hash cache. + // Regardless of where we got the state from, attempt to build the committee + // caches. snapshot .beacon_state - .build_all_caches(&self.spec) + .build_all_committee_caches(&self.spec) .map_err(Into::into) .map(|()| snapshot) })?; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 51e97c893d..5b4fa5816d 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -668,10 +668,9 @@ pub fn serve( "Invalid validator ID".to_string(), )) })) - .and(log_filter.clone()) .and(warp::path::end()) .and_then( - |state_id: StateId, chain: Arc>, validator_id: ValidatorId, log| { + |state_id: StateId, chain: Arc>, validator_id: ValidatorId| { blocking_json_task(move || { let (data, execution_optimistic) = state_id .map_state_and_execution_optimistic( @@ -679,23 +678,7 @@ pub fn serve( |state, execution_optimistic| { let index_opt = match &validator_id { ValidatorId::PublicKey(pubkey) => { - // Fast path: use the pubkey cache which is probably - // initialised at the head. 
- match state.get_validator_index_read_only(pubkey) { - Ok(result) => result, - Err(e) => { - // Slow path, fall back to iteration. - debug!( - log, - "Validator look-up cache miss"; - "reason" => ?e, - ); - state - .validators() - .iter() - .position(|v| v.pubkey == *pubkey) - } - } + state.validators().iter().position(|v| v.pubkey == *pubkey) } ValidatorId::Index(index) => Some(*index as usize), }; diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 46a431d073..a5d00cdf2d 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -447,21 +447,6 @@ impl BeaconState { Ok(self.pubkey_cache().get(pubkey)) } - /// Immutable variant of `get_validator_index` which errors if the cache is not up to date. - pub fn get_validator_index_read_only( - &self, - pubkey: &PublicKeyBytes, - ) -> Result, Error> { - let pubkey_cache = self.pubkey_cache(); - if pubkey_cache.len() != self.validators().len() { - return Err(Error::PubkeyCacheIncomplete { - cache_len: pubkey_cache.len(), - registry_len: self.validators().len(), - }); - } - Ok(pubkey_cache.get(pubkey)) - } - /// The epoch corresponding to `self.slot()`. pub fn current_epoch(&self) -> Epoch { self.slot().epoch(T::slots_per_epoch()) From 6d5a2b509fac7b6ffe693866f58ba49989f946d7 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 26 Oct 2022 09:38:25 +0000 Subject: [PATCH 06/37] Release v3.2.1 (#3660) ## Proposed Changes Patch release to include the performance regression fix https://github.com/sigp/lighthouse/pull/3658. 
## Additional Info ~~Blocked on the merge of https://github.com/sigp/lighthouse/pull/3658.~~ --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4616ee8a4b..ad6aac8f42 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -439,7 +439,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "3.2.0" +version = "3.2.1" dependencies = [ "beacon_chain", "clap", @@ -597,7 +597,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "3.2.0" +version = "3.2.1" dependencies = [ "beacon_node", "clap", @@ -3105,7 +3105,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "3.2.0" +version = "3.2.1" dependencies = [ "account_utils", "beacon_chain", @@ -3605,7 +3605,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "3.2.0" +version = "3.2.1" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index e4441dcbd6..b85aae2f4f 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "3.2.0" +version = "3.2.1" authors = ["Paul Hauner ", "Age Manning "] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 5d5f7ce723..a48ba211d9 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v3.2.0-", - fallback = "Lighthouse/v3.2.0" + prefix = "Lighthouse/v3.2.1-", + fallback = "Lighthouse/v3.2.1" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index ad9e9f49a1..b4f630ae15 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "3.2.0" +version = "3.2.1" authors = ["Paul Hauner "] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 3a40d5a541..864869a149 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "3.2.0" +version = "3.2.1" authors = ["Sigma Prime "] edition = "2021" autotests = false From f2f920dec8533e123991b1c7cfcb3c5bf09e68cc Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Fri, 28 Oct 2022 03:23:49 +0000 Subject: [PATCH 07/37] Added lightclient server side containers (#3655) ## Issue Addressed This PR partially addresses #3651 ## Proposed Changes This PR adds the following containers types from [the lightclient specs](https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md): `LightClientUpdate`, `LightClientFinalityUpdate`, `LightClientOptimisticUpdate` and `LightClientBootstrap`. It also implements the creation of each updates as delined by this [document](https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/full-node.md). ## Additional Info Here is a brief description of what each of these container signify: `LightClientUpdate`: This container is only provided by server (full node) to lightclients when catching up new sync committees beetwen periods and we want possibly one lightclient update ready for each post-altair period the lighthouse node go over. it is needed in the resp/req in method `light_client_update_by_range`. 
`LightClientFinalityUpdate/LightClientFinalityUpdate`: Lighthouse will need only the latest of each of this kind of updates, so no need to store them in the database, we can just store the latest one of each one in memory and then just supply them via gossip or respreq, only the latest ones are served by a full node. finality updates marks the transition to a new finalized header, while optimistic updates signify new non-finalized header which are imported optimistically. `LightClientBootstrap`: This object is retrieved by lightclients during the bootstrap process after a finalized checkpoint is retrieved, ideally we want to store a LightClientBootstrap for each finalized root and then serve each of them by finalized root in respreq protocol id `light_client_bootstrap`. Little digression to how we implement the creation of each updates: the creation of a optimistic/finality update is just a version of the lightclient_update creation mechanism with less fields being set, there is underlying concept of inheritance, if you look at the specs it becomes very obvious that a lightclient update is just an extension of a finality update and a finality update an extension to an optimistic update. ## Extra note `LightClientStore` is not implemented as it is only useful as internal storage design for the lightclient side. 
--- consensus/types/src/lib.rs | 3 + consensus/types/src/light_client_bootstrap.rs | 45 +++++ .../types/src/light_client_finality_update.rs | 80 ++++++++ .../src/light_client_optimistic_update.rs | 59 ++++++ consensus/types/src/light_client_update.rs | 171 ++++++++++++++++++ 5 files changed, 358 insertions(+) create mode 100644 consensus/types/src/light_client_bootstrap.rs create mode 100644 consensus/types/src/light_client_finality_update.rs create mode 100644 consensus/types/src/light_client_optimistic_update.rs create mode 100644 consensus/types/src/light_client_update.rs diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 32300173eb..7f618dc348 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -49,6 +49,9 @@ pub mod free_attestation; pub mod graffiti; pub mod historical_batch; pub mod indexed_attestation; +pub mod light_client_bootstrap; +pub mod light_client_optimistic_update; +pub mod light_client_update; pub mod pending_attestation; pub mod proposer_preparation_data; pub mod proposer_slashing; diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs new file mode 100644 index 0000000000..406136d542 --- /dev/null +++ b/consensus/types/src/light_client_bootstrap.rs @@ -0,0 +1,45 @@ +use super::{BeaconBlockHeader, BeaconState, EthSpec, FixedVector, Hash256, SyncCommittee}; +use crate::{light_client_update::*, test_utils::TestRandom}; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use std::sync::Arc; +use test_random_derive::TestRandom; +use tree_hash::TreeHash; + +/// A LightClientBootstrap is the initializer we send over to lightclient nodes +/// that are trying to generate their basic storage when booting up. 
+#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[serde(bound = "T: EthSpec")] +pub struct LightClientBootstrap { + /// Requested beacon block header. + pub header: BeaconBlockHeader, + /// The `SyncCommittee` used in the requested period. + pub current_sync_committee: Arc>, + /// Merkle proof for sync committee + pub current_sync_committee_branch: FixedVector, +} + +impl LightClientBootstrap { + pub fn from_beacon_state(beacon_state: BeaconState) -> Result { + let mut header = beacon_state.latest_block_header().clone(); + header.state_root = beacon_state.tree_hash_root(); + Ok(LightClientBootstrap { + header, + current_sync_committee: beacon_state.current_sync_committee()?.clone(), + /// TODO(Giulio2002): Generate Merkle Proof, this is just empty hashes + current_sync_committee_branch: FixedVector::new(vec![ + Hash256::zero(); + CURRENT_SYNC_COMMITTEE_PROOF_LEN + ])?, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::MainnetEthSpec; + + ssz_tests!(LightClientBootstrap); +} diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs new file mode 100644 index 0000000000..c93d15a1a0 --- /dev/null +++ b/consensus/types/src/light_client_finality_update.rs @@ -0,0 +1,80 @@ +use super::{BeaconBlockHeader, EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; +use crate::{light_client_update::*, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec}; +use safe_arith::ArithError; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use ssz_types::typenum::{U5, U6}; +use std::sync::Arc; +use test_random_derive::TestRandom; +use tree_hash::TreeHash; + +/// A LightClientFinalityUpdate is the update lightclient request or received by a gossip that +/// signal a new finalized beacon block header for the light client sync protocol. 
+#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[serde(bound = "T: EthSpec")] +pub struct LightClientFinalityUpdate { + /// The last `BeaconBlockHeader` from the last attested block by the sync committee. + pub attested_header: BeaconBlockHeader, + /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). + pub finalized_header: BeaconBlockHeader, + /// Merkle proof attesting finalized header. + pub finality_branch: FixedVector, + /// current sync aggreggate + pub sync_aggregate: SyncAggregate, + /// Slot of the sync aggregated singature + pub signature_slot: Slot, +} + +impl LightClientFinalityUpdate { + pub fn new( + chain_spec: ChainSpec, + beacon_state: BeaconState, + block: BeaconBlock, + attested_state: BeaconState, + finalized_block: BeaconBlock, + ) -> Result { + let altair_fork_epoch = chain_spec + .altair_fork_epoch + .ok_or(Error::AltairForkNotActive)?; + if attested_state.slot().epoch(T::slots_per_epoch()) < altair_fork_epoch { + return Err(Error::AltairForkNotActive); + } + + let sync_aggregate = block.body().sync_aggregate()?; + if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize { + return Err(Error::NotEnoughSyncCommitteeParticipants); + } + + // Compute and validate attested header. 
+ let mut attested_header = attested_state.latest_block_header().clone(); + attested_header.state_root = attested_state.tree_hash_root(); + // Build finalized header from finalized block + let finalized_header = BeaconBlockHeader { + slot: finalized_block.slot(), + proposer_index: finalized_block.proposer_index(), + parent_root: finalized_block.parent_root(), + state_root: finalized_block.state_root(), + body_root: finalized_block.body_root(), + }; + if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root { + return Err(Error::InvalidFinalizedBlock); + } + // TODO(Giulio2002): compute proper merkle proofs. + Ok(Self { + attested_header: attested_header, + finalized_header: finalized_header, + finality_branch: FixedVector::new(vec![Hash256::zero(); FINALIZED_ROOT_PROOF_LEN])?, + sync_aggregate: sync_aggregate.clone(), + signature_slot: block.slot(), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::MainnetEthSpec; + + ssz_tests!(LightClientFinalityUpdate); +} diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs new file mode 100644 index 0000000000..9592bf1c23 --- /dev/null +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -0,0 +1,59 @@ +use super::{BeaconBlockHeader, EthSpec, Slot, SyncAggregate}; +use crate::{ + light_client_update::Error, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec, +}; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash::TreeHash; + +/// A LightClientOptimisticUpdate is the update we send on each slot, +/// it is based off the current unfinalized epoch is verified only against BLS signature. 
+#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[serde(bound = "T: EthSpec")] +pub struct LightClientOptimisticUpdate { + /// The last `BeaconBlockHeader` from the last attested block by the sync committee. + pub attested_header: BeaconBlockHeader, + /// current sync aggreggate + pub sync_aggregate: SyncAggregate, + /// Slot of the sync aggregated singature + pub signature_slot: Slot, +} + +impl LightClientOptimisticUpdate { + pub fn new( + chain_spec: ChainSpec, + block: BeaconBlock, + attested_state: BeaconState, + ) -> Result { + let altair_fork_epoch = chain_spec + .altair_fork_epoch + .ok_or(Error::AltairForkNotActive)?; + if attested_state.slot().epoch(T::slots_per_epoch()) < altair_fork_epoch { + return Err(Error::AltairForkNotActive); + } + + let sync_aggregate = block.body().sync_aggregate()?; + if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize { + return Err(Error::NotEnoughSyncCommitteeParticipants); + } + + // Compute and validate attested header. 
+ let mut attested_header = attested_state.latest_block_header().clone(); + attested_header.state_root = attested_state.tree_hash_root(); + Ok(Self { + attested_header, + sync_aggregate: sync_aggregate.clone(), + signature_slot: block.slot(), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::MainnetEthSpec; + + ssz_tests!(LightClientOptimisticUpdate); +} diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs new file mode 100644 index 0000000000..38609cf1bc --- /dev/null +++ b/consensus/types/src/light_client_update.rs @@ -0,0 +1,171 @@ +use super::{BeaconBlockHeader, EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; +use crate::{beacon_state, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec}; +use safe_arith::ArithError; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use ssz_types::typenum::{U5, U6}; +use std::sync::Arc; +use test_random_derive::TestRandom; +use tree_hash::TreeHash; + +pub const FINALIZED_ROOT_INDEX: usize = 105; +pub const CURRENT_SYNC_COMMITTEE_INDEX: usize = 54; +pub const NEXT_SYNC_COMMITTEE_INDEX: usize = 55; + +pub type FinalizedRootProofLen = U6; +pub type CurrentSyncCommitteeProofLen = U5; +pub type NextSyncCommitteeProofLen = U5; + +pub const FINALIZED_ROOT_PROOF_LEN: usize = 6; +pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; +pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; + +#[derive(Debug, PartialEq, Clone)] +pub enum Error { + SszTypesError(ssz_types::Error), + BeaconStateError(beacon_state::Error), + ArithError(ArithError), + AltairForkNotActive, + NotEnoughSyncCommitteeParticipants, + MismatchingPeriods, + InvalidFinalizedBlock, +} + +impl From for Error { + fn from(e: ssz_types::Error) -> Error { + Error::SszTypesError(e) + } +} + +impl From for Error { + fn from(e: beacon_state::Error) -> Error { + Error::BeaconStateError(e) + } +} + +impl From for Error { + fn from(e: ArithError) -> Error { 
+ Error::ArithError(e) + } +} + +/// A LightClientUpdate is the update we request solely to either complete the bootstraping process, +/// or to sync up to the last committee period, we need to have one ready for each ALTAIR period +/// we go over, note: there is no need to keep all of the updates from [ALTAIR_PERIOD, CURRENT_PERIOD]. +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[serde(bound = "T: EthSpec")] +pub struct LightClientUpdate { + /// The last `BeaconBlockHeader` from the last attested block by the sync committee. + pub attested_header: BeaconBlockHeader, + /// The `SyncCommittee` used in the next period. + pub next_sync_committee: Arc>, + /// Merkle proof for next sync committee + pub next_sync_committee_branch: FixedVector, + /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). + pub finalized_header: BeaconBlockHeader, + /// Merkle proof attesting finalized header. + pub finality_branch: FixedVector, + /// current sync aggreggate + pub sync_aggregate: SyncAggregate, + /// Slot of the sync aggregated singature + pub signature_slot: Slot, +} + +impl LightClientUpdate { + pub fn new( + chain_spec: ChainSpec, + beacon_state: BeaconState, + block: BeaconBlock, + attested_state: BeaconState, + finalized_block: BeaconBlock, + ) -> Result { + let altair_fork_epoch = chain_spec + .altair_fork_epoch + .ok_or(Error::AltairForkNotActive)?; + if attested_state.slot().epoch(T::slots_per_epoch()) < altair_fork_epoch { + return Err(Error::AltairForkNotActive); + } + + let sync_aggregate = block.body().sync_aggregate()?; + if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize { + return Err(Error::NotEnoughSyncCommitteeParticipants); + } + + let signature_period = block.epoch().sync_committee_period(&chain_spec)?; + // Compute and validate attested header. 
+ let mut attested_header = attested_state.latest_block_header().clone(); + attested_header.state_root = attested_state.tree_hash_root(); + let attested_period = attested_header + .slot + .epoch(T::slots_per_epoch()) + .sync_committee_period(&chain_spec)?; + if attested_period != signature_period { + return Err(Error::MismatchingPeriods); + } + // Build finalized header from finalized block + let finalized_header = BeaconBlockHeader { + slot: finalized_block.slot(), + proposer_index: finalized_block.proposer_index(), + parent_root: finalized_block.parent_root(), + state_root: finalized_block.state_root(), + body_root: finalized_block.body_root(), + }; + if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root { + return Err(Error::InvalidFinalizedBlock); + } + // TODO(Giulio2002): compute proper merkle proofs. + Ok(Self { + attested_header, + next_sync_committee: attested_state.next_sync_committee()?.clone(), + next_sync_committee_branch: FixedVector::new(vec![ + Hash256::zero(); + NEXT_SYNC_COMMITTEE_PROOF_LEN + ])?, + finalized_header, + finality_branch: FixedVector::new(vec![Hash256::zero(); FINALIZED_ROOT_PROOF_LEN])?, + sync_aggregate: sync_aggregate.clone(), + signature_slot: block.slot(), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::MainnetEthSpec; + use ssz_types::typenum::Unsigned; + + ssz_tests!(LightClientUpdate); + + #[test] + fn finalized_root_params() { + assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32) <= FINALIZED_ROOT_INDEX); + assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32 + 1) > FINALIZED_ROOT_INDEX); + assert_eq!(FinalizedRootProofLen::to_usize(), FINALIZED_ROOT_PROOF_LEN); + } + + #[test] + fn current_sync_committee_params() { + assert!( + 2usize.pow(CURRENT_SYNC_COMMITTEE_PROOF_LEN as u32) <= CURRENT_SYNC_COMMITTEE_INDEX + ); + assert!( + 2usize.pow(CURRENT_SYNC_COMMITTEE_PROOF_LEN as u32 + 1) > CURRENT_SYNC_COMMITTEE_INDEX + ); + assert_eq!( + CurrentSyncCommitteeProofLen::to_usize(), + 
CURRENT_SYNC_COMMITTEE_PROOF_LEN + ); + } + + #[test] + fn next_sync_committee_params() { + assert!(2usize.pow(NEXT_SYNC_COMMITTEE_PROOF_LEN as u32) <= NEXT_SYNC_COMMITTEE_INDEX); + assert!(2usize.pow(NEXT_SYNC_COMMITTEE_PROOF_LEN as u32 + 1) > NEXT_SYNC_COMMITTEE_INDEX); + assert_eq!( + NextSyncCommitteeProofLen::to_usize(), + NEXT_SYNC_COMMITTEE_PROOF_LEN + ); + } +} From 5bd1501cb1b4fb7cff5ac292f522743d9297191d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kausik=20Das=20=E2=9C=AA?= Date: Fri, 28 Oct 2022 03:23:50 +0000 Subject: [PATCH 08/37] Book spelling and grammar corrections (#3659) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Issue Addressed There are few spelling and grammar errors in the book. ## Proposed Changes Corrected those spelling and grammar errors in the below files - book/src/advanced-release-candidates.md - book/src/advanced_networking.md - book/src/builders.md - book/src/key-management.md - book/src/merge-migration.md - book/src/wallet-create.md Co-authored-by: Kausik Das Co-authored-by: Kausik Das ✪ --- CONTRIBUTING.md | 2 +- book/src/advanced_networking.md | 2 +- book/src/builders.md | 2 +- book/src/key-management.md | 2 +- book/src/merge-migration.md | 2 +- book/src/wallet-create.md | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 489d12eb88..ef23e1ed57 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -46,7 +46,7 @@ questions. (github.com/YOUR_NAME/lighthouse) of the main repository (github.com/sigp/lighthouse). 3. Once you feel you have addressed the issue, **create a pull-request** to merge - your changes in to the main repository. + your changes into the main repository. 4. Wait for the repository maintainers to **review your changes** to ensure the issue is addressed satisfactorily. Optionally, mention your PR on [discord](https://discord.gg/cyAszAh). 
diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index d6fcb82a6b..fb7f07a51a 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -59,7 +59,7 @@ TCP and UDP ports (9000 by default). Lighthouse has a number of CLI parameters for constructing and modifying the local Ethereum Node Record (ENR). Examples are `--enr-address`, `--enr-udp-port`, `--enr-tcp-port` and `--disable-enr-auto-update`. These -settings allow you construct your initial ENR. Their primary intention is for +settings allow you to construct your initial ENR. Their primary intention is for setting up boot-like nodes and having a contactable ENR on boot. On normal operation of a Lighthouse node, none of these flags need to be set. Setting these flags incorrectly can lead to your node being incorrectly added to the diff --git a/book/src/builders.md b/book/src/builders.md index e57a4fad14..99fae5b3e7 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -188,7 +188,7 @@ with the builder network: INFO Published validator registrations to the builder network ``` -When you succesfully propose a block using a builder, you will see this log on the beacon node: +When you successfully propose a block using a builder, you will see this log on the beacon node: ``` INFO Successfully published a block to the builder network diff --git a/book/src/key-management.md b/book/src/key-management.md index 30d649f346..bb1751be16 100644 --- a/book/src/key-management.md +++ b/book/src/key-management.md @@ -103,7 +103,7 @@ opt-in). Instead, we assert that since the withdrawal keys can be regenerated from a mnemonic, having them lying around on the file-system only presents risk and complexity. -At the time or writing, we do not expose the commands to regenerate keys from +At the time of writing, we do not expose the commands to regenerate keys from mnemonics. 
However, key regeneration is tested on the public Lighthouse repository and will be exposed prior to mainnet launch. diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index 780be5836d..8596cd942c 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -2,7 +2,7 @@ This document provides detail for users who want to run a merge-ready Lighthouse node. -> The merge is occuring on mainnet in September. You _must_ have a merge-ready setup by September 6 +> The merge is occurring on mainnet in September. You _must_ have a merge-ready setup by September 6 > 2022. ## Necessary Configuration diff --git a/book/src/wallet-create.md b/book/src/wallet-create.md index 0ebb449177..25cac8d34d 100644 --- a/book/src/wallet-create.md +++ b/book/src/wallet-create.md @@ -11,7 +11,7 @@ backed up, all validator keys can be trivially re-generated. The 24-word string is randomly generated during wallet creation and printed out to the terminal. It's important to **make one or more backups of the mnemonic** -to ensure your ETH is not lost in the case of data loss. It very important to +to ensure your ETH is not lost in the case of data loss. It is very important to **keep your mnemonic private** as it represents the ultimate control of your ETH. From 46fbf5b98b5a32c49e757da6d0c239c25a35eeb7 Mon Sep 17 00:00:00 2001 From: Divma Date: Fri, 28 Oct 2022 05:40:06 +0000 Subject: [PATCH 09/37] Update discv5 (#3171) ## Issue Addressed Updates discv5 Pending on - [x] #3547 - [x] Alex upgrades his deps ## Proposed Changes updates discv5 and the enr crate. The only relevant change would be some clear indications of ipv4 usage in lighthouse ## Additional Info Functionally, this should be equivalent to the prev version. 
As draft pending a discv5 release --- Cargo.lock | 618 ++++++------------ beacon_node/execution_layer/Cargo.toml | 8 +- beacon_node/lighthouse_network/Cargo.toml | 4 +- beacon_node/lighthouse_network/src/config.rs | 2 +- .../lighthouse_network/src/discovery/enr.rs | 10 +- .../src/discovery/enr_ext.rs | 29 +- .../lighthouse_network/src/discovery/mod.rs | 21 +- .../src/peer_manager/network_behaviour.rs | 2 +- boot_node/src/server.rs | 10 +- common/deposit_contract/Cargo.toml | 2 +- common/eth2_network_config/Cargo.toml | 2 +- crypto/eth2_hashing/Cargo.toml | 2 +- crypto/eth2_key_derivation/Cargo.toml | 2 +- crypto/eth2_keystore/Cargo.toml | 2 +- lighthouse/tests/boot_node.rs | 4 +- .../execution_engine_integration/Cargo.toml | 4 +- 16 files changed, 270 insertions(+), 452 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ad6aac8f42..04cfd42350 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -258,9 +258,9 @@ dependencies = [ [[package]] name = "auto_impl" -version = "0.5.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7862e21c893d65a1650125d157eaeec691439379a1cee17ee49031b79236ada4" +checksum = "8a8c1df849285fbacd587de7818cc7d13be6cd2cbcd47a04fb1801b0e2706e33" dependencies = [ "proc-macro-error", "proc-macro2", @@ -366,7 +366,7 @@ checksum = "3bdca834647821e0b13d9539a8634eb62d3501b6b6c2cec1722786ee6671b851" [[package]] name = "beacon-api-client" version = "0.1.0" -source = "git+https://github.com/ralexstokes/beacon-api-client?rev=de34eeb#de34eeb92e4fdee5709d142910abf42cf857609b" +source = "git+https://github.com/ralexstokes/beacon-api-client?rev=7d5d8dad1648f771573f42585ad8080a45b05689#7d5d8dad1648f771573f42585ad8080a45b05689" dependencies = [ "ethereum-consensus", "http", @@ -913,15 +913,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.5.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"279bc8fc53f788a75c7804af68237d1fce02cde1e275a886a4b320604dc2aeda" - -[[package]] -name = "const-oid" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" +checksum = "722e23542a15cea1f65d4a1419c4cfd7a26706c70871a13a04238ca3f40f1661" [[package]] name = "convert_case" @@ -1061,9 +1055,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.3.2" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" +checksum = "9f2b443d17d49dad5ef0ede301c3179cc923b8822f3393b4d2c28c269dd4a122" dependencies = [ "generic-array", "rand_core 0.6.3", @@ -1272,28 +1266,19 @@ dependencies = [ "hex", "reqwest", "serde_json", - "sha2 0.9.9", + "sha2 0.10.2", "tree_hash", "types", ] [[package]] name = "der" -version = "0.3.5" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eeb9d92785d1facb50567852ce75d0858630630e7eabea59cf7eb7474051087" +checksum = "13dd2ae565c0a381dde7fade45fce95984c568bdcb4700a4fdbe3175e0380b2f" dependencies = [ - "const-oid 0.5.2", - "typenum", -] - -[[package]] -name = "der" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" -dependencies = [ - "const-oid 0.7.1", + "const-oid", + "zeroize", ] [[package]] @@ -1403,14 +1388,14 @@ dependencies = [ [[package]] name = "discv5" -version = "0.1.0-beta.13" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed8f54486179d5a7f11e1f5526f49d925a411a96c1141a707bd5f071be2ab630" +checksum = "d767c0e59b3e8d65222d95df723cc2ea1da92bb0f27c563607e6f0bde064f255" dependencies = [ "aes", "aes-gcm", "arrayvec", - "digest 0.10.3", + "delay_map", "enr", 
"fnv", "futures", @@ -1418,13 +1403,14 @@ dependencies = [ "hex", "hkdf", "lazy_static", - "libp2p-core 0.30.2", + "libp2p-core", "lru", + "more-asserts", "parking_lot 0.11.2", "rand 0.8.5", "rlp", - "sha2 0.9.9", "smallvec", + "socket2", "tokio", "tokio-stream", "tokio-util 0.6.10", @@ -1442,24 +1428,12 @@ checksum = "5caaa75cbd2b960ff1e5392d2cfb1f44717fffe12fc1f32b7b5d1267f99732a6" [[package]] name = "ecdsa" -version = "0.11.1" +version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d33b390ab82f2e1481e331dbd0530895640179d2128ef9a79cc690b78d1eba" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ - "der 0.3.5", - "elliptic-curve 0.9.12", - "hmac 0.11.0", - "signature", -] - -[[package]] -name = "ecdsa" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" -dependencies = [ - "der 0.5.1", - "elliptic-curve 0.11.12", + "der", + "elliptic-curve", "rfc6979", "signature", ] @@ -1526,32 +1500,18 @@ checksum = "3f107b87b6afc2a64fd13cac55fe06d6c8859f12d4b14cbcdd2c67d0976781be" [[package]] name = "elliptic-curve" -version = "0.9.12" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c13e9b0c3c4170dcc2a12783746c4205d98e18957f57854251eea3f9750fe005" -dependencies = [ - "bitvec 0.20.4", - "ff 0.9.0", - "generic-array", - "group 0.9.0", - "pkcs8 0.6.1", - "rand_core 0.6.3", - "subtle", - "zeroize", -] - -[[package]] -name = "elliptic-curve" -version = "0.11.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b477563c2bfed38a3b7a60964c49e058b2510ad3f12ba3483fd8f62c2306d6" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ "base16ct", "crypto-bigint", - "der 0.5.1", - "ff 0.11.1", + "der", + "digest 0.10.3", + "ff", "generic-array", - "group 0.11.0", + 
"group", + "pkcs8", "rand_core 0.6.3", "sec1", "subtle", @@ -1569,21 +1529,21 @@ dependencies = [ [[package]] name = "enr" -version = "0.5.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809869a1328bfb586b48c9c0f87761c47c41793a85bcb06f66074a87cafc1bcd" +checksum = "26fa0a0be8915790626d5759eb51fe47435a8eac92c2f212bd2da9aa7f30ea56" dependencies = [ "base64", "bs58", "bytes", "ed25519-dalek", "hex", - "k256 0.8.1", + "k256", "log", "rand 0.8.5", "rlp", "serde", - "sha3 0.9.1", + "sha3 0.10.1", "zeroize", ] @@ -1593,7 +1553,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" dependencies = [ - "heck 0.4.0", + "heck", "proc-macro2", "quote", "syn", @@ -1768,7 +1728,7 @@ dependencies = [ "hex", "num-bigint-dig", "ring", - "sha2 0.9.9", + "sha2 0.10.2", "zeroize", ] @@ -1941,14 +1901,15 @@ dependencies = [ [[package]] name = "ethereum-consensus" version = "0.1.1" -source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=e1188b1#e1188b14f320f225f2e53aa10336614565f04129" +source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=a8110af76d97bf2bf27fb987a671808fcbdf1834#a8110af76d97bf2bf27fb987a671808fcbdf1834" dependencies = [ "async-stream", "blst", + "bs58", "enr", "hex", "integer-sqrt", - "multiaddr 0.14.0", + "multiaddr", "rand 0.8.5", "serde", "serde_json", @@ -1989,29 +1950,36 @@ dependencies = [ [[package]] name = "ethers-core" -version = "0.6.0" -source = "git+https://github.com/gakonst/ethers-rs?rev=02ad93a1cfb7b62eb051c77c61dc4c0218428e4a#02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ebdd63c828f58aa067f40f9adcbea5e114fb1f90144b3a1e2858e0c9b1ff4e8" dependencies = [ "arrayvec", "bytes", - "elliptic-curve 0.11.12", + "chrono", + "elliptic-curve", "ethabi 17.1.0", + "fastrlp", 
"generic-array", "hex", - "k256 0.10.4", + "k256", "rand 0.8.5", "rlp", "rlp-derive", + "rust_decimal", "serde", "serde_json", + "strum", "thiserror", "tiny-keccak", + "unicode-xid", ] [[package]] name = "ethers-providers" -version = "0.6.0" -source = "git+https://github.com/gakonst/ethers-rs?rev=02ad93a1cfb7b62eb051c77c61dc4c0218428e4a#02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e46482e4d1e79b20c338fd9db9e166184eb387f0a4e7c05c5b5c0aa2e8c8900c" dependencies = [ "async-trait", "auto_impl", @@ -2021,11 +1989,13 @@ dependencies = [ "futures-core", "futures-timer", "futures-util", + "getrandom 0.2.7", + "hashers", "hex", "http", "once_cell", "parking_lot 0.11.2", - "pin-project 1.0.11", + "pin-project", "reqwest", "serde", "serde_json", @@ -2141,21 +2111,35 @@ dependencies = [ ] [[package]] -name = "ff" -version = "0.9.0" +name = "fastrlp" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72a4d941a5b7c2a75222e2d44fcdf634a67133d9db31e177ae5ff6ecda852bfe" +checksum = "089263294bb1c38ac73649a6ad563dd9a5142c8dc0482be15b8b9acb22a1611e" dependencies = [ - "bitvec 0.20.4", - "rand_core 0.6.3", - "subtle", + "arrayvec", + "auto_impl", + "bytes", + "ethereum-types 0.13.1", + "fastrlp-derive", +] + +[[package]] +name = "fastrlp-derive" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1fa41ebc231af281098b11ad4a4f6182ec9096902afffe948034a20d4e1385a" +dependencies = [ + "bytes", + "proc-macro2", + "quote", + "syn", ] [[package]] name = "ff" -version = "0.11.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "131655483be284720a17d74ff97592b8e76576dc25563148601df2d7c9080924" +checksum = "df689201f395c6b90dfe87127685f8dbfc083a5e779e613575d8bd7314300c3e" dependencies = [ "rand_core 0.6.3", "subtle", @@ -2389,6 +2373,15 @@ dependencies = [ "slab", 
] +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + [[package]] name = "generic-array" version = "0.14.5" @@ -2440,8 +2433,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] @@ -2490,22 +2485,11 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "group" -version = "0.9.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61b3c1e8b4f1ca07e6605ea1be903a5f6956aec5c8a67fd44d56076631675ed8" +checksum = "7391856def869c1c81063a03457c676fbcd419709c3dfb33d8d319de484b154d" dependencies = [ - "ff 0.9.0", - "rand_core 0.6.3", - "subtle", -] - -[[package]] -name = "group" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5ac374b108929de78460075f3dc439fa66df9d8fc77e8f12caa5165fcf0c89" -dependencies = [ - "ff 0.11.1", + "ff", "rand_core 0.6.3", "subtle", ] @@ -2553,6 +2537,15 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashers" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2bca93b15ea5a746f220e56587f71e73c6165eab783df9e26590069953e3c30" +dependencies = [ + "fxhash", +] + [[package]] name = "hashlink" version = "0.7.0" @@ -2587,15 +2580,6 @@ dependencies = [ "http", ] -[[package]] -name = "heck" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] - [[package]] name = "heck" version = "0.4.0" @@ -2625,12 +2609,11 @@ checksum = 
"b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" [[package]] name = "hkdf" -version = "0.11.0" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01706d578d5c281058480e673ae4086a9f4710d8df1ad80a5b03e39ece5f886b" +checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" dependencies = [ - "digest 0.9.0", - "hmac 0.11.0", + "hmac 0.12.1", ] [[package]] @@ -2653,6 +2636,15 @@ dependencies = [ "digest 0.9.0", ] +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.3", +] + [[package]] name = "hmac-drbg" version = "0.3.0" @@ -3059,27 +3051,15 @@ dependencies = [ [[package]] name = "k256" -version = "0.8.1" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3e8e491ed22bc161583a1c77e42313672c483eba6bd9d7afec0f1131d0b9ce" +checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" dependencies = [ "cfg-if", - "ecdsa 0.11.1", - "elliptic-curve 0.9.12", - "sha2 0.9.9", -] - -[[package]] -name = "k256" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19c3a5e0a0b8450278feda242592512e09f61c72e018b8cd5c859482802daf2d" -dependencies = [ - "cfg-if", - "ecdsa 0.13.4", - "elliptic-curve 0.11.12", - "sec1", - "sha3 0.9.1", + "ecdsa", + "elliptic-curve", + "sha2 0.10.2", + "sha3 0.10.1", ] [[package]] @@ -3231,7 +3211,7 @@ dependencies = [ "getrandom 0.2.7", "instant", "lazy_static", - "libp2p-core 0.36.0", + "libp2p-core", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", @@ -3244,48 +3224,13 @@ dependencies = [ "libp2p-tcp", "libp2p-websocket", "libp2p-yamux", - "multiaddr 0.14.0", + "multiaddr", "parking_lot 0.12.1", - "pin-project 1.0.11", + "pin-project", "rand 0.7.3", "smallvec", ] -[[package]] -name = 
"libp2p-core" -version = "0.30.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86aad7d54df283db817becded03e611137698a6509d4237a96881976a162340c" -dependencies = [ - "asn1_der", - "bs58", - "ed25519-dalek", - "either", - "fnv", - "futures", - "futures-timer", - "instant", - "lazy_static", - "libsecp256k1", - "log", - "multiaddr 0.13.0", - "multihash 0.14.0", - "multistream-select 0.10.4", - "parking_lot 0.11.2", - "pin-project 1.0.11", - "prost 0.9.0", - "prost-build 0.9.0", - "rand 0.8.5", - "ring", - "rw-stream-sink 0.2.1", - "sha2 0.9.9", - "smallvec", - "thiserror", - "unsigned-varint 0.7.1", - "void", - "zeroize", -] - [[package]] name = "libp2p-core" version = "0.36.0" @@ -3303,15 +3248,16 @@ dependencies = [ "lazy_static", "libsecp256k1", "log", - "multiaddr 0.14.0", - "multihash 0.16.2", - "multistream-select 0.11.0", + "multiaddr", + "multihash", + "multistream-select", + "p256", "parking_lot 0.12.1", - "pin-project 1.0.11", - "prost 0.11.0", - "prost-build 0.11.1", + "pin-project", + "prost", + "prost-build", "rand 0.8.5", - "rw-stream-sink 0.3.0", + "rw-stream-sink", "sha2 0.10.2", "smallvec", "thiserror", @@ -3327,7 +3273,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6cb3c16e3bb2f76c751ae12f0f26e788c89d353babdded40411e7923f01fc978" dependencies = [ "futures", - "libp2p-core 0.36.0", + "libp2p-core", "log", "parking_lot 0.12.1", "smallvec", @@ -3348,12 +3294,12 @@ dependencies = [ "futures", "hex_fmt", "instant", - "libp2p-core 0.36.0", + "libp2p-core", "libp2p-swarm", "log", "prometheus-client", - "prost 0.11.0", - "prost-build 0.11.1", + "prost", + "prost-build", "rand 0.7.3", "regex", "sha2 0.10.2", @@ -3371,12 +3317,12 @@ dependencies = [ "asynchronous-codec", "futures", "futures-timer", - "libp2p-core 0.36.0", + "libp2p-core", "libp2p-swarm", "log", "lru", - "prost 0.11.0", - "prost-build 0.11.1", + "prost", + "prost-build", "prost-codec", "smallvec", "thiserror", @@ -3389,7 
+3335,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a74ab339e8b5d989e8c1000a78adb5c064a6319245bb22d1e70b415ec18c39b8" dependencies = [ - "libp2p-core 0.36.0", + "libp2p-core", "libp2p-gossipsub", "libp2p-identify", "libp2p-swarm", @@ -3405,7 +3351,7 @@ dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.36.0", + "libp2p-core", "log", "nohash-hasher", "parking_lot 0.12.1", @@ -3424,10 +3370,10 @@ dependencies = [ "curve25519-dalek 3.2.0", "futures", "lazy_static", - "libp2p-core 0.36.0", + "libp2p-core", "log", - "prost 0.11.0", - "prost-build 0.11.1", + "prost", + "prost-build", "rand 0.8.5", "sha2 0.10.2", "snow", @@ -3445,10 +3391,10 @@ dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.36.0", + "libp2p-core", "log", - "prost 0.11.0", - "prost-build 0.11.1", + "prost", + "prost-build", "unsigned-varint 0.7.1", "void", ] @@ -3464,9 +3410,9 @@ dependencies = [ "futures", "futures-timer", "instant", - "libp2p-core 0.36.0", + "libp2p-core", "log", - "pin-project 1.0.11", + "pin-project", "rand 0.7.3", "smallvec", "thiserror", @@ -3479,7 +3425,7 @@ version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f02622b9dd150011b4eeec387f8bd013189a2f27da08ba363e7c6e606d77a48" dependencies = [ - "heck 0.4.0", + "heck", "quote", "syn", ] @@ -3495,7 +3441,7 @@ dependencies = [ "if-addrs 0.7.0", "ipnet", "libc", - "libp2p-core 0.36.0", + "libp2p-core", "log", "socket2", "tokio", @@ -3510,11 +3456,11 @@ dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.36.0", + "libp2p-core", "log", "parking_lot 0.12.1", "quicksink", - "rw-stream-sink 0.3.0", + "rw-stream-sink", "soketto", "url", "webpki-roots", @@ -3527,7 +3473,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b74ec8dc042b583f0b2b93d52917f3b374c1e4b1cfa79ee74c7672c41257694c" dependencies = [ "futures", - "libp2p-core 0.36.0", + 
"libp2p-core", "parking_lot 0.12.1", "thiserror", "yamux", @@ -3680,7 +3626,7 @@ dependencies = [ "regex", "serde", "serde_derive", - "sha2 0.9.9", + "sha2 0.10.2", "slog", "slog-async", "slog-term", @@ -3892,7 +3838,7 @@ dependencies = [ [[package]] name = "mev-build-rs" version = "0.2.1" -source = "git+https://github.com/ralexstokes/mev-rs?rev=a088806575805c00d63fa59c002abc5eb1dc7709#a088806575805c00d63fa59c002abc5eb1dc7709" +source = "git+https://github.com/ralexstokes/mev-rs?rev=6c99b0fbdc0427b1625469d2e575303ce08de5b8#6c99b0fbdc0427b1625469d2e575303ce08de5b8" dependencies = [ "async-trait", "axum", @@ -3981,22 +3927,10 @@ dependencies = [ ] [[package]] -name = "multiaddr" -version = "0.13.0" +name = "more-asserts" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48ee4ea82141951ac6379f964f71b20876d43712bea8faf6dd1a375e08a46499" -dependencies = [ - "arrayref", - "bs58", - "byteorder", - "data-encoding", - "multihash 0.14.0", - "percent-encoding", - "serde", - "static_assertions", - "unsigned-varint 0.7.1", - "url", -] +checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" [[package]] name = "multiaddr" @@ -4008,7 +3942,7 @@ dependencies = [ "bs58", "byteorder", "data-encoding", - "multihash 0.16.2", + "multihash", "percent-encoding", "serde", "static_assertions", @@ -4016,19 +3950,6 @@ dependencies = [ "url", ] -[[package]] -name = "multihash" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "752a61cd890ff691b4411423d23816d5866dd5621e4d1c5687a53b94b5a979d8" -dependencies = [ - "digest 0.9.0", - "generic-array", - "multihash-derive 0.7.2", - "sha2 0.9.9", - "unsigned-varint 0.7.1", -] - [[package]] name = "multihash" version = "0.16.2" @@ -4037,25 +3958,11 @@ checksum = "e3db354f401db558759dfc1e568d010a5d4146f4d3f637be1275ec4a3cf09689" dependencies = [ "core2", "digest 0.10.3", - "multihash-derive 0.8.0", + "multihash-derive", "sha2 0.10.2", 
"unsigned-varint 0.7.1", ] -[[package]] -name = "multihash-derive" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "424f6e86263cd5294cbd7f1e95746b95aca0e0d66bff31e5a40d6baa87b4aa99" -dependencies = [ - "proc-macro-crate", - "proc-macro-error", - "proc-macro2", - "quote", - "syn", - "synstructure", -] - [[package]] name = "multihash-derive" version = "0.8.0" @@ -4094,20 +4001,6 @@ dependencies = [ "twoway", ] -[[package]] -name = "multistream-select" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56a336acba8bc87c8876f6425407dbbe6c417bf478b22015f8fb0994ef3bc0ab" -dependencies = [ - "bytes", - "futures", - "log", - "pin-project 1.0.11", - "smallvec", - "unsigned-varint 0.7.1", -] - [[package]] name = "multistream-select" version = "0.11.0" @@ -4117,7 +4010,7 @@ dependencies = [ "bytes", "futures", "log", - "pin-project 1.0.11", + "pin-project", "smallvec", "unsigned-varint 0.7.1", ] @@ -4436,6 +4329,17 @@ dependencies = [ "types", ] +[[package]] +name = "p256" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" +dependencies = [ + "ecdsa", + "elliptic-curve", + "sha2 0.10.2", +] + [[package]] name = "parity-scale-codec" version = "2.3.1" @@ -4611,33 +4515,13 @@ dependencies = [ "rustc_version 0.4.0", ] -[[package]] -name = "pin-project" -version = "0.4.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ef0f924a5ee7ea9cbcea77529dba45f8a9ba9f622419fe3386ca581a3ae9d5a" -dependencies = [ - "pin-project-internal 0.4.30", -] - [[package]] name = "pin-project" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" dependencies = [ - "pin-project-internal 1.0.11", -] - -[[package]] -name = "pin-project-internal" -version = 
"0.4.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "851c8d0ce9bebe43790dedfc86614c23494ac9f423dd618d3a61fc693eafe61e" -dependencies = [ - "proc-macro2", - "quote", - "syn", + "pin-project-internal", ] [[package]] @@ -4671,23 +4555,12 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs8" -version = "0.6.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9c2f795bc591cb3384cb64082a578b89207ac92bb89c9d98c1ea2ace7cd8110" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ - "der 0.3.5", - "spki 0.3.0", -] - -[[package]] -name = "pkcs8" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" -dependencies = [ - "der 0.5.1", - "spki 0.5.4", - "zeroize", + "der", + "spki", ] [[package]] @@ -4884,16 +4757,6 @@ dependencies = [ "syn", ] -[[package]] -name = "prost" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" -dependencies = [ - "bytes", - "prost-derive 0.9.0", -] - [[package]] name = "prost" version = "0.11.0" @@ -4901,27 +4764,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "399c3c31cdec40583bb68f0b18403400d01ec4289c383aa047560439952c4dd7" dependencies = [ "bytes", - "prost-derive 0.11.0", -] - -[[package]] -name = "prost-build" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" -dependencies = [ - "bytes", - "heck 0.3.3", - "itertools", - "lazy_static", - "log", - "multimap", - "petgraph", - "prost 0.9.0", - "prost-types 0.9.0", - "regex", - "tempfile", - "which", + "prost-derive", ] [[package]] @@ -4931,14 +4774,14 @@ source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f835c582e6bd972ba8347313300219fed5bfa52caf175298d860b61ff6069bb" dependencies = [ "bytes", - "heck 0.4.0", + "heck", "itertools", "lazy_static", "log", "multimap", "petgraph", - "prost 0.11.0", - "prost-types 0.11.1", + "prost", + "prost-types", "regex", "tempfile", "which", @@ -4952,24 +4795,11 @@ checksum = "011ae9ff8359df7915f97302d591cdd9e0e27fbd5a4ddc5bd13b71079bb20987" dependencies = [ "asynchronous-codec", "bytes", - "prost 0.11.0", + "prost", "thiserror", "unsigned-varint 0.7.1", ] -[[package]] -name = "prost-derive" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "prost-derive" version = "0.11.0" @@ -4983,16 +4813,6 @@ dependencies = [ "syn", ] -[[package]] -name = "prost-types" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" -dependencies = [ - "bytes", - "prost 0.9.0", -] - [[package]] name = "prost-types" version = "0.11.1" @@ -5000,7 +4820,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dfaa718ad76a44b3415e6c4d53b17c8f99160dcb3a99b10470fce8ad43f6e3e" dependencies = [ "bytes", - "prost 0.11.0", + "prost", ] [[package]] @@ -5336,12 +5156,12 @@ dependencies = [ [[package]] name = "rfc6979" -version = "0.1.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" +checksum = "88c86280f057430a52f4861551b092a01b419b8eacefc7c995eacb9dc132fe32" dependencies = [ "crypto-bigint", - "hmac 0.11.0", + "hmac 0.12.1", "zeroize", ] @@ -5412,6 +5232,17 @@ dependencies = [ "smallvec", ] +[[package]] +name = "rust_decimal" 
+version = "1.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee9164faf726e4f3ece4978b25ca877ddc6802fa77f38cdccb32c7f805ecd70c" +dependencies = [ + "arrayvec", + "num-traits", + "serde", +] + [[package]] name = "rustc-demangle" version = "0.1.21" @@ -5497,17 +5328,6 @@ version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24c8ad4f0c00e1eb5bc7614d236a7f1300e3dbd76b68cac8e06fb00b015ad8d8" -[[package]] -name = "rw-stream-sink" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" -dependencies = [ - "futures", - "pin-project 0.4.30", - "static_assertions", -] - [[package]] name = "rw-stream-sink" version = "0.3.0" @@ -5515,7 +5335,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26338f5e09bb721b85b135ea05af7767c90b52f6de4f087d4f4a3a9d64e7dc04" dependencies = [ "futures", - "pin-project 1.0.11", + "pin-project", "static_assertions", ] @@ -5618,13 +5438,14 @@ dependencies = [ [[package]] name = "sec1" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ - "der 0.5.1", + "base16ct", + "der", "generic-array", - "pkcs8 0.8.0", + "pkcs8", "subtle", "zeroize", ] @@ -5927,11 +5748,11 @@ dependencies = [ [[package]] name = "signature" -version = "1.3.2" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2807892cfa58e081aa1f1111391c7a0649d4fa127a4ffbe34bcbfb35a1171a4" +checksum = "deb766570a2825fa972bceff0d195727876a9cdf2460ab2e52d455dc2de47fd9" dependencies = [ - "digest 0.9.0", + "digest 0.10.3", "rand_core 0.6.3", ] @@ -6215,21 +6036,12 @@ checksum = 
"6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spki" -version = "0.3.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dae7e047abc519c96350e9484a96c6bf1492348af912fd3446dd2dc323f6268" -dependencies = [ - "der 0.3.5", -] - -[[package]] -name = "spki" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ "base64ct", - "der 0.5.1", + "der", ] [[package]] @@ -6354,7 +6166,7 @@ version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4faebde00e8ff94316c01800f9054fd2ba77d30d9e922541913051d1d978918b" dependencies = [ - "heck 0.4.0", + "heck", "proc-macro2", "quote", "rustversion", @@ -6741,7 +6553,7 @@ checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" dependencies = [ "futures-util", "log", - "pin-project 1.0.11", + "pin-project", "tokio", "tungstenite 0.14.0", ] @@ -6809,7 +6621,7 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", - "pin-project 1.0.11", + "pin-project", "pin-project-lite 0.2.9", "tokio", "tower-layer", @@ -6888,7 +6700,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 1.0.11", + "pin-project", "tracing", ] @@ -7174,12 +6986,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99" - [[package]] name = "unicode-width" version = "0.1.9" @@ -7397,7 +7203,7 @@ dependencies = [ "mime_guess", 
"multipart", "percent-encoding", - "pin-project 1.0.11", + "pin-project", "scoped-tls", "serde", "serde_json", @@ -7583,7 +7389,7 @@ dependencies = [ "log", "once_cell", "parking_lot 0.12.1", - "pin-project 1.0.11", + "pin-project", "reqwest", "rlp", "secp256k1", diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 770bc4cf8c..bfc748d5b6 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -37,11 +37,11 @@ rand = "0.8.5" zeroize = { version = "1.4.2", features = ["zeroize_derive"] } lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lazy_static = "1.4.0" -ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } +ethers-core = "0.17.0" builder_client = { path = "../builder_client" } fork_choice = { path = "../../consensus/fork_choice" } -mev-build-rs = {git = "https://github.com/ralexstokes/mev-rs", rev = "a088806575805c00d63fa59c002abc5eb1dc7709"} -ethereum-consensus = {git = "https://github.com/ralexstokes/ethereum-consensus", rev = "e1188b1" } -ssz-rs = {git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f1" } +mev-build-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "6c99b0fbdc0427b1625469d2e575303ce08de5b8" } +ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "a8110af76d97bf2bf27fb987a671808fcbdf1834" } +ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f1" } tokio-stream = { version = "0.1.9", features = [ "sync" ] } strum = "0.24.0" diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index e5af0a7499..977f0a1088 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2021" [dependencies] -discv5 = { version = "0.1.0-beta.13", features = ["libp2p"] } +discv5 = { version = 
"0.1.0", features = ["libp2p"] } unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } eth2_ssz_types = "0.2.2" @@ -26,7 +26,7 @@ smallvec = "1.6.1" tokio-io-timeout = "1.1.1" lru = "0.7.1" parking_lot = "0.12.0" -sha2 = "0.9.1" +sha2 = "0.10" snap = "1.0.1" hex = "0.4.2" tokio-util = { version = "0.6.2", features = ["codec", "compat", "time"] } diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 263ef0c7cb..71566b8778 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -176,7 +176,7 @@ impl Default for Config { .filter_rate_limiter(filter_rate_limiter) .filter_max_bans_per_ip(Some(5)) .filter_max_nodes_per_ip(Some(10)) - .table_filter(|enr| enr.ip().map_or(false, |ip| is_global(&ip))) // Filter non-global IPs + .table_filter(|enr| enr.ip4().map_or(false, |ip| is_global(&ip))) // Filter non-global IPs .ban_duration(Some(Duration::from_secs(3600))) .ping_interval(Duration::from_secs(300)) .build(); diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index b513ede59f..6b4b87a5f8 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -149,12 +149,12 @@ pub fn create_enr_builder_from_config( builder.ip(enr_address); } if let Some(udp_port) = config.enr_udp_port { - builder.udp(udp_port); + builder.udp4(udp_port); } // we always give it our listening tcp port if enable_tcp { let tcp_port = config.enr_tcp_port.unwrap_or(config.libp2p_port); - builder.tcp(tcp_port); + builder.tcp4(tcp_port); } builder } @@ -189,13 +189,13 @@ pub fn build_enr( /// If this function returns true, we use the `disk_enr`. 
fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { // take preference over disk_enr address if one is not specified - (local_enr.ip().is_none() || local_enr.ip() == disk_enr.ip()) + (local_enr.ip4().is_none() || local_enr.ip4() == disk_enr.ip4()) // tcp ports must match - && local_enr.tcp() == disk_enr.tcp() + && local_enr.tcp4() == disk_enr.tcp4() // must match on the same fork && local_enr.get(ETH2_ENR_KEY) == disk_enr.get(ETH2_ENR_KEY) // take preference over disk udp port if one is not specified - && (local_enr.udp().is_none() || local_enr.udp() == disk_enr.udp()) + && (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4()) // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY key to match, // otherwise we use a new ENR. This will likely only be true for non-validating nodes && local_enr.get(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get(ATTESTATION_BITFIELD_ENR_KEY) diff --git a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs index 1001efe231..e9cca6667a 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs @@ -48,14 +48,14 @@ impl EnrExt for Enr { /// The vector remains empty if these fields are not defined. 
fn multiaddr(&self) -> Vec { let mut multiaddrs: Vec = Vec::new(); - if let Some(ip) = self.ip() { - if let Some(udp) = self.udp() { + if let Some(ip) = self.ip4() { + if let Some(udp) = self.udp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Udp(udp)); multiaddrs.push(multiaddr); } - if let Some(tcp) = self.tcp() { + if let Some(tcp) = self.tcp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Tcp(tcp)); multiaddrs.push(multiaddr); @@ -84,15 +84,15 @@ impl EnrExt for Enr { fn multiaddr_p2p(&self) -> Vec { let peer_id = self.peer_id(); let mut multiaddrs: Vec = Vec::new(); - if let Some(ip) = self.ip() { - if let Some(udp) = self.udp() { + if let Some(ip) = self.ip4() { + if let Some(udp) = self.udp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Udp(udp)); multiaddr.push(Protocol::P2p(peer_id.into())); multiaddrs.push(multiaddr); } - if let Some(tcp) = self.tcp() { + if let Some(tcp) = self.tcp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Tcp(tcp)); multiaddr.push(Protocol::P2p(peer_id.into())); @@ -124,8 +124,8 @@ impl EnrExt for Enr { fn multiaddr_p2p_tcp(&self) -> Vec { let peer_id = self.peer_id(); let mut multiaddrs: Vec = Vec::new(); - if let Some(ip) = self.ip() { - if let Some(tcp) = self.tcp() { + if let Some(ip) = self.ip4() { + if let Some(tcp) = self.tcp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Tcp(tcp)); multiaddr.push(Protocol::P2p(peer_id.into())); @@ -150,8 +150,8 @@ impl EnrExt for Enr { fn multiaddr_p2p_udp(&self) -> Vec { let peer_id = self.peer_id(); let mut multiaddrs: Vec = Vec::new(); - if let Some(ip) = self.ip() { - if let Some(udp) = self.udp() { + if let Some(ip) = self.ip4() { + if let Some(udp) = self.udp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Udp(udp)); multiaddr.push(Protocol::P2p(peer_id.into())); @@ -173,8 +173,8 @@ impl EnrExt for Enr { /// The vector remains empty if 
these fields are not defined. fn multiaddr_tcp(&self) -> Vec { let mut multiaddrs: Vec = Vec::new(); - if let Some(ip) = self.ip() { - if let Some(tcp) = self.tcp() { + if let Some(ip) = self.ip4() { + if let Some(tcp) = self.tcp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Tcp(tcp)); multiaddrs.push(multiaddr); @@ -232,6 +232,7 @@ impl CombinedKeyExt for CombinedKey { .expect("libp2p key must be valid"); Ok(CombinedKey::from(ed_keypair)) } + Keypair::Ecdsa(_) => Err("Ecdsa keypairs not supported"), } } } @@ -265,6 +266,10 @@ pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result Err(format!( + "Unsupported public key (Ecdsa) from peer {}", + peer_id + )), } } diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index d766fd23a3..3535c6bd9a 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -197,7 +197,9 @@ impl Discovery { let local_enr = network_globals.local_enr.read().clone(); - info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), "ip" => ?local_enr.ip(), "udp"=> ?local_enr.udp(), "tcp" => ?local_enr.tcp()); + info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), + "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp6() + ); let listen_socket = SocketAddr::new(config.listen_address, config.discovery_port); @@ -214,9 +216,9 @@ impl Discovery { "Adding node to routing table"; "node_id" => %bootnode_enr.node_id(), "peer_id" => %bootnode_enr.peer_id(), - "ip" => ?bootnode_enr.ip(), - "udp" => ?bootnode_enr.udp(), - "tcp" => ?bootnode_enr.tcp() + "ip" => ?bootnode_enr.ip4(), + "udp" => ?bootnode_enr.udp4(), + "tcp" => ?bootnode_enr.tcp4() ); let repr = bootnode_enr.to_string(); let _ = discv5.add_enr(bootnode_enr).map_err(|e| { @@ -268,9 +270,9 @@ 
impl Discovery { "Adding node to routing table"; "node_id" => %enr.node_id(), "peer_id" => %enr.peer_id(), - "ip" => ?enr.ip(), - "udp" => ?enr.udp(), - "tcp" => ?enr.tcp() + "ip" => ?enr.ip4(), + "udp" => ?enr.udp4(), + "tcp" => ?enr.tcp4() ); let _ = discv5.add_enr(enr).map_err(|e| { error!( @@ -763,7 +765,7 @@ impl Discovery { // we can connect to peers who aren't compatible with an upcoming fork. // `fork_digest` **must** be same. enr.eth2().map(|e| e.fork_digest) == Ok(enr_fork_id.fork_digest) - && (enr.tcp().is_some() || enr.tcp6().is_some()) + && (enr.tcp4().is_some() || enr.tcp6().is_some()) }; // General predicate @@ -1040,7 +1042,8 @@ impl NetworkBehaviour for Discovery { } Discv5Event::EnrAdded { .. } | Discv5Event::TalkRequest(_) - | Discv5Event::NodeInserted { .. } => {} // Ignore all other discv5 server events + | Discv5Event::NodeInserted { .. } + | Discv5Event::SessionEstablished { .. } => {} // Ignore all other discv5 server events } } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index a19c6db657..c84e368f16 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -130,7 +130,7 @@ impl NetworkBehaviour for PeerManager { } // Check NAT if metrics are enabled - if self.network_globals.local_enr.read().udp().is_some() { + if self.network_globals.local_enr.read().udp4().is_some() { metrics::check_nat(); } diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index 6b7f0bf48e..c4bf887e94 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -11,7 +11,10 @@ use types::EthSpec; pub async fn run(config: BootNodeConfig, log: slog::Logger) { // Print out useful information about the generated ENR - let enr_socket = config.local_enr.udp_socket().expect("Enr has a UDP socket"); + let enr_socket = config + .local_enr + 
.udp4_socket() + .expect("Enr has a UDP socket"); let eth2_field = config .local_enr .eth2() @@ -39,7 +42,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { info!( log, "Adding bootnode"; - "address" => ?enr.udp_socket(), + "address" => ?enr.udp4_socket(), "peer_id" => enr.peer_id().to_string(), "node_id" => enr.node_id().to_string() ); @@ -89,11 +92,12 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { // Ignore these events here } Discv5Event::EnrAdded { .. } => {} // Ignore - Discv5Event::TalkRequest(_) => {} // Ignore + Discv5Event::TalkRequest(_) => {} // Ignore Discv5Event::NodeInserted { .. } => {} // Ignore Discv5Event::SocketUpdated(socket_addr) => { info!(log, "External socket address updated"; "socket_addr" => format!("{:?}", socket_addr)); } + Discv5Event::SessionEstablished{ .. } => {} // Ignore } } } diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index 7c3d183940..7be0e8f3d2 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -9,7 +9,7 @@ build = "build.rs" [build-dependencies] reqwest = { version = "0.11.0", features = ["blocking", "json", "native-tls-vendored"] } serde_json = "1.0.58" -sha2 = "0.9.1" +sha2 = "0.10" hex = "0.4.2" [dependencies] diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 32cee89f7f..6199005552 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -18,4 +18,4 @@ serde_yaml = "0.8.13" types = { path = "../../consensus/types"} eth2_ssz = "0.4.1" eth2_config = { path = "../eth2_config"} -enr = { version = "0.5.1", features = ["ed25519", "k256"] } +enr = { version = "0.6.2", features = ["ed25519", "k256"] } diff --git a/crypto/eth2_hashing/Cargo.toml b/crypto/eth2_hashing/Cargo.toml index 28f106fcbb..db296c70fe 100644 --- a/crypto/eth2_hashing/Cargo.toml +++ b/crypto/eth2_hashing/Cargo.toml @@ -10,7 +10,7 @@ description = "Hashing 
primitives used in Ethereum 2.0" lazy_static = { version = "1.4.0", optional = true } cpufeatures = { version = "0.2.5", optional = true } ring = "0.16.19" -sha2 = "0.10.2" +sha2 = "0.10" [dev-dependencies] rustc-hex = "2.1.0" diff --git a/crypto/eth2_key_derivation/Cargo.toml b/crypto/eth2_key_derivation/Cargo.toml index 43a8fe88b9..3f174a02d4 100644 --- a/crypto/eth2_key_derivation/Cargo.toml +++ b/crypto/eth2_key_derivation/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -sha2 = "0.9.1" +sha2 = "0.10" zeroize = { version = "1.4.2", features = ["zeroize_derive"] } num-bigint-dig = { version = "0.6.0", features = ["zeroize"] } ring = "0.16.19" diff --git a/crypto/eth2_keystore/Cargo.toml b/crypto/eth2_keystore/Cargo.toml index 98521c8fbb..d83a60da24 100644 --- a/crypto/eth2_keystore/Cargo.toml +++ b/crypto/eth2_keystore/Cargo.toml @@ -11,7 +11,7 @@ rand = "0.8.5" hmac = "0.11.0" pbkdf2 = { version = "0.8.0", default-features = false } scrypt = { version = "0.7.0", default-features = false } -sha2 = "0.9.1" +sha2 = "0.9.2" uuid = { version = "0.8.1", features = ["serde", "v4"] } zeroize = { version = "1.4.2", features = ["zeroize_derive"] } serde = "1.0.116" diff --git a/lighthouse/tests/boot_node.rs b/lighthouse/tests/boot_node.rs index 1c11ae046e..8c000bbb3d 100644 --- a/lighthouse/tests/boot_node.rs +++ b/lighthouse/tests/boot_node.rs @@ -56,7 +56,7 @@ impl CommandLineTestExec for CommandLineTest { fn enr_address_arg() { let mut test = CommandLineTest::new(); test.run_with_ip().with_config(|config| { - assert_eq!(config.local_enr.ip(), Some(IP_ADDRESS.parse().unwrap())); + assert_eq!(config.local_enr.ip4(), Some(IP_ADDRESS.parse().unwrap())); }); } @@ -127,7 +127,7 @@ fn enr_port_flag() { .flag("enr-port", Some(port.to_string().as_str())) .run_with_ip() .with_config(|config| { - assert_eq!(config.local_enr.udp(), Some(port)); + 
assert_eq!(config.local_enr.udp4(), Some(port)); }) } diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 7a8d7e99b5..a85138be95 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -15,8 +15,8 @@ execution_layer = { path = "../../beacon_node/execution_layer" } sensitive_url = { path = "../../common/sensitive_url" } types = { path = "../../consensus/types" } unused_port = { path = "../../common/unused_port" } -ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } -ethers-providers = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } +ethers-core = "0.17.0" +ethers-providers = "0.17.0" deposit_contract = { path = "../../common/deposit_contract" } reqwest = { version = "0.11.0", features = ["json"] } hex = "0.4.2" From e8604757a20461738686dd07dbc909834cb9c474 Mon Sep 17 00:00:00 2001 From: ethDreamer Date: Sun, 30 Oct 2022 04:04:24 +0000 Subject: [PATCH 10/37] Deposit Cache Finalization & Fast WS Sync (#2915) ## Summary The deposit cache now has the ability to finalize deposits. This will cause it to drop unneeded deposit logs and hashes in the deposit Merkle tree that are no longer required to construct deposit proofs. The cache is finalized whenever the latest finalized checkpoint has a new `Eth1Data` with all deposits imported. This has three benefits: 1. Improves the speed of constructing Merkle proofs for deposits as we can just replay deposits since the last finalized checkpoint instead of all historical deposits when re-constructing the Merkle tree. 2. Significantly faster weak subjectivity sync as the deposit cache can be transferred to the newly syncing node in compressed form. The Merkle tree that stores `N` finalized deposits requires a maximum of `log2(N)` hashes. 
The newly syncing node then only needs to download deposits since the last finalized checkpoint to have a full tree. 3. Future proofing in preparation for [EIP-4444](https://eips.ethereum.org/EIPS/eip-4444) as execution nodes will no longer be required to store logs permanently so we won't always have all historical logs available to us. ## More Details Image to illustrate how the deposit contract merkle tree evolves and finalizes along with the resulting `DepositTreeSnapshot` ![image](https://user-images.githubusercontent.com/37123614/151465302-5fc56284-8a69-4998-b20e-45db3934ac70.png) ## Other Considerations I've changed the structure of the `SszDepositCache` so once you load & save your database from this version of lighthouse, you will no longer be able to load it from older versions. Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com> --- Cargo.lock | 3 +- beacon_node/beacon_chain/src/beacon_chain.rs | 70 +- .../beacon_chain/src/block_verification.rs | 8 + beacon_node/beacon_chain/src/builder.rs | 2 + beacon_node/beacon_chain/src/eth1_chain.rs | 25 +- .../src/eth1_finalization_cache.rs | 498 +++++++++++ beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/schema_change.rs | 96 +- .../src/schema_change/migration_schema_v13.rs | 150 ++++ beacon_node/beacon_chain/src/test_utils.rs | 5 +- beacon_node/client/src/builder.rs | 79 +- beacon_node/eth1/Cargo.toml | 1 + beacon_node/eth1/src/block_cache.rs | 52 +- beacon_node/eth1/src/deposit_cache.rs | 820 +++++++++++++++--- beacon_node/eth1/src/inner.rs | 46 +- beacon_node/eth1/src/lib.rs | 4 +- beacon_node/eth1/src/service.rs | 105 ++- beacon_node/eth1/tests/test.rs | 4 +- .../execution_layer/src/engine_api/http.rs | 14 +- beacon_node/genesis/src/common.rs | 4 +- .../genesis/src/eth1_genesis_service.rs | 2 +- beacon_node/http_api/src/lib.rs | 48 + beacon_node/store/src/metadata.rs | 2 +- common/eth2/src/lib.rs | 16 + common/eth2/src/lighthouse.rs | 18 +- 
consensus/merkle_proof/src/lib.rs | 185 +++- consensus/ssz/src/decode/impls.rs | 14 + consensus/ssz/src/encode/impls.rs | 36 + consensus/ssz/tests/tests.rs | 18 + .../src/common/deposit_data_tree.rs | 57 +- consensus/types/Cargo.toml | 2 +- consensus/types/src/deposit_tree_snapshot.rs | 83 ++ consensus/types/src/lib.rs | 2 + database_manager/src/lib.rs | 1 + validator_client/src/lib.rs | 2 + 35 files changed, 2302 insertions(+), 171 deletions(-) create mode 100644 beacon_node/beacon_chain/src/eth1_finalization_cache.rs create mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs create mode 100644 consensus/types/src/deposit_tree_snapshot.rs diff --git a/Cargo.lock b/Cargo.lock index 04cfd42350..6d65ccb48c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1638,6 +1638,7 @@ dependencies = [ "slog", "sloggers", "state_processing", + "superstruct", "task_executor", "tokio", "tree_hash", @@ -6884,7 +6885,7 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "types" -version = "0.2.0" +version = "0.2.1" dependencies = [ "arbitrary", "beacon_chain", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 86b43a1a39..b23dd30de0 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -16,6 +16,7 @@ use crate::chain_config::ChainConfig; use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; +use crate::eth1_finalization_cache::{Eth1FinalizationCache, Eth1FinalizationData}; use crate::events::ServerSentEventHandler; use crate::execution_payload::{get_execution_payload, PreparePayloadHandle}; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; @@ -117,6 +118,9 @@ pub const ATTESTATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); 
/// validator pubkey cache. pub const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); +/// The timeout for the eth1 finalization cache +pub const ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_millis(200); + // These keys are all zero because they get stored in different columns, see `DBColumn` type. pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero(); pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero(); @@ -359,6 +363,8 @@ pub struct BeaconChain { pub(crate) snapshot_cache: TimeoutRwLock>, /// Caches the attester shuffling for a given epoch and shuffling key root. pub shuffling_cache: TimeoutRwLock, + /// A cache of eth1 deposit data at epoch boundaries for deposit finalization + pub eth1_finalization_cache: TimeoutRwLock, /// Caches the beacon block proposer shuffling for a given epoch and shuffling key root. pub beacon_proposer_cache: Mutex, /// Caches a map of `validator_index -> validator_pubkey`. @@ -2531,9 +2537,10 @@ impl BeaconChain { block, block_root, state, - parent_block: _, + parent_block, confirmed_state_roots, payload_verification_handle, + parent_eth1_finalization_data, } = execution_pending_block; let PayloadVerificationOutcome { @@ -2585,6 +2592,8 @@ impl BeaconChain { confirmed_state_roots, payload_verification_status, count_unrealized, + parent_block, + parent_eth1_finalization_data, ) }, "payload_verification_handle", @@ -2599,6 +2608,7 @@ impl BeaconChain { /// /// An error is returned if the block was unable to be imported. It may be partially imported /// (i.e., this function is not atomic). 
+ #[allow(clippy::too_many_arguments)] fn import_block( &self, signed_block: Arc>, @@ -2607,6 +2617,8 @@ impl BeaconChain { confirmed_state_roots: Vec, payload_verification_status: PayloadVerificationStatus, count_unrealized: CountUnrealized, + parent_block: SignedBlindedBeaconBlock, + parent_eth1_finalization_data: Eth1FinalizationData, ) -> Result> { let current_slot = self.slot()?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); @@ -2987,6 +2999,11 @@ impl BeaconChain { let parent_root = block.parent_root(); let slot = block.slot(); + let current_eth1_finalization_data = Eth1FinalizationData { + eth1_data: state.eth1_data().clone(), + eth1_deposit_index: state.eth1_deposit_index(), + }; + let current_finalized_checkpoint = state.finalized_checkpoint(); self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .ok_or(Error::SnapshotCacheLockTimeout) @@ -3060,6 +3077,57 @@ impl BeaconChain { ); } + // Do not write to eth1 finalization cache for blocks older than 5 epochs + // this helps reduce noise during sync + if block_delay_total + < self.slot_clock.slot_duration() * 5 * (T::EthSpec::slots_per_epoch() as u32) + { + let parent_block_epoch = parent_block.slot().epoch(T::EthSpec::slots_per_epoch()); + if parent_block_epoch < current_epoch { + // we've crossed epoch boundary, store Eth1FinalizationData + let (checkpoint, eth1_finalization_data) = + if current_slot % T::EthSpec::slots_per_epoch() == 0 { + // current block is the checkpoint + ( + Checkpoint { + epoch: current_epoch, + root: block_root, + }, + current_eth1_finalization_data, + ) + } else { + // parent block is the checkpoint + ( + Checkpoint { + epoch: current_epoch, + root: parent_block.canonical_root(), + }, + parent_eth1_finalization_data, + ) + }; + + if let Some(finalized_eth1_data) = self + .eth1_finalization_cache + .try_write_for(ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT) + .and_then(|mut cache| { + cache.insert(checkpoint, eth1_finalization_data); + 
cache.finalize(¤t_finalized_checkpoint) + }) + { + if let Some(eth1_chain) = self.eth1_chain.as_ref() { + let finalized_deposit_count = finalized_eth1_data.deposit_count; + eth1_chain.finalize_eth1_data(finalized_eth1_data); + debug!( + self.log, + "called eth1_chain.finalize_eth1_data()"; + "epoch" => current_finalized_checkpoint.epoch, + "deposit count" => finalized_deposit_count, + ); + } + } + } + } + // Inform the unknown block cache, in case it was waiting on this block. self.pre_finalization_block_cache .block_processed(block_root); diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 7f59f1cfec..104de57dbf 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -42,6 +42,7 @@ //! END //! //! ``` +use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, AllowOptimisticImport, PayloadNotifier, @@ -622,6 +623,7 @@ pub struct ExecutionPendingBlock { pub block_root: Hash256, pub state: BeaconState, pub parent_block: SignedBeaconBlock>, + pub parent_eth1_finalization_data: Eth1FinalizationData, pub confirmed_state_roots: Vec, pub payload_verification_handle: PayloadVerificationHandle, } @@ -1164,6 +1166,11 @@ impl ExecutionPendingBlock { .into()); } + let parent_eth1_finalization_data = Eth1FinalizationData { + eth1_data: state.eth1_data().clone(), + eth1_deposit_index: state.eth1_deposit_index(), + }; + let distance = block.slot().as_u64().saturating_sub(state.slot().as_u64()); for _ in 0..distance { let state_root = if parent.beacon_block.slot() == state.slot() { @@ -1419,6 +1426,7 @@ impl ExecutionPendingBlock { block_root, state, parent_block: parent.beacon_block, + parent_eth1_finalization_data, confirmed_state_roots, payload_verification_handle, }) diff --git 
a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 051b84f816..58bbb2b5c6 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,5 +1,6 @@ use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; use crate::eth1_chain::{CachingEth1Backend, SszEth1}; +use crate::eth1_finalization_cache::Eth1FinalizationCache; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use crate::head_tracker::HeadTracker; @@ -795,6 +796,7 @@ where head_for_snapshot_cache, )), shuffling_cache: TimeoutRwLock::new(ShufflingCache::new()), + eth1_finalization_cache: TimeoutRwLock::new(Eth1FinalizationCache::new(log.clone())), beacon_proposer_cache: <_>::default(), block_times_cache: <_>::default(), pre_finalization_block_cache: <_>::default(), diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 3d24becc84..25971bf85b 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -16,7 +16,6 @@ use store::{DBColumn, Error as StoreError, StoreItem}; use task_executor::TaskExecutor; use types::{ BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned, - DEPOSIT_TREE_DEPTH, }; type BlockNumber = u64; @@ -170,8 +169,8 @@ fn get_sync_status( #[derive(Encode, Decode, Clone)] pub struct SszEth1 { - use_dummy_backend: bool, - backend_bytes: Vec, + pub use_dummy_backend: bool, + pub backend_bytes: Vec, } impl StoreItem for SszEth1 { @@ -305,6 +304,12 @@ where } } + /// Set in motion the finalization of `Eth1Data`. This method is called during block import + /// so it should be fast. + pub fn finalize_eth1_data(&self, eth1_data: Eth1Data) { + self.backend.finalize_eth1_data(eth1_data); + } + /// Consumes `self`, returning the backend. 
pub fn into_backend(self) -> T { self.backend @@ -335,6 +340,10 @@ pub trait Eth1ChainBackend: Sized + Send + Sync { /// beacon node eth1 cache is. fn latest_cached_block(&self) -> Option; + /// Set in motion the finalization of `Eth1Data`. This method is called during block import + /// so it should be fast. + fn finalize_eth1_data(&self, eth1_data: Eth1Data); + /// Returns the block at the head of the chain (ignoring follow distance, etc). Used to obtain /// an idea of how up-to-date the remote eth1 node is. fn head_block(&self) -> Option; @@ -389,6 +398,8 @@ impl Eth1ChainBackend for DummyEth1ChainBackend { None } + fn finalize_eth1_data(&self, _eth1_data: Eth1Data) {} + fn head_block(&self) -> Option { None } @@ -547,7 +558,7 @@ impl Eth1ChainBackend for CachingEth1Backend { .deposits() .read() .cache - .get_deposits(next, last, deposit_count, DEPOSIT_TREE_DEPTH) + .get_deposits(next, last, deposit_count) .map_err(|e| Error::BackendError(format!("Failed to get deposits: {:?}", e))) .map(|(_deposit_root, deposits)| deposits) } @@ -558,6 +569,12 @@ impl Eth1ChainBackend for CachingEth1Backend { self.core.latest_cached_block() } + /// This only writes the eth1_data to a temporary cache so that the service + /// thread can later do the actual finalizing of the deposit tree. + fn finalize_eth1_data(&self, eth1_data: Eth1Data) { + self.core.set_to_finalize(Some(eth1_data)); + } + fn head_block(&self) -> Option { self.core.head_block() } diff --git a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs new file mode 100644 index 0000000000..7cf805a126 --- /dev/null +++ b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs @@ -0,0 +1,498 @@ +use slog::{debug, Logger}; +use std::cmp; +use std::collections::BTreeMap; +use types::{Checkpoint, Epoch, Eth1Data, Hash256 as Root}; + +/// The default size of the cache. +/// The beacon chain only looks at the last 4 epochs for finalization. 
+/// Add 1 for current epoch and 4 earlier epochs. +pub const DEFAULT_ETH1_CACHE_SIZE: usize = 5; + +/// These fields are named the same as the corresponding fields in the `BeaconState` +/// as this structure stores these values from the `BeaconState` at a `Checkpoint` +#[derive(Clone)] +pub struct Eth1FinalizationData { + pub eth1_data: Eth1Data, + pub eth1_deposit_index: u64, +} + +impl Eth1FinalizationData { + /// Ensures the deposit finalization conditions have been met. See: + /// https://eips.ethereum.org/EIPS/eip-4881#deposit-finalization-conditions + fn fully_imported(&self) -> bool { + self.eth1_deposit_index >= self.eth1_data.deposit_count + } +} + +/// Implements map from Checkpoint -> Eth1CacheData +pub struct CheckpointMap { + capacity: usize, + // There shouldn't be more than a couple of potential checkpoints at the same + // epoch. Searching through a vector for the matching Root should be faster + // than using another map from Root->Eth1CacheData + store: BTreeMap>, +} + +impl Default for CheckpointMap { + fn default() -> Self { + Self::new() + } +} + +/// Provides a map of `Eth1CacheData` referenced by `Checkpoint` +/// +/// ## Cache Queuing +/// +/// The cache keeps a maximum number of (`capacity`) epochs. Because there may be +/// forks at the epoch boundary, it's possible that there exists more than one +/// `Checkpoint` for the same `Epoch`. This cache will store all checkpoints for +/// a given `Epoch`. 
When adding data for a new `Checkpoint` would cause the number +/// of `Epoch`s stored to exceed `capacity`, the data for oldest `Epoch` is dropped +impl CheckpointMap { + pub fn new() -> Self { + CheckpointMap { + capacity: DEFAULT_ETH1_CACHE_SIZE, + store: BTreeMap::new(), + } + } + + pub fn with_capacity(capacity: usize) -> Self { + CheckpointMap { + capacity: cmp::max(1, capacity), + store: BTreeMap::new(), + } + } + + pub fn insert(&mut self, checkpoint: Checkpoint, eth1_finalization_data: Eth1FinalizationData) { + self.store + .entry(checkpoint.epoch) + .or_insert_with(Vec::new) + .push((checkpoint.root, eth1_finalization_data)); + + // faster to reduce size after the fact than do pre-checking to see + // if the current data would increase the size of the BTreeMap + while self.store.len() > self.capacity { + let oldest_stored_epoch = self.store.keys().next().cloned().unwrap(); + self.store.remove(&oldest_stored_epoch); + } + } + + pub fn get(&self, checkpoint: &Checkpoint) -> Option<&Eth1FinalizationData> { + match self.store.get(&checkpoint.epoch) { + Some(vec) => { + for (root, data) in vec { + if *root == checkpoint.root { + return Some(data); + } + } + None + } + None => None, + } + } + + #[cfg(test)] + pub fn len(&self) -> usize { + self.store.len() + } +} + +/// This cache stores `Eth1CacheData` that could potentially be finalized within 4 +/// future epochs. +pub struct Eth1FinalizationCache { + by_checkpoint: CheckpointMap, + pending_eth1: BTreeMap, + last_finalized: Option, + log: Logger, +} + +/// Provides a cache of `Eth1CacheData` at epoch boundaries. This is used to +/// finalize deposits when a new epoch is finalized. 
+/// +impl Eth1FinalizationCache { + pub fn new(log: Logger) -> Self { + Eth1FinalizationCache { + by_checkpoint: CheckpointMap::new(), + pending_eth1: BTreeMap::new(), + last_finalized: None, + log, + } + } + + pub fn with_capacity(log: Logger, capacity: usize) -> Self { + Eth1FinalizationCache { + by_checkpoint: CheckpointMap::with_capacity(capacity), + pending_eth1: BTreeMap::new(), + last_finalized: None, + log, + } + } + + pub fn insert(&mut self, checkpoint: Checkpoint, eth1_finalization_data: Eth1FinalizationData) { + if !eth1_finalization_data.fully_imported() { + self.pending_eth1.insert( + eth1_finalization_data.eth1_data.deposit_count, + eth1_finalization_data.eth1_data.clone(), + ); + debug!( + self.log, + "Eth1Cache: inserted pending eth1"; + "eth1_data.deposit_count" => eth1_finalization_data.eth1_data.deposit_count, + "eth1_deposit_index" => eth1_finalization_data.eth1_deposit_index, + ); + } + self.by_checkpoint + .insert(checkpoint, eth1_finalization_data); + } + + pub fn finalize(&mut self, checkpoint: &Checkpoint) -> Option { + if let Some(eth1_finalized_data) = self.by_checkpoint.get(checkpoint) { + let finalized_deposit_index = eth1_finalized_data.eth1_deposit_index; + let mut result = None; + while let Some(pending_count) = self.pending_eth1.keys().next().cloned() { + if finalized_deposit_index >= pending_count { + result = self.pending_eth1.remove(&pending_count); + debug!( + self.log, + "Eth1Cache: dropped pending eth1"; + "pending_count" => pending_count, + "finalized_deposit_index" => finalized_deposit_index, + ); + } else { + break; + } + } + if eth1_finalized_data.fully_imported() { + result = Some(eth1_finalized_data.eth1_data.clone()) + } + if result.is_some() { + self.last_finalized = result; + } + self.last_finalized.clone() + } else { + debug!( + self.log, + "Eth1Cache: cache miss"; + "epoch" => checkpoint.epoch, + ); + None + } + } + + #[cfg(test)] + pub fn by_checkpoint(&self) -> &CheckpointMap { + &self.by_checkpoint + } + + 
#[cfg(test)] + pub fn pending_eth1(&self) -> &BTreeMap { + &self.pending_eth1 + } +} + +#[cfg(test)] +pub mod tests { + use super::*; + use sloggers::null::NullLoggerBuilder; + use sloggers::Build; + use std::collections::HashMap; + + const SLOTS_PER_EPOCH: u64 = 32; + const MAX_DEPOSITS: u64 = 16; + const EPOCHS_PER_ETH1_VOTING_PERIOD: u64 = 64; + + fn eth1cache() -> Eth1FinalizationCache { + let log_builder = NullLoggerBuilder; + Eth1FinalizationCache::new(log_builder.build().expect("should build log")) + } + + fn random_eth1_data(deposit_count: u64) -> Eth1Data { + Eth1Data { + deposit_root: Root::random(), + deposit_count, + block_hash: Root::random(), + } + } + + fn random_checkpoint(epoch: u64) -> Checkpoint { + Checkpoint { + epoch: epoch.into(), + root: Root::random(), + } + } + + fn random_checkpoints(n: usize) -> Vec { + let mut result = Vec::with_capacity(n); + for epoch in 0..n { + result.push(random_checkpoint(epoch as u64)) + } + result + } + + #[test] + fn fully_imported_deposits() { + let epochs = 16; + let deposits_imported = 128; + + let eth1data = random_eth1_data(deposits_imported); + let checkpoints = random_checkpoints(epochs as usize); + let mut eth1cache = eth1cache(); + + for epoch in 4..epochs { + assert_eq!( + eth1cache.by_checkpoint().len(), + cmp::min((epoch - 4) as usize, DEFAULT_ETH1_CACHE_SIZE), + "Unexpected cache size" + ); + + let checkpoint = checkpoints + .get(epoch as usize) + .expect("should get checkpoint"); + eth1cache.insert( + *checkpoint, + Eth1FinalizationData { + eth1_data: eth1data.clone(), + eth1_deposit_index: deposits_imported, + }, + ); + + let finalized_checkpoint = checkpoints + .get((epoch - 4) as usize) + .expect("should get finalized checkpoint"); + assert!( + eth1cache.pending_eth1().is_empty(), + "Deposits are fully imported so pending cache should be empty" + ); + if epoch < 8 { + assert_eq!( + eth1cache.finalize(finalized_checkpoint), + None, + "Should have cache miss" + ); + } else { + assert_eq!( + 
eth1cache.finalize(finalized_checkpoint), + Some(eth1data.clone()), + "Should have cache hit" + ) + } + } + } + + #[test] + fn partially_imported_deposits() { + let epochs = 16; + let initial_deposits_imported = 1024; + let deposits_imported_per_epoch = MAX_DEPOSITS * SLOTS_PER_EPOCH; + let full_import_epoch = 13; + let total_deposits = + initial_deposits_imported + deposits_imported_per_epoch * full_import_epoch; + + let eth1data = random_eth1_data(total_deposits); + let checkpoints = random_checkpoints(epochs as usize); + let mut eth1cache = eth1cache(); + + for epoch in 0..epochs { + assert_eq!( + eth1cache.by_checkpoint().len(), + cmp::min(epoch as usize, DEFAULT_ETH1_CACHE_SIZE), + "Unexpected cache size" + ); + + let checkpoint = checkpoints + .get(epoch as usize) + .expect("should get checkpoint"); + let deposits_imported = cmp::min( + total_deposits, + initial_deposits_imported + deposits_imported_per_epoch * epoch, + ); + eth1cache.insert( + *checkpoint, + Eth1FinalizationData { + eth1_data: eth1data.clone(), + eth1_deposit_index: deposits_imported, + }, + ); + + if epoch >= 4 { + let finalized_epoch = epoch - 4; + let finalized_checkpoint = checkpoints + .get(finalized_epoch as usize) + .expect("should get finalized checkpoint"); + if finalized_epoch < full_import_epoch { + assert_eq!( + eth1cache.finalize(finalized_checkpoint), + None, + "Deposits not fully finalized so cache should return no Eth1Data", + ); + assert_eq!( + eth1cache.pending_eth1().len(), + 1, + "Deposits not fully finalized. Pending eth1 cache should have 1 entry" + ); + } else { + assert_eq!( + eth1cache.finalize(finalized_checkpoint), + Some(eth1data.clone()), + "Deposits fully imported and finalized. Cache should return Eth1Data. finalized_deposits[{}]", + (initial_deposits_imported + deposits_imported_per_epoch * finalized_epoch), + ); + assert!( + eth1cache.pending_eth1().is_empty(), + "Deposits fully imported and finalized. 
Pending cache should be empty" + ); + } + } + } + } + + #[test] + fn fork_at_epoch_boundary() { + let epochs = 12; + let deposits_imported = 128; + + let eth1data = random_eth1_data(deposits_imported); + let checkpoints = random_checkpoints(epochs as usize); + let mut forks = HashMap::new(); + let mut eth1cache = eth1cache(); + + for epoch in 0..epochs { + assert_eq!( + eth1cache.by_checkpoint().len(), + cmp::min(epoch as usize, DEFAULT_ETH1_CACHE_SIZE), + "Unexpected cache size" + ); + + let checkpoint = checkpoints + .get(epoch as usize) + .expect("should get checkpoint"); + eth1cache.insert( + *checkpoint, + Eth1FinalizationData { + eth1_data: eth1data.clone(), + eth1_deposit_index: deposits_imported, + }, + ); + // lets put a fork at every third epoch + if epoch % 3 == 0 { + let fork = random_checkpoint(epoch); + eth1cache.insert( + fork, + Eth1FinalizationData { + eth1_data: eth1data.clone(), + eth1_deposit_index: deposits_imported, + }, + ); + forks.insert(epoch as usize, fork); + } + + assert!( + eth1cache.pending_eth1().is_empty(), + "Deposits are fully imported so pending cache should be empty" + ); + if epoch >= 4 { + let finalized_epoch = (epoch - 4) as usize; + let finalized_checkpoint = if finalized_epoch % 3 == 0 { + forks.get(&finalized_epoch).expect("should get fork") + } else { + checkpoints + .get(finalized_epoch) + .expect("should get checkpoint") + }; + assert_eq!( + eth1cache.finalize(finalized_checkpoint), + Some(eth1data.clone()), + "Should have cache hit" + ); + if finalized_epoch >= 3 { + let dropped_epoch = finalized_epoch - 3; + if let Some(dropped_checkpoint) = forks.get(&dropped_epoch) { + // got checkpoint for an old fork that should no longer + // be in the cache because it is from too long ago + assert_eq!( + eth1cache.finalize(dropped_checkpoint), + None, + "Should have cache miss" + ); + } + } + } + } + } + + #[test] + fn massive_deposit_queue() { + // Simulating a situation where deposits don't get imported within an eth1 voting 
period + let eth1_voting_periods = 8; + let initial_deposits_imported = 1024; + let deposits_imported_per_epoch = MAX_DEPOSITS * SLOTS_PER_EPOCH; + let initial_deposit_queue = + deposits_imported_per_epoch * EPOCHS_PER_ETH1_VOTING_PERIOD * 2 + 32; + let new_deposits_per_voting_period = + EPOCHS_PER_ETH1_VOTING_PERIOD * deposits_imported_per_epoch / 2; + + let mut epoch_data = BTreeMap::new(); + let mut eth1s_by_count = BTreeMap::new(); + let mut eth1cache = eth1cache(); + let mut last_period_deposits = initial_deposits_imported; + for period in 0..eth1_voting_periods { + let period_deposits = initial_deposits_imported + + initial_deposit_queue + + period * new_deposits_per_voting_period; + let period_eth1_data = random_eth1_data(period_deposits); + eth1s_by_count.insert(period_eth1_data.deposit_count, period_eth1_data.clone()); + + for epoch_mod_period in 0..EPOCHS_PER_ETH1_VOTING_PERIOD { + let epoch = period * EPOCHS_PER_ETH1_VOTING_PERIOD + epoch_mod_period; + let checkpoint = random_checkpoint(epoch); + let deposits_imported = cmp::min( + period_deposits, + last_period_deposits + deposits_imported_per_epoch * epoch_mod_period, + ); + eth1cache.insert( + checkpoint, + Eth1FinalizationData { + eth1_data: period_eth1_data.clone(), + eth1_deposit_index: deposits_imported, + }, + ); + epoch_data.insert(epoch, (checkpoint, deposits_imported)); + + if epoch >= 4 { + let finalized_epoch = epoch - 4; + let (finalized_checkpoint, finalized_deposits) = epoch_data + .get(&finalized_epoch) + .expect("should get epoch data"); + + let pending_eth1s = eth1s_by_count.range((finalized_deposits + 1)..).count(); + let last_finalized_eth1 = eth1s_by_count + .range(0..(finalized_deposits + 1)) + .map(|(_, eth1)| eth1) + .last() + .cloned(); + assert_eq!( + eth1cache.finalize(finalized_checkpoint), + last_finalized_eth1, + "finalized checkpoint mismatch", + ); + assert_eq!( + eth1cache.pending_eth1().len(), + pending_eth1s, + "pending eth1 mismatch" + ); + } + } + + // remove 
unneeded stuff from old epochs + while epoch_data.len() > DEFAULT_ETH1_CACHE_SIZE { + let oldest_stored_epoch = epoch_data + .keys() + .next() + .cloned() + .expect("should get oldest epoch"); + epoch_data.remove(&oldest_stored_epoch); + } + last_period_deposits = period_deposits; + } + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index fbcd8f7fb7..5ead5311e5 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -14,6 +14,7 @@ pub mod chain_config; mod early_attester_cache; mod errors; pub mod eth1_chain; +mod eth1_finalization_cache; pub mod events; pub mod execution_payload; pub mod fork_choice_signal; diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 15b0f39f3a..3ee77f7bbd 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -2,13 +2,15 @@ mod migration_schema_v10; mod migration_schema_v11; mod migration_schema_v12; +mod migration_schema_v13; mod migration_schema_v6; mod migration_schema_v7; mod migration_schema_v8; mod migration_schema_v9; mod types; -use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; +use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY}; +use crate::eth1_chain::SszEth1; use crate::persisted_fork_choice::{ PersistedForkChoiceV1, PersistedForkChoiceV10, PersistedForkChoiceV11, PersistedForkChoiceV7, PersistedForkChoiceV8, @@ -24,6 +26,7 @@ use store::{Error as StoreError, StoreItem}; /// Migrate the database from one schema version to another, applying all requisite mutations. pub fn migrate_schema( db: Arc>, + deposit_contract_deploy_block: u64, datadir: &Path, from: SchemaVersion, to: SchemaVersion, @@ -31,19 +34,51 @@ pub fn migrate_schema( spec: &ChainSpec, ) -> Result<(), StoreError> { match (from, to) { - // Migrating from the current schema version to iself is always OK, a no-op. 
+ // Migrating from the current schema version to itself is always OK, a no-op. (_, _) if from == to && to == CURRENT_SCHEMA_VERSION => Ok(()), // Upgrade across multiple versions by recursively migrating one step at a time. (_, _) if from.as_u64() + 1 < to.as_u64() => { let next = SchemaVersion(from.as_u64() + 1); - migrate_schema::(db.clone(), datadir, from, next, log.clone(), spec)?; - migrate_schema::(db, datadir, next, to, log, spec) + migrate_schema::( + db.clone(), + deposit_contract_deploy_block, + datadir, + from, + next, + log.clone(), + spec, + )?; + migrate_schema::( + db, + deposit_contract_deploy_block, + datadir, + next, + to, + log, + spec, + ) } // Downgrade across multiple versions by recursively migrating one step at a time. (_, _) if to.as_u64() + 1 < from.as_u64() => { let next = SchemaVersion(from.as_u64() - 1); - migrate_schema::(db.clone(), datadir, from, next, log.clone(), spec)?; - migrate_schema::(db, datadir, next, to, log, spec) + migrate_schema::( + db.clone(), + deposit_contract_deploy_block, + datadir, + from, + next, + log.clone(), + spec, + )?; + migrate_schema::( + db, + deposit_contract_deploy_block, + datadir, + next, + to, + log, + spec, + ) } // @@ -207,6 +242,55 @@ pub fn migrate_schema( let ops = migration_schema_v12::downgrade_from_v12::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) } + (SchemaVersion(12), SchemaVersion(13)) => { + let mut ops = vec![]; + if let Some(persisted_eth1_v1) = db.get_item::(Ð1_CACHE_DB_KEY)? 
{ + let upgraded_eth1_cache = + match migration_schema_v13::update_eth1_cache(persisted_eth1_v1) { + Ok(upgraded_eth1) => upgraded_eth1, + Err(e) => { + warn!(log, "Failed to deserialize SszEth1CacheV1"; "error" => ?e); + warn!(log, "Reinitializing eth1 cache"); + migration_schema_v13::reinitialized_eth1_cache_v13( + deposit_contract_deploy_block, + ) + } + }; + ops.push(upgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY)); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } + (SchemaVersion(13), SchemaVersion(12)) => { + let mut ops = vec![]; + if let Some(persisted_eth1_v13) = db.get_item::(Ð1_CACHE_DB_KEY)? { + let downgraded_eth1_cache = match migration_schema_v13::downgrade_eth1_cache( + persisted_eth1_v13, + ) { + Ok(Some(downgraded_eth1)) => downgraded_eth1, + Ok(None) => { + warn!(log, "Unable to downgrade eth1 cache from newer version: reinitializing eth1 cache"); + migration_schema_v13::reinitialized_eth1_cache_v1( + deposit_contract_deploy_block, + ) + } + Err(e) => { + warn!(log, "Unable to downgrade eth1 cache from newer version: failed to deserialize SszEth1CacheV13"; "error" => ?e); + warn!(log, "Reinitializing eth1 cache"); + migration_schema_v13::reinitialized_eth1_cache_v1( + deposit_contract_deploy_block, + ) + } + }; + ops.push(downgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY)); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } // Anything else is an error. 
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs new file mode 100644 index 0000000000..d4ac974603 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs @@ -0,0 +1,150 @@ +use crate::eth1_chain::SszEth1; +use eth1::{BlockCache, SszDepositCacheV1, SszDepositCacheV13, SszEth1CacheV1, SszEth1CacheV13}; +use ssz::{Decode, Encode}; +use state_processing::common::DepositDataTree; +use store::Error; +use types::DEPOSIT_TREE_DEPTH; + +pub fn update_eth1_cache(persisted_eth1_v1: SszEth1) -> Result { + if persisted_eth1_v1.use_dummy_backend { + // backend_bytes is empty when using dummy backend + return Ok(persisted_eth1_v1); + } + + let SszEth1 { + use_dummy_backend, + backend_bytes, + } = persisted_eth1_v1; + + let ssz_eth1_cache_v1 = SszEth1CacheV1::from_ssz_bytes(&backend_bytes)?; + let SszEth1CacheV1 { + block_cache, + deposit_cache: deposit_cache_v1, + last_processed_block, + } = ssz_eth1_cache_v1; + + let SszDepositCacheV1 { + logs, + leaves, + deposit_contract_deploy_block, + deposit_roots, + } = deposit_cache_v1; + + let deposit_cache_v13 = SszDepositCacheV13 { + logs, + leaves, + deposit_contract_deploy_block, + finalized_deposit_count: 0, + finalized_block_height: deposit_contract_deploy_block.saturating_sub(1), + deposit_tree_snapshot: None, + deposit_roots, + }; + + let ssz_eth1_cache_v13 = SszEth1CacheV13 { + block_cache, + deposit_cache: deposit_cache_v13, + last_processed_block, + }; + + let persisted_eth1_v13 = SszEth1 { + use_dummy_backend, + backend_bytes: ssz_eth1_cache_v13.as_ssz_bytes(), + }; + + Ok(persisted_eth1_v13) +} + +pub fn downgrade_eth1_cache(persisted_eth1_v13: SszEth1) -> Result, Error> { + if persisted_eth1_v13.use_dummy_backend { + // backend_bytes is empty when using dummy backend + return Ok(Some(persisted_eth1_v13)); + } + + let 
SszEth1 { + use_dummy_backend, + backend_bytes, + } = persisted_eth1_v13; + + let ssz_eth1_cache_v13 = SszEth1CacheV13::from_ssz_bytes(&backend_bytes)?; + let SszEth1CacheV13 { + block_cache, + deposit_cache: deposit_cache_v13, + last_processed_block, + } = ssz_eth1_cache_v13; + + let SszDepositCacheV13 { + logs, + leaves, + deposit_contract_deploy_block, + finalized_deposit_count, + finalized_block_height: _, + deposit_tree_snapshot, + deposit_roots, + } = deposit_cache_v13; + + if finalized_deposit_count == 0 && deposit_tree_snapshot.is_none() { + // This tree was never finalized and can be directly downgraded to v1 without re-initializing + let deposit_cache_v1 = SszDepositCacheV1 { + logs, + leaves, + deposit_contract_deploy_block, + deposit_roots, + }; + let ssz_eth1_cache_v1 = SszEth1CacheV1 { + block_cache, + deposit_cache: deposit_cache_v1, + last_processed_block, + }; + return Ok(Some(SszEth1 { + use_dummy_backend, + backend_bytes: ssz_eth1_cache_v1.as_ssz_bytes(), + })); + } + // deposit cache was finalized; can't downgrade + Ok(None) +} + +pub fn reinitialized_eth1_cache_v13(deposit_contract_deploy_block: u64) -> SszEth1 { + let empty_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); + let deposit_cache_v13 = SszDepositCacheV13 { + logs: vec![], + leaves: vec![], + deposit_contract_deploy_block, + finalized_deposit_count: 0, + finalized_block_height: deposit_contract_deploy_block.saturating_sub(1), + deposit_tree_snapshot: empty_tree.get_snapshot(), + deposit_roots: vec![empty_tree.root()], + }; + + let ssz_eth1_cache_v13 = SszEth1CacheV13 { + block_cache: BlockCache::default(), + deposit_cache: deposit_cache_v13, + last_processed_block: None, + }; + + SszEth1 { + use_dummy_backend: false, + backend_bytes: ssz_eth1_cache_v13.as_ssz_bytes(), + } +} + +pub fn reinitialized_eth1_cache_v1(deposit_contract_deploy_block: u64) -> SszEth1 { + let empty_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); + let deposit_cache_v1 = 
SszDepositCacheV1 { + logs: vec![], + leaves: vec![], + deposit_contract_deploy_block, + deposit_roots: vec![empty_tree.root()], + }; + + let ssz_eth1_cache_v1 = SszEth1CacheV1 { + block_cache: BlockCache::default(), + deposit_cache: deposit_cache_v1, + last_processed_block: None, + }; + + SszEth1 { + use_dummy_backend: false, + backend_bytes: ssz_eth1_cache_v1.as_ssz_bytes(), + } +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index d5a8880381..3b4a62f5a9 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1432,8 +1432,9 @@ where // Building proofs let mut proofs = vec![]; for i in 0..leaves.len() { - let (_, mut proof) = - tree.generate_proof(i, self.spec.deposit_contract_tree_depth as usize); + let (_, mut proof) = tree + .generate_proof(i, self.spec.deposit_contract_tree_depth as usize) + .expect("should generate proof"); proof.push(Hash256::from_slice(&int_to_bytes32(leaves.len() as u64))); proofs.push(proof); } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index efd91cfdf6..c89980e6e8 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -277,8 +277,52 @@ where BeaconNodeHttpClient::new(url, Timeouts::set_all(CHECKPOINT_SYNC_HTTP_TIMEOUT)); let slots_per_epoch = TEthSpec::slots_per_epoch(); - debug!(context.log(), "Downloading finalized block"); + let deposit_snapshot = if config.sync_eth1_chain { + // We want to fetch deposit snapshot before fetching the finalized beacon state to + // ensure that the snapshot is not newer than the beacon state that satisfies the + // deposit finalization conditions + debug!(context.log(), "Downloading deposit snapshot"); + let deposit_snapshot_result = remote + .get_deposit_snapshot() + .await + .map_err(|e| match e { + ApiError::InvalidSsz(e) => format!( + "Unable to parse SSZ: {:?}. 
Ensure the checkpoint-sync-url refers to a \ + node for the correct network", + e + ), + e => format!("Error fetching deposit snapshot from remote: {:?}", e), + }); + match deposit_snapshot_result { + Ok(Some(deposit_snapshot)) => { + if deposit_snapshot.is_valid() { + Some(deposit_snapshot) + } else { + warn!(context.log(), "Remote BN sent invalid deposit snapshot!"); + None + } + } + Ok(None) => { + warn!( + context.log(), + "Remote BN does not support EIP-4881 fast deposit sync" + ); + None + } + Err(e) => { + warn!( + context.log(), + "Remote BN does not support EIP-4881 fast deposit sync"; + "error" => e + ); + None + } + } + } else { + None + }; + debug!(context.log(), "Downloading finalized block"); // Find a suitable finalized block on an epoch boundary. let mut block = remote .get_beacon_blocks_ssz::(BlockId::Finalized, &spec) @@ -362,9 +406,33 @@ where "state_root" => ?state_root, ); + let service = + deposit_snapshot.and_then(|snapshot| match Eth1Service::from_deposit_snapshot( + config.eth1, + context.log().clone(), + spec, + &snapshot, + ) { + Ok(service) => { + info!( + context.log(), + "Loaded deposit tree snapshot"; + "deposits loaded" => snapshot.deposit_count, + ); + Some(service) + } + Err(e) => { + warn!(context.log(), + "Unable to load deposit snapshot"; + "error" => ?e + ); + None + } + }); + builder .weak_subjectivity_state(state, block, genesis_state) - .map(|v| (v, None))? + .map(|v| (v, service))? 
} ClientGenesis::DepositContract => { info!( @@ -810,9 +878,16 @@ where self.freezer_db_path = Some(cold_path.into()); let inner_spec = spec.clone(); + let deposit_contract_deploy_block = context + .eth2_network_config + .as_ref() + .map(|config| config.deposit_contract_deploy_block) + .unwrap_or(0); + let schema_upgrade = |db, from, to| { migrate_schema::>( db, + deposit_contract_deploy_block, datadir, from, to, diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 930301256c..7e99c43e7d 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -25,6 +25,7 @@ eth2_ssz_derive = "0.3.0" tree_hash = "0.4.1" parking_lot = "0.12.0" slog = "2.5.2" +superstruct = "0.5.0" tokio = { version = "1.14.0", features = ["full"] } state_processing = { path = "../../consensus/state_processing" } lighthouse_metrics = { path = "../../common/lighthouse_metrics"} diff --git a/beacon_node/eth1/src/block_cache.rs b/beacon_node/eth1/src/block_cache.rs index 5999944f4a..26e160115e 100644 --- a/beacon_node/eth1/src/block_cache.rs +++ b/beacon_node/eth1/src/block_cache.rs @@ -1,7 +1,10 @@ use ssz_derive::{Decode, Encode}; +use std::collections::HashMap; use std::ops::RangeInclusive; pub use eth2::lighthouse::Eth1Block; +use eth2::types::Hash256; +use std::sync::Arc; #[derive(Debug, PartialEq, Clone)] pub enum Error { @@ -20,7 +23,9 @@ pub enum Error { /// timestamp. #[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] pub struct BlockCache { - blocks: Vec, + blocks: Vec>, + #[ssz(skip_serializing, skip_deserializing)] + by_hash: HashMap>, } impl BlockCache { @@ -36,12 +41,12 @@ impl BlockCache { /// Returns the earliest (lowest timestamp) block, if any. pub fn earliest_block(&self) -> Option<&Eth1Block> { - self.blocks.first() + self.blocks.first().map(|ptr| ptr.as_ref()) } /// Returns the latest (highest timestamp) block, if any. 
pub fn latest_block(&self) -> Option<&Eth1Block> { - self.blocks.last() + self.blocks.last().map(|ptr| ptr.as_ref()) } /// Returns the timestamp of the earliest block in the cache (if any). @@ -71,7 +76,7 @@ impl BlockCache { /// - Monotonically increasing block numbers. /// - Non-uniformly increasing block timestamps. pub fn iter(&self) -> impl DoubleEndedIterator + Clone { - self.blocks.iter() + self.blocks.iter().map(|ptr| ptr.as_ref()) } /// Shortens the cache, keeping the latest (by block number) `len` blocks while dropping the @@ -80,7 +85,11 @@ impl BlockCache { /// If `len` is greater than the vector's current length, this has no effect. pub fn truncate(&mut self, len: usize) { if len < self.blocks.len() { - self.blocks = self.blocks.split_off(self.blocks.len() - len); + let remaining = self.blocks.split_off(self.blocks.len() - len); + for block in &self.blocks { + self.by_hash.remove(&block.hash); + } + self.blocks = remaining; } } @@ -92,12 +101,27 @@ impl BlockCache { /// Returns a block with the corresponding number, if any. pub fn block_by_number(&self, block_number: u64) -> Option<&Eth1Block> { - self.blocks.get( - self.blocks - .as_slice() - .binary_search_by(|block| block.number.cmp(&block_number)) - .ok()?, - ) + self.blocks + .get( + self.blocks + .as_slice() + .binary_search_by(|block| block.number.cmp(&block_number)) + .ok()?, + ) + .map(|ptr| ptr.as_ref()) + } + + /// Returns a block with the corresponding hash, if any. + pub fn block_by_hash(&self, block_hash: &Hash256) -> Option<&Eth1Block> { + self.by_hash.get(block_hash).map(|ptr| ptr.as_ref()) + } + + /// Rebuilds the by_hash map + pub fn rebuild_by_hash_map(&mut self) { + self.by_hash.clear(); + for block in self.blocks.iter() { + self.by_hash.insert(block.hash, block.clone()); + } } /// Insert an `Eth1Snapshot` into `self`, allowing future queries. 
@@ -161,7 +185,9 @@ impl BlockCache { } } - self.blocks.push(block); + let ptr = Arc::new(block); + self.by_hash.insert(ptr.hash, ptr.clone()); + self.blocks.push(ptr); Ok(()) } @@ -269,6 +295,8 @@ mod tests { .expect("should add consecutive blocks with duplicate timestamps"); } + let blocks = blocks.into_iter().map(Arc::new).collect::>(); + assert_eq!(cache.blocks, blocks, "should have added all blocks"); } } diff --git a/beacon_node/eth1/src/deposit_cache.rs b/beacon_node/eth1/src/deposit_cache.rs index 078e3602f5..ab07b380d1 100644 --- a/beacon_node/eth1/src/deposit_cache.rs +++ b/beacon_node/eth1/src/deposit_cache.rs @@ -1,9 +1,10 @@ -use execution_layer::http::deposit_log::DepositLog; +use crate::{DepositLog, Eth1Block}; use ssz_derive::{Decode, Encode}; use state_processing::common::DepositDataTree; use std::cmp::Ordering; +use superstruct::superstruct; use tree_hash::TreeHash; -use types::{Deposit, Hash256, DEPOSIT_TREE_DEPTH}; +use types::{Deposit, DepositTreeSnapshot, Hash256, DEPOSIT_TREE_DEPTH}; #[derive(Debug, PartialEq)] pub enum Error { @@ -21,22 +22,53 @@ pub enum Error { /// A log with the given index is already present in the cache and it does not match the one /// provided. DuplicateDistinctLog(u64), + /// Attempted to insert log with given index after the log had been finalized + FinalizedLogInsert { + log_index: u64, + finalized_index: u64, + }, /// The deposit count must always be large enough to account for the requested deposit range. /// /// E.g., you cannot request deposit 10 when the deposit count is 9. 
DepositCountInvalid { deposit_count: u64, range_end: u64 }, + /// You can't request deposits on or before the finalized deposit + DepositRangeInvalid { + range_start: u64, + finalized_count: u64, + }, + /// You can't finalize what's already been finalized and the cache must have the logs + /// that you wish to finalize + InvalidFinalizeIndex { + requested_count: u64, + currently_finalized: u64, + deposit_count: u64, + }, /// Error with the merkle tree for deposits. DepositTree(merkle_proof::MerkleTreeError), /// An unexpected condition was encountered. Internal(String), + /// This is for errors that should never occur + PleaseNotifyTheDevs, } -#[derive(Encode, Decode, Clone)] +pub type SszDepositCache = SszDepositCacheV13; + +#[superstruct( + variants(V1, V13), + variant_attributes(derive(Encode, Decode, Clone)), + no_enum +)] pub struct SszDepositCache { - logs: Vec, - leaves: Vec, - deposit_contract_deploy_block: u64, - deposit_roots: Vec, + pub logs: Vec, + pub leaves: Vec, + pub deposit_contract_deploy_block: u64, + #[superstruct(only(V13))] + pub finalized_deposit_count: u64, + #[superstruct(only(V13))] + pub finalized_block_height: u64, + #[superstruct(only(V13))] + pub deposit_tree_snapshot: Option, + pub deposit_roots: Vec, } impl SszDepositCache { @@ -45,13 +77,37 @@ impl SszDepositCache { logs: cache.logs.clone(), leaves: cache.leaves.clone(), deposit_contract_deploy_block: cache.deposit_contract_deploy_block, + finalized_deposit_count: cache.finalized_deposit_count, + finalized_block_height: cache.finalized_block_height, + deposit_tree_snapshot: cache.deposit_tree.get_snapshot(), deposit_roots: cache.deposit_roots.clone(), } } pub fn to_deposit_cache(&self) -> Result { - let deposit_tree = - DepositDataTree::create(&self.leaves, self.leaves.len(), DEPOSIT_TREE_DEPTH); + let deposit_tree = self + .deposit_tree_snapshot + .as_ref() + .map(|snapshot| { + let mut tree = DepositDataTree::from_snapshot(snapshot, DEPOSIT_TREE_DEPTH) + .map_err(|e| 
format!("Invalid SszDepositCache: {:?}", e))?; + for leaf in &self.leaves { + tree.push_leaf(*leaf).map_err(|e| { + format!("Invalid SszDepositCache: unable to push leaf: {:?}", e) + })?; + } + Ok::<_, String>(tree) + }) + .unwrap_or_else(|| { + // deposit_tree_snapshot = None (tree was never finalized) + // Create DepositDataTree from leaves + Ok(DepositDataTree::create( + &self.leaves, + self.leaves.len(), + DEPOSIT_TREE_DEPTH, + )) + })?; + // Check for invalid SszDepositCache conditions if self.leaves.len() != self.logs.len() { return Err("Invalid SszDepositCache: logs and leaves should have equal length".into()); @@ -67,6 +123,8 @@ impl SszDepositCache { logs: self.logs.clone(), leaves: self.leaves.clone(), deposit_contract_deploy_block: self.deposit_contract_deploy_block, + finalized_deposit_count: self.finalized_deposit_count, + finalized_block_height: self.finalized_block_height, deposit_tree, deposit_roots: self.deposit_roots.clone(), }) @@ -76,10 +134,13 @@ impl SszDepositCache { /// Mirrors the merkle tree of deposits in the eth1 deposit contract. /// /// Provides `Deposit` objects with merkle proofs included. +#[cfg_attr(test, derive(PartialEq))] pub struct DepositCache { logs: Vec, leaves: Vec, deposit_contract_deploy_block: u64, + finalized_deposit_count: u64, + finalized_block_height: u64, /// An incremental merkle tree which represents the current state of the /// deposit contract tree. deposit_tree: DepositDataTree, @@ -96,6 +157,8 @@ impl Default for DepositCache { logs: Vec::new(), leaves: Vec::new(), deposit_contract_deploy_block: 1, + finalized_deposit_count: 0, + finalized_block_height: 0, deposit_tree, deposit_roots, } @@ -114,33 +177,111 @@ impl DepositCache { pub fn new(deposit_contract_deploy_block: u64) -> Self { DepositCache { deposit_contract_deploy_block, + finalized_block_height: deposit_contract_deploy_block.saturating_sub(1), ..Self::default() } } - /// Returns the number of deposits available in the cache. 
+ pub fn from_deposit_snapshot( + deposit_contract_deploy_block: u64, + snapshot: &DepositTreeSnapshot, + ) -> Result { + let deposit_tree = DepositDataTree::from_snapshot(snapshot, DEPOSIT_TREE_DEPTH) + .map_err(|e| format!("Invalid DepositSnapshot: {:?}", e))?; + Ok(DepositCache { + logs: Vec::new(), + leaves: Vec::new(), + deposit_contract_deploy_block, + finalized_deposit_count: snapshot.deposit_count, + finalized_block_height: snapshot.execution_block_height, + deposit_tree, + deposit_roots: vec![snapshot.deposit_root], + }) + } + + /// Returns the number of deposits the cache stores pub fn len(&self) -> usize { - self.logs.len() + self.finalized_deposit_count as usize + self.logs.len() } /// True if the cache does not store any blocks. pub fn is_empty(&self) -> bool { - self.logs.is_empty() + self.finalized_deposit_count != 0 && self.logs.is_empty() } /// Returns the block number for the most recent deposit in the cache. - pub fn latest_block_number(&self) -> Option { - self.logs.last().map(|log| log.block_number) + pub fn latest_block_number(&self) -> u64 { + self.logs + .last() + .map(|log| log.block_number) + .unwrap_or(self.finalized_block_height) } - /// Returns an iterator over all the logs in `self`. + /// Returns an iterator over all the logs in `self` that aren't finalized. pub fn iter(&self) -> impl Iterator { self.logs.iter() } - /// Returns the i'th deposit log. - pub fn get(&self, i: usize) -> Option<&DepositLog> { - self.logs.get(i) + /// Returns the deposit log with INDEX i. 
+ pub fn get_log(&self, i: usize) -> Option<&DepositLog> { + let finalized_deposit_count = self.finalized_deposit_count as usize; + if i < finalized_deposit_count { + None + } else { + self.logs.get(i - finalized_deposit_count) + } + } + + /// Returns the deposit root with DEPOSIT COUNT (not index) i + pub fn get_root(&self, i: usize) -> Option<&Hash256> { + let finalized_deposit_count = self.finalized_deposit_count as usize; + if i < finalized_deposit_count { + None + } else { + self.deposit_roots.get(i - finalized_deposit_count) + } + } + + /// Returns the finalized deposit count + pub fn finalized_deposit_count(&self) -> u64 { + self.finalized_deposit_count + } + + /// Finalizes the cache up to `eth1_block.deposit_count`. + pub fn finalize(&mut self, eth1_block: Eth1Block) -> Result<(), Error> { + let deposits_to_finalize = eth1_block.deposit_count.ok_or_else(|| { + Error::Internal("Eth1Block did not contain deposit_count".to_string()) + })?; + + let currently_finalized = self.finalized_deposit_count; + if deposits_to_finalize > self.len() as u64 || deposits_to_finalize <= currently_finalized { + Err(Error::InvalidFinalizeIndex { + requested_count: deposits_to_finalize, + currently_finalized, + deposit_count: self.len() as u64, + }) + } else { + let finalized_log = self + .get_log((deposits_to_finalize - 1) as usize) + .cloned() + .ok_or(Error::PleaseNotifyTheDevs)?; + let drop = (deposits_to_finalize - currently_finalized) as usize; + self.deposit_tree + .finalize(eth1_block.into()) + .map_err(Error::DepositTree)?; + self.logs.drain(0..drop); + self.leaves.drain(0..drop); + self.deposit_roots.drain(0..drop); + self.finalized_deposit_count = deposits_to_finalize; + self.finalized_block_height = finalized_log.block_number; + + Ok(()) + } + } + + /// Returns the deposit tree snapshot (if tree is finalized) + pub fn get_deposit_snapshot(&self) -> Option { + self.deposit_tree.get_snapshot() } /// Adds `log` to self. 
@@ -153,19 +294,29 @@ impl DepositCache { /// - If a log with index `log.index - 1` is not already present in `self` (ignored when empty). /// - If a log with `log.index` is already known, but the given `log` is distinct to it. pub fn insert_log(&mut self, log: DepositLog) -> Result { - match log.index.cmp(&(self.logs.len() as u64)) { + match log.index.cmp(&(self.len() as u64)) { Ordering::Equal => { let deposit = log.deposit_data.tree_hash_root(); - self.leaves.push(deposit); - self.logs.push(log); + // should push to deposit_tree first because it's fallible self.deposit_tree .push_leaf(deposit) .map_err(Error::DepositTree)?; + self.leaves.push(deposit); + self.logs.push(log); self.deposit_roots.push(self.deposit_tree.root()); Ok(DepositCacheInsertOutcome::Inserted) } Ordering::Less => { - if self.logs[log.index as usize] == log { + let mut compare_index = log.index as usize; + if log.index < self.finalized_deposit_count { + return Err(Error::FinalizedLogInsert { + log_index: log.index, + finalized_index: self.finalized_deposit_count - 1, + }); + } else { + compare_index -= self.finalized_deposit_count as usize; + } + if self.logs[compare_index] == log { Ok(DepositCacheInsertOutcome::Duplicate) } else { Err(Error::DuplicateDistinctLog(log.index)) @@ -187,14 +338,13 @@ impl DepositCache { /// /// ## Errors /// - /// - If `deposit_count` is larger than `end`. + /// - If `deposit_count` is less than `end`. /// - There are not sufficient deposits in the tree to generate the proof. pub fn get_deposits( &self, start: u64, end: u64, deposit_count: u64, - tree_depth: usize, ) -> Result<(Hash256, Vec), Error> { if deposit_count < end { // It's invalid to ask for more deposits than should exist. @@ -202,48 +352,66 @@ impl DepositCache { deposit_count, range_end: end, }) - } else if end > self.logs.len() as u64 { + } else if end > self.len() as u64 { // The range of requested deposits exceeds the deposits stored locally. 
Err(Error::InsufficientDeposits { requested: end, known_deposits: self.logs.len(), }) - } else if deposit_count > self.leaves.len() as u64 { - // There are not `deposit_count` known deposit roots, so we can't build the merkle tree - // to prove into. - Err(Error::InsufficientDeposits { - requested: deposit_count, - known_deposits: self.logs.len(), + } else if self.finalized_deposit_count > start { + // Can't ask for deposits before or on the finalized deposit + Err(Error::DepositRangeInvalid { + range_start: start, + finalized_count: self.finalized_deposit_count, }) } else { + let (start, end, deposit_count) = ( + start - self.finalized_deposit_count, + end - self.finalized_deposit_count, + deposit_count - self.finalized_deposit_count, + ); let leaves = self .leaves .get(0..deposit_count as usize) .ok_or_else(|| Error::Internal("Unable to get known leaves".into()))?; - // Note: there is likely a more optimal solution than recreating the `DepositDataTree` - // each time this function is called. - // - // Perhaps a base merkle tree could be maintained that contains all deposits up to the - // last finalized eth1 deposit count. Then, that tree could be cloned and extended for - // each of these calls. + let tree = self + .deposit_tree + .get_snapshot() + .map(|snapshot| { + // The tree has already been finalized. 
So we can just start from the snapshot + // and replay the deposits up to `deposit_count` + let mut tree = DepositDataTree::from_snapshot(&snapshot, DEPOSIT_TREE_DEPTH) + .map_err(Error::DepositTree)?; + for leaf in leaves { + tree.push_leaf(*leaf).map_err(Error::DepositTree)?; + } + Ok(tree) + }) + .unwrap_or_else(|| { + // Deposit tree hasn't been finalized yet, will have to re-create the whole tree + Ok(DepositDataTree::create( + leaves, + leaves.len(), + DEPOSIT_TREE_DEPTH, + )) + })?; - let tree = DepositDataTree::create(leaves, deposit_count as usize, tree_depth); - - let deposits = self - .logs + let mut deposits = vec![]; + self.logs .get(start as usize..end as usize) .ok_or_else(|| Error::Internal("Unable to get known log".into()))? .iter() - .map(|deposit_log| { - let (_leaf, proof) = tree.generate_proof(deposit_log.index as usize); - - Deposit { + .try_for_each(|deposit_log| { + let (_leaf, proof) = tree + .generate_proof(deposit_log.index as usize) + .map_err(Error::DepositTree)?; + deposits.push(Deposit { proof: proof.into(), data: deposit_log.deposit_data.clone(), - } - }) - .collect(); + }); + Ok(()) + })?; Ok((tree.root(), deposits)) } @@ -270,16 +438,24 @@ impl DepositCache { /// Returns the number of deposits that have been observed up to and /// including the block at `block_number`. /// - /// Returns `None` if the `block_number` is zero or prior to contract deployment. + /// Returns `None` if the `block_number` is zero or prior to contract deployment + /// or prior to last finalized deposit. 
pub fn get_deposit_count_from_cache(&self, block_number: u64) -> Option { - if block_number == 0 || block_number < self.deposit_contract_deploy_block { + if block_number == 0 + || block_number < self.deposit_contract_deploy_block + || block_number < self.finalized_block_height + { None + } else if block_number == self.finalized_block_height { + Some(self.finalized_deposit_count) } else { Some( - self.logs - .iter() - .take_while(|deposit| deposit.block_number <= block_number) - .count() as u64, + self.finalized_deposit_count + + self + .logs + .iter() + .take_while(|deposit| deposit.block_number <= block_number) + .count() as u64, ) } } @@ -289,8 +465,8 @@ impl DepositCache { /// Fetches the `deposit_count` on or just before the queried `block_number` /// and queries the `deposit_roots` map to get the corresponding `deposit_root`. pub fn get_deposit_root_from_cache(&self, block_number: u64) -> Option { - let index = self.get_deposit_count_from_cache(block_number)?; - Some(*self.deposit_roots.get(index as usize)?) + let count = self.get_deposit_count_from_cache(block_number)?; + self.get_root(count as usize).cloned() } } @@ -300,8 +476,6 @@ pub mod tests { use execution_layer::http::deposit_log::Log; use types::{EthSpec, MainnetEthSpec}; - pub const TREE_DEPTH: usize = 32; - /// The data from a deposit event, using the v0.8.3 version of the deposit contract. 
pub const EXAMPLE_LOG: &[u8] = &[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -338,32 +512,52 @@ pub mod tests { log.to_deposit_log(&spec).expect("should decode log") } + fn get_cache_with_deposits(n: u64) -> DepositCache { + let mut deposit_cache = DepositCache::default(); + for i in 0..n { + let mut log = example_log(); + log.index = i; + log.block_number = i; + log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i); + deposit_cache + .insert_log(log) + .expect("should add consecutive logs"); + } + assert_eq!(deposit_cache.len() as u64, n, "should have {} deposits", n); + + deposit_cache + } + #[test] fn insert_log_valid() { - let mut tree = DepositCache::default(); + let mut deposit_cache = DepositCache::default(); for i in 0..16 { let mut log = example_log(); log.index = i; - tree.insert_log(log).expect("should add consecutive logs"); + deposit_cache + .insert_log(log) + .expect("should add consecutive logs"); } } #[test] fn insert_log_invalid() { - let mut tree = DepositCache::default(); + let mut deposit_cache = DepositCache::default(); for i in 0..4 { let mut log = example_log(); log.index = i; - tree.insert_log(log).expect("should add consecutive logs"); + deposit_cache + .insert_log(log) + .expect("should add consecutive logs"); } // Add duplicate, when given is the same as the one known. let mut log = example_log(); log.index = 3; assert_eq!( - tree.insert_log(log).unwrap(), + deposit_cache.insert_log(log).unwrap(), DepositCacheInsertOutcome::Duplicate ); @@ -371,54 +565,40 @@ pub mod tests { let mut log = example_log(); log.index = 3; log.block_number = 99; - assert!(tree.insert_log(log).is_err()); + assert!(deposit_cache.insert_log(log).is_err()); // Skip inserting a log. 
let mut log = example_log(); log.index = 5; - assert!(tree.insert_log(log).is_err()); + assert!(deposit_cache.insert_log(log).is_err()); } #[test] fn get_deposit_valid() { let n = 1_024; - let mut tree = DepositCache::default(); - - for i in 0..n { - let mut log = example_log(); - log.index = i; - log.block_number = i; - log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i); - tree.insert_log(log).expect("should add consecutive logs"); - } + let deposit_cache = get_cache_with_deposits(n); // Get 0 deposits, with max deposit count. - let (_, deposits) = tree - .get_deposits(0, 0, n, TREE_DEPTH) + let (_, deposits) = deposit_cache + .get_deposits(0, 0, n) .expect("should get the full tree"); assert_eq!(deposits.len(), 0, "should return no deposits"); // Get 0 deposits, with 0 deposit count. - let (_, deposits) = tree - .get_deposits(0, 0, 0, TREE_DEPTH) - .expect("should get the full tree"); - assert_eq!(deposits.len(), 0, "should return no deposits"); - - // Get 0 deposits, with 0 deposit count, tree depth 0. - let (_, deposits) = tree - .get_deposits(0, 0, 0, 0) + let (_, deposits) = deposit_cache + .get_deposits(0, 0, 0) .expect("should get the full tree"); assert_eq!(deposits.len(), 0, "should return no deposits"); // Get all deposits, with max deposit count. - let (full_root, deposits) = tree - .get_deposits(0, n, n, TREE_DEPTH) + let (full_root, deposits) = deposit_cache + .get_deposits(0, n, n) .expect("should get the full tree"); assert_eq!(deposits.len(), n as usize, "should return all deposits"); // Get 4 deposits, with max deposit count. - let (root, deposits) = tree - .get_deposits(0, 4, n, TREE_DEPTH) + let (root, deposits) = deposit_cache + .get_deposits(0, 4, n) .expect("should get the four from the full tree"); assert_eq!( deposits.len(), @@ -432,14 +612,14 @@ pub mod tests { // Get half of the deposits, with half deposit count. 
let half = n / 2; - let (half_root, deposits) = tree - .get_deposits(0, half, half, TREE_DEPTH) + let (half_root, deposits) = deposit_cache + .get_deposits(0, half, half) .expect("should get the half tree"); assert_eq!(deposits.len(), half as usize, "should return half deposits"); // Get 4 deposits, with half deposit count. - let (root, deposits) = tree - .get_deposits(0, 4, n / 2, TREE_DEPTH) + let (root, deposits) = deposit_cache + .get_deposits(0, 4, n / 2) .expect("should get the half tree"); assert_eq!( deposits.len(), @@ -459,23 +639,455 @@ pub mod tests { #[test] fn get_deposit_invalid() { let n = 16; - let mut tree = DepositCache::default(); - - for i in 0..n { - let mut log = example_log(); - log.index = i; - log.block_number = i; - log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i); - tree.insert_log(log).expect("should add consecutive logs"); - } + let mut tree = get_cache_with_deposits(n); // Range too high. - assert!(tree.get_deposits(0, n + 1, n, TREE_DEPTH).is_err()); + assert!(tree.get_deposits(0, n + 1, n).is_err()); // Count too high. - assert!(tree.get_deposits(0, n, n + 1, TREE_DEPTH).is_err()); + assert!(tree.get_deposits(0, n, n + 1).is_err()); // Range higher than count. 
- assert!(tree.get_deposits(0, 4, 2, TREE_DEPTH).is_err()); + assert!(tree.get_deposits(0, 4, 2).is_err()); + + let block7 = fake_eth1_block(&tree, 7).expect("should create fake eth1 block"); + tree.finalize(block7).expect("should finalize"); + // Range starts <= finalized deposit + assert!(tree.get_deposits(6, 9, 11).is_err()); + assert!(tree.get_deposits(7, 9, 11).is_err()); + // Range start > finalized deposit should be OK + assert!(tree.get_deposits(8, 9, 11).is_ok()); + } + + // returns an eth1 block that can be used to finalize the cache at `deposit_index` + // this will ensure the `deposit_root` on the `Eth1Block` is correct + fn fake_eth1_block(deposit_cache: &DepositCache, deposit_index: usize) -> Option { + let deposit_log = deposit_cache.get_log(deposit_index)?; + Some(Eth1Block { + hash: Hash256::from_low_u64_be(deposit_log.block_number), + timestamp: 0, + number: deposit_log.block_number, + deposit_root: deposit_cache.get_root(deposit_index + 1).cloned(), + deposit_count: Some(deposit_log.index + 1), + }) + } + + #[test] + fn test_finalization_boundaries() { + let n = 8; + let half = (n / 2) as usize; + + let mut deposit_cache = get_cache_with_deposits(n as u64); + + let full_root_before_finalization = deposit_cache.deposit_tree.root(); + let half_log_plus1_before_finalization = deposit_cache + .get_log(half + 1) + .expect("log should exist") + .clone(); + let half_root_plus1_before_finalization = + *deposit_cache.get_root(half + 1).expect("root should exist"); + + let (root_before_finalization, proof_before_finalization) = deposit_cache + .get_deposits((half + 1) as u64, (half + 2) as u64, (half + 2) as u64) + .expect("should return 1 deposit with proof"); + + // finalize on the tree at half + let half_block = + fake_eth1_block(&deposit_cache, half).expect("fake block should be created"); + assert!( + deposit_cache.get_deposit_snapshot().is_none(), + "snapshot should not exist as tree has not been finalized" + ); + deposit_cache + 
.finalize(half_block) + .expect("tree should_finalize"); + + // check boundary conditions for get_log + assert!( + deposit_cache.get_log(half).is_none(), + "log at finalized deposit should NOT exist" + ); + assert_eq!( + *deposit_cache.get_log(half + 1).expect("log should exist"), + half_log_plus1_before_finalization, + "log after finalized deposit should match before finalization" + ); + // check boundary conditions for get_root + assert!( + deposit_cache.get_root(half).is_none(), + "root at finalized deposit should NOT exist" + ); + assert_eq!( + *deposit_cache.get_root(half + 1).expect("root should exist"), + half_root_plus1_before_finalization, + "root after finalized deposit should match before finalization" + ); + // full root should match before and after finalization + assert_eq!( + deposit_cache.deposit_tree.root(), + full_root_before_finalization, + "full root should match before and after finalization" + ); + // check boundary conditions for get_deposits (proof) + assert!( + deposit_cache + .get_deposits(half as u64, (half + 1) as u64, (half + 1) as u64) + .is_err(), + "cannot prove the finalized deposit" + ); + let (root_after_finalization, proof_after_finalization) = deposit_cache + .get_deposits((half + 1) as u64, (half + 2) as u64, (half + 2) as u64) + .expect("should return 1 deposit with proof"); + assert_eq!( + root_before_finalization, root_after_finalization, + "roots before and after finalization should match" + ); + assert_eq!( + proof_before_finalization, proof_after_finalization, + "proof before and after finalization should match" + ); + + // recover tree from snapshot by replaying deposits + let snapshot = deposit_cache + .get_deposit_snapshot() + .expect("snapshot should exist"); + let mut recovered = DepositCache::from_deposit_snapshot(1, &snapshot) + .expect("should recover finalized tree"); + for i in half + 1..n { + let mut log = example_log(); + log.index = i as u64; + log.block_number = i as u64; + 
log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i as u64); + recovered + .insert_log(log) + .expect("should add consecutive logs"); + } + + // check the same boundary conditions above for the recovered tree + assert!( + recovered.get_log(half).is_none(), + "log at finalized deposit should NOT exist" + ); + assert_eq!( + *recovered.get_log(half + 1).expect("log should exist"), + half_log_plus1_before_finalization, + "log after finalized deposit should match before finalization in recovered tree" + ); + // check boundary conditions for get_root + assert!( + recovered.get_root(half).is_none(), + "root at finalized deposit should NOT exist" + ); + assert_eq!( + *recovered.get_root(half + 1).expect("root should exist"), + half_root_plus1_before_finalization, + "root after finalized deposit should match before finalization in recovered tree" + ); + // full root should match before and after finalization + assert_eq!( + recovered.deposit_tree.root(), + full_root_before_finalization, + "full root should match before and after finalization" + ); + // check boundary conditions for get_deposits (proof) + assert!( + recovered + .get_deposits(half as u64, (half + 1) as u64, (half + 1) as u64) + .is_err(), + "cannot prove the finalized deposit" + ); + let (recovered_root_after_finalization, recovered_proof_after_finalization) = recovered + .get_deposits((half + 1) as u64, (half + 2) as u64, (half + 2) as u64) + .expect("should return 1 deposit with proof"); + assert_eq!( + root_before_finalization, recovered_root_after_finalization, + "recovered roots before and after finalization should match" + ); + assert_eq!( + proof_before_finalization, recovered_proof_after_finalization, + "recovered proof before and after finalization should match" + ); + } + + #[test] + fn test_finalization() { + let n = 1024; + let half = n / 2; + let quarter = half / 2; + let mut deposit_cache = get_cache_with_deposits(n); + + let full_root_before_finalization = 
deposit_cache.deposit_tree.root(); + let q3_root_before_finalization = deposit_cache + .get_root((half + quarter) as usize) + .cloned() + .expect("root should exist"); + let q3_log_before_finalization = deposit_cache + .get_log((half + quarter) as usize) + .cloned() + .expect("log should exist"); + // get_log(half+quarter) should return log with index `half+quarter` + assert_eq!( + q3_log_before_finalization.index, + (half + quarter) as u64, + "log index should be {}", + (half + quarter), + ); + + // get lower quarter of deposits with max deposit count + let (lower_quarter_root_before_finalization, lower_quarter_deposits_before_finalization) = + deposit_cache + .get_deposits(quarter, half, n) + .expect("should get lower quarter"); + assert_eq!( + lower_quarter_deposits_before_finalization.len(), + quarter as usize, + "should get {} deposits from lower quarter", + quarter, + ); + // since the lower quarter was done with full deposits, root should be the same as full_root_before_finalization + assert_eq!( + lower_quarter_root_before_finalization, full_root_before_finalization, + "should still get full root with deposit subset", + ); + + // get upper quarter of deposits with slightly reduced deposit count + let (upper_quarter_root_before_finalization, upper_quarter_deposits_before_finalization) = + deposit_cache + .get_deposits(half, half + quarter, n - 2) + .expect("should get upper quarter"); + assert_eq!( + upper_quarter_deposits_before_finalization.len(), + quarter as usize, + "should get {} deposits from upper quarter", + quarter, + ); + // since upper quarter was with subset of nodes, it should differ from full root + assert_ne!( + full_root_before_finalization, upper_quarter_root_before_finalization, + "subtree root should differ from full root", + ); + + let f0_log = deposit_cache + .get_log((quarter - 1) as usize) + .cloned() + .expect("should return log"); + let f0_block = fake_eth1_block(&deposit_cache, (quarter - 1) as usize) + .expect("fake eth1 block 
should be created"); + + // finalize first quarter + deposit_cache + .finalize(f0_block) + .expect("should finalize first quarter"); + // finalized count and block number should match log + assert_eq!( + deposit_cache.finalized_deposit_count, + f0_log.index + 1, + "after calling finalize(eth1block) finalized_deposit_count should equal eth1_block.deposit_count", + ); + assert_eq!( + deposit_cache.finalized_block_height, + f0_log.block_number, + "after calling finalize(eth1block) finalized_block_number should equal eth1block.block_number" + ); + // check get_log boundaries + assert!( + deposit_cache.get_log((quarter - 1) as usize).is_none(), + "get_log() should return None for index <= finalized log index", + ); + assert!( + deposit_cache.get_log(quarter as usize).is_some(), + "get_log() should return Some(log) for index >= finalized_deposit_count", + ); + + // full root should remain the same after finalization + assert_eq!( + full_root_before_finalization, + deposit_cache.deposit_tree.root(), + "root should be the same before and after finalization", + ); + // get_root should return the same root before and after finalization + assert_eq!( + q3_root_before_finalization, + deposit_cache + .get_root((half + quarter) as usize) + .cloned() + .expect("root should exist"), + "get_root should return the same root before and after finalization", + ); + // get_log should return the same log before and after finalization + assert_eq!( + q3_log_before_finalization, + deposit_cache + .get_log((half + quarter) as usize) + .cloned() + .expect("log should exist"), + "get_log should return the same log before and after finalization", + ); + + // again get lower quarter of deposits with max deposit count after finalization + let (f0_lower_quarter_root, f0_lower_quarter_deposits) = deposit_cache + .get_deposits(quarter, half, n) + .expect("should get lower quarter"); + assert_eq!( + f0_lower_quarter_deposits.len(), + quarter as usize, + "should get {} deposits from lower quarter", + 
quarter, + ); + // again get upper quarter of deposits with slightly reduced deposit count after finalization + let (f0_upper_quarter_root, f0_upper_quarter_deposits) = deposit_cache + .get_deposits(half, half + quarter, n - 2) + .expect("should get upper quarter"); + assert_eq!( + f0_upper_quarter_deposits.len(), + quarter as usize, + "should get {} deposits from upper quarter", + quarter, + ); + + // lower quarter root and deposits should be the same + assert_eq!( + lower_quarter_root_before_finalization, f0_lower_quarter_root, + "root should be the same before and after finalization", + ); + for i in 0..lower_quarter_deposits_before_finalization.len() { + assert_eq!( + lower_quarter_deposits_before_finalization[i], f0_lower_quarter_deposits[i], + "get_deposits() should be the same before and after finalization", + ); + } + // upper quarter root and deposits should be the same + assert_eq!( + upper_quarter_root_before_finalization, f0_upper_quarter_root, + "subtree root should be the same before and after finalization", + ); + for i in 0..upper_quarter_deposits_before_finalization.len() { + assert_eq!( + upper_quarter_deposits_before_finalization[i], f0_upper_quarter_deposits[i], + "get_deposits() should be the same before and after finalization", + ); + } + + let f1_log = deposit_cache + .get_log((half - 2) as usize) + .cloned() + .expect("should return log"); + // finalize a little less than half to test multiple finalization + let f1_block = fake_eth1_block(&deposit_cache, (half - 2) as usize) + .expect("should create fake eth1 block"); + deposit_cache + .finalize(f1_block) + .expect("should finalize a little less than half"); + // finalized count and block number should match f1_log + assert_eq!( + deposit_cache.finalized_deposit_count, + f1_log.index + 1, + "after calling finalize(eth1block) finalized_deposit_count should equal eth1_block.deposit_count", + ); + assert_eq!( + deposit_cache.finalized_block_height, + f1_log.block_number, + "after calling 
finalize(eth1block) finalized_block_number should equal eth1block.block_number" + ); + // check get_log boundaries + assert!( + deposit_cache.get_log((half - 2) as usize).is_none(), + "get_log() should return None for index <= finalized log index", + ); + assert!( + deposit_cache.get_log((half - 1) as usize).is_some(), + "get_log() should return Some(log) for index >= finalized_deposit_count", + ); + + // full root should still be unchanged + assert_eq!( + full_root_before_finalization, + deposit_cache.deposit_tree.root(), + "root should be the same before and after finalization", + ); + + // again get upper quarter of deposits with slightly reduced deposit count after second finalization + let (f1_upper_quarter_root, f1_upper_quarter_deposits) = deposit_cache + .get_deposits(half, half + quarter, n - 2) + .expect("should get upper quarter"); + + // upper quarter root and deposits should be the same after second finalization + assert_eq!( + f0_upper_quarter_root, f1_upper_quarter_root, + "subtree root should be the same after multiple finalization", + ); + for i in 0..f0_upper_quarter_deposits.len() { + assert_eq!( + f0_upper_quarter_deposits[i], f1_upper_quarter_deposits[i], + "get_deposits() should be the same before and after finalization", + ); + } + } + + fn verify_equality(original: &DepositCache, copy: &DepositCache) { + // verify each field individually so that if one field should + // fail to recover, this test will point right to it + assert_eq!(original.deposit_contract_deploy_block, copy.deposit_contract_deploy_block, "DepositCache: deposit_contract_deploy_block should remain the same after encoding and decoding from ssz" ); + assert_eq!( + original.leaves, copy.leaves, + "DepositCache: leaves should remain the same after encoding and decoding from ssz" + ); + assert_eq!( + original.logs, copy.logs, + "DepositCache: logs should remain the same after encoding and decoding from ssz" + ); + assert_eq!(original.finalized_deposit_count, 
copy.finalized_deposit_count, "DepositCache: finalized_deposit_count should remain the same after encoding and decoding from ssz"); + assert_eq!(original.finalized_block_height, copy.finalized_block_height, "DepositCache: finalized_block_height should remain the same after encoding and decoding from ssz"); + assert_eq!(original.deposit_roots, copy.deposit_roots, "DepositCache: deposit_roots should remain the same before and after encoding and decoding from ssz"); + assert!(original.deposit_tree == copy.deposit_tree, "DepositCache: deposit_tree should remain the same before and after encoding and decoding from ssz"); + // verify all together for good measure + assert!( + original == copy, + "Deposit cache should remain the same after encoding and decoding from ssz" + ); + } + + fn ssz_round_trip(original: &DepositCache) -> DepositCache { + use ssz::{Decode, Encode}; + let bytes = SszDepositCache::from_deposit_cache(original).as_ssz_bytes(); + let ssz_cache = + SszDepositCache::from_ssz_bytes(&bytes).expect("should decode from ssz bytes"); + + SszDepositCache::to_deposit_cache(&ssz_cache).expect("should recover cache") + } + + #[test] + fn ssz_encode_decode() { + let deposit_cache = get_cache_with_deposits(512); + let recovered_cache = ssz_round_trip(&deposit_cache); + + verify_equality(&deposit_cache, &recovered_cache); + } + + #[test] + fn ssz_encode_decode_with_finalization() { + let mut deposit_cache = get_cache_with_deposits(512); + let block383 = fake_eth1_block(&deposit_cache, 383).expect("should create fake eth1 block"); + deposit_cache.finalize(block383).expect("should finalize"); + let mut first_recovery = ssz_round_trip(&deposit_cache); + + verify_equality(&deposit_cache, &first_recovery); + // finalize again to verify equality after multiple finalizations + let block447 = fake_eth1_block(&deposit_cache, 447).expect("should create fake eth1 block"); + first_recovery.finalize(block447).expect("should finalize"); + + let mut second_recovery = 
ssz_round_trip(&first_recovery); + verify_equality(&first_recovery, &second_recovery); + + // verify equality of a tree that finalized block383, block447, block479 + // with a tree that finalized block383, block479 + let block479 = fake_eth1_block(&deposit_cache, 479).expect("should create fake eth1 block"); + second_recovery + .finalize(block479.clone()) + .expect("should finalize"); + let third_recovery = ssz_round_trip(&second_recovery); + deposit_cache.finalize(block479).expect("should finalize"); + + verify_equality(&deposit_cache, &third_recovery); } } diff --git a/beacon_node/eth1/src/inner.rs b/beacon_node/eth1/src/inner.rs index b0a951bef0..0468a02d2e 100644 --- a/beacon_node/eth1/src/inner.rs +++ b/beacon_node/eth1/src/inner.rs @@ -2,14 +2,15 @@ use crate::service::endpoint_from_config; use crate::Config; use crate::{ block_cache::{BlockCache, Eth1Block}, - deposit_cache::{DepositCache, SszDepositCache}, + deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV1, SszDepositCacheV13}, }; use execution_layer::HttpJsonRpc; use parking_lot::RwLock; use ssz::four_byte_option_impl; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use types::ChainSpec; +use superstruct::superstruct; +use types::{ChainSpec, DepositTreeSnapshot, Eth1Data}; // Define "legacy" implementations of `Option` which use four bytes for encoding the union // selector. 
@@ -29,12 +30,25 @@ impl DepositUpdater { last_processed_block: None, } } + + pub fn from_snapshot( + deposit_contract_deploy_block: u64, + snapshot: &DepositTreeSnapshot, + ) -> Result { + let last_processed_block = Some(snapshot.execution_block_height); + Ok(Self { + cache: DepositCache::from_deposit_snapshot(deposit_contract_deploy_block, snapshot)?, + last_processed_block, + }) + } } pub struct Inner { pub block_cache: RwLock, pub deposit_cache: RwLock, pub endpoint: HttpJsonRpc, + // this gets set to Some(Eth1Data) when the deposit finalization conditions are met + pub to_finalize: RwLock>, pub config: RwLock, pub remote_head_block: RwLock>, pub spec: ChainSpec, @@ -58,9 +72,13 @@ impl Inner { /// Recover `Inner` given byte representation of eth1 deposit and block caches. pub fn from_bytes(bytes: &[u8], config: Config, spec: ChainSpec) -> Result { - let ssz_cache = SszEth1Cache::from_ssz_bytes(bytes) - .map_err(|e| format!("Ssz decoding error: {:?}", e))?; - ssz_cache.to_inner(config, spec) + SszEth1Cache::from_ssz_bytes(bytes) + .map_err(|e| format!("Ssz decoding error: {:?}", e))? + .to_inner(config, spec) + .map(|inner| { + inner.block_cache.write().rebuild_by_hash_map(); + inner + }) } /// Returns a reference to the specification. 
@@ -69,12 +87,21 @@ impl Inner { } } -#[derive(Encode, Decode, Clone)] +pub type SszEth1Cache = SszEth1CacheV13; + +#[superstruct( + variants(V1, V13), + variant_attributes(derive(Encode, Decode, Clone)), + no_enum +)] pub struct SszEth1Cache { - block_cache: BlockCache, - deposit_cache: SszDepositCache, + pub block_cache: BlockCache, + #[superstruct(only(V1))] + pub deposit_cache: SszDepositCacheV1, + #[superstruct(only(V13))] + pub deposit_cache: SszDepositCacheV13, #[ssz(with = "four_byte_option_u64")] - last_processed_block: Option, + pub last_processed_block: Option, } impl SszEth1Cache { @@ -97,6 +124,7 @@ impl SszEth1Cache { }), endpoint: endpoint_from_config(&config) .map_err(|e| format!("Failed to create endpoint: {:?}", e))?, + to_finalize: RwLock::new(None), // Set the remote head_block zero when creating a new instance. We only care about // present and future eth1 nodes. remote_head_block: RwLock::new(None), diff --git a/beacon_node/eth1/src/lib.rs b/beacon_node/eth1/src/lib.rs index f99d085250..3b288de490 100644 --- a/beacon_node/eth1/src/lib.rs +++ b/beacon_node/eth1/src/lib.rs @@ -8,9 +8,9 @@ mod metrics; mod service; pub use block_cache::{BlockCache, Eth1Block}; -pub use deposit_cache::DepositCache; +pub use deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV1, SszDepositCacheV13}; pub use execution_layer::http::deposit_log::DepositLog; -pub use inner::SszEth1Cache; +pub use inner::{SszEth1Cache, SszEth1CacheV1, SszEth1CacheV13}; pub use service::{ BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Eth1Endpoint, Service, DEFAULT_CHAIN_ID, diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index c6b87e88e3..f24b746cd4 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -20,7 +20,7 @@ use std::path::PathBuf; use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use tokio::time::{interval_at, Duration, Instant}; -use types::{ChainSpec, EthSpec, Unsigned}; 
+use types::{ChainSpec, DepositTreeSnapshot, Eth1Data, EthSpec, Unsigned}; /// Indicates the default eth1 chain id we use for the deposit contract. pub const DEFAULT_CHAIN_ID: Eth1Id = Eth1Id::Goerli; @@ -63,7 +63,13 @@ async fn endpoint_state( config_chain_id: &Eth1Id, log: &Logger, ) -> EndpointState { - let error_connecting = |e| { + let error_connecting = |e: String| { + debug!( + log, + "eth1 endpoint error"; + "endpoint" => %endpoint, + "error" => &e, + ); warn!( log, "Error connecting to eth1 node endpoint"; @@ -213,6 +219,10 @@ pub enum Error { GetDepositLogsFailed(String), /// There was an unexpected internal error. Internal(String), + /// Error finalizing deposit + FailedToFinalizeDeposit(String), + /// There was a problem Initializing from deposit snapshot + FailedToInitializeFromSnapshot(String), } /// The success message for an Eth1Data cache update. @@ -395,6 +405,7 @@ impl Service { config.deposit_contract_deploy_block, )), endpoint: endpoint_from_config(&config)?, + to_finalize: RwLock::new(None), remote_head_block: RwLock::new(None), config: RwLock::new(config), spec, @@ -407,6 +418,36 @@ impl Service { &self.inner.endpoint } + /// Creates a new service, initializing the deposit tree from a snapshot. 
+ pub fn from_deposit_snapshot( + config: Config, + log: Logger, + spec: ChainSpec, + deposit_snapshot: &DepositTreeSnapshot, + ) -> Result { + let deposit_cache = + DepositUpdater::from_snapshot(config.deposit_contract_deploy_block, deposit_snapshot) + .map_err(Error::FailedToInitializeFromSnapshot)?; + + Ok(Self { + inner: Arc::new(Inner { + block_cache: <_>::default(), + deposit_cache: RwLock::new(deposit_cache), + endpoint: endpoint_from_config(&config) + .map_err(Error::FailedToInitializeFromSnapshot)?, + to_finalize: RwLock::new(None), + remote_head_block: RwLock::new(None), + config: RwLock::new(config), + spec, + }), + log, + }) + } + + pub fn set_to_finalize(&self, eth1_data: Option) { + *(self.inner.to_finalize.write()) = eth1_data; + } + /// Returns the follow distance that has been shortened to accommodate for differences in the /// spacing between blocks. /// @@ -521,7 +562,7 @@ impl Service { let deposits = self.deposits().read(); deposits .cache - .get_valid_signature_count(deposits.cache.latest_block_number()?) 
+ .get_valid_signature_count(deposits.cache.latest_block_number()) } /// Returns the number of deposits with valid signatures that have been observed up to and @@ -619,7 +660,8 @@ impl Service { "old_block_number" => deposit_cache.last_processed_block, "new_block_number" => deposit_cache.cache.latest_block_number(), ); - deposit_cache.last_processed_block = deposit_cache.cache.latest_block_number(); + deposit_cache.last_processed_block = + Some(deposit_cache.cache.latest_block_number()); } let outcome = @@ -698,6 +740,37 @@ impl Service { "deposits" => format!("{:?}", deposit), ), }; + let optional_eth1data = self.inner.to_finalize.write().take(); + if let Some(eth1data_to_finalize) = optional_eth1data { + let already_finalized = self + .inner + .deposit_cache + .read() + .cache + .finalized_deposit_count(); + let deposit_count_to_finalize = eth1data_to_finalize.deposit_count; + if deposit_count_to_finalize > already_finalized { + match self.finalize_deposits(eth1data_to_finalize) { + Err(e) => error!( + self.log, + "Failed to finalize deposit cache"; + "error" => ?e, + ), + Ok(()) => info!( + self.log, + "Successfully finalized deposit tree"; + "finalized deposit count" => deposit_count_to_finalize, + ), + } + } else { + debug!( + self.log, + "Deposits tree already finalized"; + "already_finalized" => already_finalized, + "deposit_count_to_finalize" => deposit_count_to_finalize, + ); + } + } Ok(()) } @@ -733,6 +806,30 @@ impl Service { ) } + pub fn finalize_deposits(&self, eth1_data: Eth1Data) -> Result<(), Error> { + let eth1_block = self + .inner + .block_cache + .read() + .block_by_hash(ð1_data.block_hash) + .cloned() + .ok_or_else(|| { + Error::FailedToFinalizeDeposit( + "Finalized block not found in block cache".to_string(), + ) + })?; + self.inner + .deposit_cache + .write() + .cache + .finalize(eth1_block) + .map_err(|e| Error::FailedToFinalizeDeposit(format!("{:?}", e))) + } + + pub fn get_deposit_snapshot(&self) -> Option { + 
self.inner.deposit_cache.read().cache.get_deposit_snapshot() + } + /// Contacts the remote eth1 node and attempts to import deposit logs up to the configured /// follow-distance block. /// diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index 7e58f07e24..069a6e4aad 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -400,7 +400,7 @@ mod deposit_tree { .deposits() .read() .cache - .get_deposits(first, last, last, 32) + .get_deposits(first, last, last) .unwrap_or_else(|_| panic!("should get deposits in round {}", round)); assert_eq!( @@ -551,7 +551,7 @@ mod deposit_tree { // Ensure that the root from the deposit tree matches what the contract reported. let (root, deposits) = tree - .get_deposits(0, i as u64, deposit_counts[i], DEPOSIT_CONTRACT_TREE_DEPTH) + .get_deposits(0, i as u64, deposit_counts[i]) .expect("should get deposits"); assert_eq!( root, deposit_roots[i], diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index be68c37b06..7453663012 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -208,6 +208,7 @@ pub mod deposit_methods { #[derive(Clone, Copy)] pub enum BlockQuery { Number(u64), + Hash(Hash256), Latest, } @@ -322,9 +323,12 @@ pub mod deposit_methods { query: BlockQuery, timeout: Duration, ) -> Result { - let query_param = match query { - BlockQuery::Number(block_number) => format!("0x{:x}", block_number), - BlockQuery::Latest => "latest".to_string(), + let (method, query_param) = match query { + BlockQuery::Number(block_number) => { + ("eth_getBlockByNumber", format!("0x{:x}", block_number)) + } + BlockQuery::Hash(block_hash) => ("eth_getBlockByHash", format!("{:?}", block_hash)), + BlockQuery::Latest => ("eth_getBlockByNumber", "latest".to_string()), }; let params = json!([ query_param, @@ -332,9 +336,9 @@ pub mod deposit_methods { ]); let response: 
Value = self - .rpc_request("eth_getBlockByNumber", params, timeout) + .rpc_request(method, params, timeout) .await - .map_err(|e| format!("eth_getBlockByNumber call failed {:?}", e))?; + .map_err(|e| format!("{} call failed {:?}", method, e))?; let hash: Vec = hex_to_bytes( response diff --git a/beacon_node/genesis/src/common.rs b/beacon_node/genesis/src/common.rs index 0d483f9834..06bf99f9f6 100644 --- a/beacon_node/genesis/src/common.rs +++ b/beacon_node/genesis/src/common.rs @@ -23,7 +23,9 @@ pub fn genesis_deposits( return Err(String::from("Failed to push leaf")); } - let (_, mut proof) = tree.generate_proof(i, depth); + let (_, mut proof) = tree + .generate_proof(i, depth) + .map_err(|e| format!("Error generating merkle proof: {:?}", e))?; proof.push(Hash256::from_slice(&int_to_fixed_bytes32((i + 1) as u64))); assert_eq!( diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index 5614e237ff..b7134e37c4 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -86,7 +86,7 @@ impl Eth1GenesisService { .deposits() .read() .cache - .get(min_genesis_active_validator_count.saturating_sub(1)) + .get_log(min_genesis_active_validator_count.saturating_sub(1)) .map(|log| log.block_number) } } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5b4fa5816d..4267a22f98 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1533,6 +1533,53 @@ pub fn serve( }, ); + // GET beacon/deposit_snapshot + let get_beacon_deposit_snapshot = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("deposit_snapshot")) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + .and(eth1_service_filter.clone()) + .and_then( + |accept_header: Option, eth1_service: eth1::Service| { + blocking_task(move || match accept_header { + Some(api_types::Accept::Json) | None => { + let snapshot = 
eth1_service.get_deposit_snapshot(); + Ok( + warp::reply::json(&api_types::GenericResponse::from(snapshot)) + .into_response(), + ) + } + _ => eth1_service + .get_deposit_snapshot() + .map(|snapshot| { + Response::builder() + .status(200) + .header("Content-Type", "application/octet-stream") + .body(snapshot.as_ssz_bytes().into()) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }) + }) + .unwrap_or_else(|| { + Response::builder() + .status(503) + .header("Content-Type", "application/octet-stream") + .body(Vec::new().into()) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }) + }), + }) + }, + ); + /* * config */ @@ -3120,6 +3167,7 @@ pub fn serve( .or(get_beacon_pool_attester_slashings.boxed()) .or(get_beacon_pool_proposer_slashings.boxed()) .or(get_beacon_pool_voluntary_exits.boxed()) + .or(get_beacon_deposit_snapshot.boxed()) .or(get_config_fork_schedule.boxed()) .or(get_config_spec.boxed()) .or(get_config_deposit_contract.boxed()) diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 4f35c4c072..5cb3f12200 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(12); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(13); // All the keys that get stored under the `BeaconMeta` column. 
// diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 104ca9ccd4..a2fb082a35 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -114,6 +114,7 @@ pub struct Timeouts { pub sync_duties: Duration, pub get_beacon_blocks_ssz: Duration, pub get_debug_beacon_states: Duration, + pub get_deposit_snapshot: Duration, } impl Timeouts { @@ -128,6 +129,7 @@ impl Timeouts { sync_duties: timeout, get_beacon_blocks_ssz: timeout, get_debug_beacon_states: timeout, + get_deposit_snapshot: timeout, } } } @@ -913,6 +915,20 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `GET beacon/deposit_snapshot` + pub async fn get_deposit_snapshot(&self) -> Result, Error> { + use ssz::Decode; + let mut path = self.eth_path(V1)?; + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("deposit_snapshot"); + self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_deposit_snapshot) + .await? + .map(|bytes| DepositTreeSnapshot::from_ssz_bytes(&bytes).map_err(Error::InvalidSsz)) + .transpose() + } + /// `POST validator/contribution_and_proofs` pub async fn post_validator_contribution_and_proofs( &self, diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 91e6a5558b..2dced1c449 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -6,7 +6,10 @@ mod block_rewards; use crate::{ ok_or_error, - types::{BeaconState, ChainSpec, Epoch, EthSpec, GenericResponse, ValidatorId}, + types::{ + BeaconState, ChainSpec, DepositTreeSnapshot, Epoch, EthSpec, FinalizedExecutionBlock, + GenericResponse, ValidatorId, + }, BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, StateId, StatusCode, }; use proto_array::core::ProtoArray; @@ -331,6 +334,19 @@ impl Eth1Block { } } +impl From for FinalizedExecutionBlock { + fn from(eth1_block: Eth1Block) -> Self { + Self { + deposit_count: eth1_block.deposit_count.unwrap_or(0), + deposit_root: eth1_block + .deposit_root + 
.unwrap_or_else(|| DepositTreeSnapshot::default().deposit_root), + block_hash: eth1_block.hash, + block_height: eth1_block.number, + } + } +} + #[derive(Debug, Serialize, Deserialize)] pub struct DatabaseInfo { pub schema_version: u64, diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs index da9b78ff11..887deb1efd 100644 --- a/consensus/merkle_proof/src/lib.rs +++ b/consensus/merkle_proof/src/lib.rs @@ -19,6 +19,8 @@ lazy_static! { /// indices are populated by non-zero leaves (perfect for the deposit contract tree). #[derive(Debug, PartialEq)] pub enum MerkleTree { + /// Finalized Node + Finalized(H256), /// Leaf node with the hash of its content. Leaf(H256), /// Internal node with hash, left subtree and right subtree. @@ -41,6 +43,24 @@ pub enum MerkleTreeError { DepthTooSmall, // Overflow occurred ArithError, + // Can't finalize a zero node + ZeroNodeFinalized, + // Can't push to finalized node + FinalizedNodePushed, + // Invalid Snapshot + InvalidSnapshot(InvalidSnapshot), + // Can't proof a finalized node + ProofEncounteredFinalizedNode, + // This should never happen + PleaseNotifyTheDevs, +} + +#[derive(Debug, PartialEq, Clone)] +pub enum InvalidSnapshot { + // Branch hashes are empty but deposits are not + EmptyBranchWithNonZeroDeposits(usize), + // End of tree reached but deposits != 1 + EndOfTree, } impl MerkleTree { @@ -97,9 +117,11 @@ impl MerkleTree { let right: &mut MerkleTree = &mut *right; match (&*left, &*right) { // Tree is full - (Leaf(_), Leaf(_)) => return Err(MerkleTreeError::MerkleTreeFull), + (Leaf(_), Leaf(_)) | (Finalized(_), Leaf(_)) => { + return Err(MerkleTreeError::MerkleTreeFull) + } // There is a right node so insert in right node - (Node(_, _, _), Node(_, _, _)) => { + (Node(_, _, _), Node(_, _, _)) | (Finalized(_), Node(_, _, _)) => { right.push_leaf(elem, depth - 1)?; } // Both branches are zero, insert in left one @@ -107,7 +129,7 @@ impl MerkleTree { *left = MerkleTree::create(&[elem], depth - 1); } 
// Leaf on left branch and zero on right branch, insert on right side - (Leaf(_), Zero(_)) => { + (Leaf(_), Zero(_)) | (Finalized(_), Zero(_)) => { *right = MerkleTree::create(&[elem], depth - 1); } // Try inserting on the left node -> if it fails because it is full, insert in right side. @@ -129,6 +151,7 @@ impl MerkleTree { right.hash().as_bytes(), )); } + Finalized(_) => return Err(MerkleTreeError::FinalizedNodePushed), } Ok(()) @@ -137,6 +160,7 @@ impl MerkleTree { /// Retrieve the root hash of this Merkle tree. pub fn hash(&self) -> H256 { match *self { + MerkleTree::Finalized(h) => h, MerkleTree::Leaf(h) => h, MerkleTree::Node(h, _, _) => h, MerkleTree::Zero(depth) => H256::from_slice(&ZERO_HASHES[depth]), @@ -146,7 +170,7 @@ impl MerkleTree { /// Get a reference to the left and right subtrees if they exist. pub fn left_and_right_branches(&self) -> Option<(&Self, &Self)> { match *self { - MerkleTree::Leaf(_) | MerkleTree::Zero(0) => None, + MerkleTree::Finalized(_) | MerkleTree::Leaf(_) | MerkleTree::Zero(0) => None, MerkleTree::Node(_, ref l, ref r) => Some((l, r)), MerkleTree::Zero(depth) => Some((&ZERO_NODES[depth - 1], &ZERO_NODES[depth - 1])), } @@ -157,16 +181,125 @@ impl MerkleTree { matches!(self, MerkleTree::Leaf(_)) } + /// Finalize deposits up to deposit with count = deposits_to_finalize + pub fn finalize_deposits( + &mut self, + deposits_to_finalize: usize, + level: usize, + ) -> Result<(), MerkleTreeError> { + match self { + MerkleTree::Finalized(_) => Ok(()), + MerkleTree::Zero(_) => Err(MerkleTreeError::ZeroNodeFinalized), + MerkleTree::Leaf(hash) => { + if level != 0 { + // This shouldn't happen but this is a sanity check + return Err(MerkleTreeError::PleaseNotifyTheDevs); + } + *self = MerkleTree::Finalized(*hash); + Ok(()) + } + MerkleTree::Node(hash, left, right) => { + if level == 0 { + // this shouldn't happen but we'll put it here for safety + return Err(MerkleTreeError::PleaseNotifyTheDevs); + } + let deposits = 0x1 << level; + if 
deposits <= deposits_to_finalize { + *self = MerkleTree::Finalized(*hash); + return Ok(()); + } + left.finalize_deposits(deposits_to_finalize, level - 1)?; + if deposits_to_finalize > deposits / 2 { + let remaining = deposits_to_finalize - deposits / 2; + right.finalize_deposits(remaining, level - 1)?; + } + Ok(()) + } + } + } + + fn append_finalized_hashes(&self, result: &mut Vec) { + match self { + MerkleTree::Zero(_) | MerkleTree::Leaf(_) => {} + MerkleTree::Finalized(h) => result.push(*h), + MerkleTree::Node(_, left, right) => { + left.append_finalized_hashes(result); + right.append_finalized_hashes(result); + } + } + } + + pub fn get_finalized_hashes(&self) -> Vec { + let mut result = vec![]; + self.append_finalized_hashes(&mut result); + result + } + + pub fn from_finalized_snapshot( + finalized_branch: &[H256], + deposit_count: usize, + level: usize, + ) -> Result { + if finalized_branch.is_empty() { + return if deposit_count == 0 { + Ok(MerkleTree::Zero(level)) + } else { + Err(InvalidSnapshot::EmptyBranchWithNonZeroDeposits(deposit_count).into()) + }; + } + if deposit_count == (0x1 << level) { + return Ok(MerkleTree::Finalized( + *finalized_branch + .get(0) + .ok_or(MerkleTreeError::PleaseNotifyTheDevs)?, + )); + } + if level == 0 { + return Err(InvalidSnapshot::EndOfTree.into()); + } + + let (left, right) = match deposit_count.checked_sub(0x1 << (level - 1)) { + // left tree is fully finalized + Some(right_deposits) => { + let (left_hash, right_branch) = finalized_branch + .split_first() + .ok_or(MerkleTreeError::PleaseNotifyTheDevs)?; + ( + MerkleTree::Finalized(*left_hash), + MerkleTree::from_finalized_snapshot(right_branch, right_deposits, level - 1)?, + ) + } + // left tree is not fully finalized -> right tree is zero + None => ( + MerkleTree::from_finalized_snapshot(finalized_branch, deposit_count, level - 1)?, + MerkleTree::Zero(level - 1), + ), + }; + + let hash = H256::from_slice(&hash32_concat( + left.hash().as_bytes(), + right.hash().as_bytes(), 
+ )); + Ok(MerkleTree::Node(hash, Box::new(left), Box::new(right))) + } + /// Return the leaf at `index` and a Merkle proof of its inclusion. /// /// The Merkle proof is in "bottom-up" order, starting with a leaf node /// and moving up the tree. Its length will be exactly equal to `depth`. - pub fn generate_proof(&self, index: usize, depth: usize) -> (H256, Vec) { + pub fn generate_proof( + &self, + index: usize, + depth: usize, + ) -> Result<(H256, Vec), MerkleTreeError> { let mut proof = vec![]; let mut current_node = self; let mut current_depth = depth; while current_depth > 0 { let ith_bit = (index >> (current_depth - 1)) & 0x01; + if let &MerkleTree::Finalized(_) = current_node { + return Err(MerkleTreeError::ProofEncounteredFinalizedNode); + } // Note: unwrap is safe because leaves are only ever constructed at depth == 0. let (left, right) = current_node.left_and_right_branches().unwrap(); @@ -187,7 +320,33 @@ impl MerkleTree { // Put proof in bottom-up order. proof.reverse(); - (current_node.hash(), proof) + Ok((current_node.hash(), proof)) + } + + /// useful for debugging + pub fn print_node(&self, mut space: u32) { + const SPACES: u32 = 10; + space += SPACES; + let (pair, text) = match self { + MerkleTree::Node(hash, left, right) => (Some((left, right)), format!("Node({})", hash)), + MerkleTree::Leaf(hash) => (None, format!("Leaf({})", hash)), + MerkleTree::Zero(depth) => ( + None, + format!("Z[{}]({})", depth, H256::from_slice(&ZERO_HASHES[*depth])), + ), + MerkleTree::Finalized(hash) => (None, format!("Finl({})", hash)), + }; + if let Some((_, right)) = pair { + right.print_node(space); + } + println!(); + for _i in SPACES..space { + print!(" "); + } + println!("{}", text); + if let Some((left, _)) = pair { + left.print_node(space); + } } } @@ -235,6 +394,12 @@ impl From for MerkleTreeError { } } +impl From for MerkleTreeError { + fn from(e: InvalidSnapshot) -> Self { + MerkleTreeError::InvalidSnapshot(e) + } +} + #[cfg(test)] mod tests { use super::*; 
@@ -255,7 +420,9 @@ mod tests { let merkle_root = merkle_tree.hash(); let proofs_ok = (0..leaves.len()).all(|i| { - let (leaf, branch) = merkle_tree.generate_proof(i, depth); + let (leaf, branch) = merkle_tree + .generate_proof(i, depth) + .expect("should generate proof"); leaf == leaves[i] && verify_merkle_proof(leaf, &branch, depth, i, merkle_root) }); @@ -274,7 +441,9 @@ mod tests { let proofs_ok = leaves_iter.enumerate().all(|(i, leaf)| { assert_eq!(merkle_tree.push_leaf(leaf, depth), Ok(())); - let (stored_leaf, branch) = merkle_tree.generate_proof(i, depth); + let (stored_leaf, branch) = merkle_tree + .generate_proof(i, depth) + .expect("should generate proof"); stored_leaf == leaf && verify_merkle_proof(leaf, &branch, depth, i, merkle_tree.hash()) }); diff --git a/consensus/ssz/src/decode/impls.rs b/consensus/ssz/src/decode/impls.rs index d91ddabe02..76d85f775d 100644 --- a/consensus/ssz/src/decode/impls.rs +++ b/consensus/ssz/src/decode/impls.rs @@ -246,6 +246,20 @@ impl Decode for NonZeroUsize { } } +impl Decode for Option { + fn is_ssz_fixed_len() -> bool { + false + } + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let (selector, body) = split_union_bytes(bytes)?; + match selector.into() { + 0u8 => Ok(None), + 1u8 => ::from_ssz_bytes(body).map(Option::Some), + other => Err(DecodeError::UnionSelectorInvalid(other)), + } + } +} + impl Decode for Arc { fn is_ssz_fixed_len() -> bool { T::is_ssz_fixed_len() diff --git a/consensus/ssz/src/encode/impls.rs b/consensus/ssz/src/encode/impls.rs index cfd95ba40d..833480e1b6 100644 --- a/consensus/ssz/src/encode/impls.rs +++ b/consensus/ssz/src/encode/impls.rs @@ -203,6 +203,34 @@ impl_encode_for_tuples! 
{ } } +impl Encode for Option { + fn is_ssz_fixed_len() -> bool { + false + } + fn ssz_append(&self, buf: &mut Vec) { + match self { + Option::None => { + let union_selector: u8 = 0u8; + buf.push(union_selector); + } + Option::Some(ref inner) => { + let union_selector: u8 = 1u8; + buf.push(union_selector); + inner.ssz_append(buf); + } + } + } + fn ssz_bytes_len(&self) -> usize { + match self { + Option::None => 1usize, + Option::Some(ref inner) => inner + .ssz_bytes_len() + .checked_add(1) + .expect("encoded length must be less than usize::max_value"), + } + } +} + impl Encode for Arc { fn is_ssz_fixed_len() -> bool { T::is_ssz_fixed_len() @@ -561,6 +589,14 @@ mod tests { ); } + #[test] + fn ssz_encode_option_u8() { + let opt: Option = None; + assert_eq!(opt.as_ssz_bytes(), vec![0]); + let opt: Option = Some(2); + assert_eq!(opt.as_ssz_bytes(), vec![1, 2]); + } + #[test] fn ssz_encode_bool() { assert_eq!(true.as_ssz_bytes(), vec![1]); diff --git a/consensus/ssz/tests/tests.rs b/consensus/ssz/tests/tests.rs index e41fc15dd4..b4b91da4b5 100644 --- a/consensus/ssz/tests/tests.rs +++ b/consensus/ssz/tests/tests.rs @@ -22,6 +22,13 @@ mod round_trip { round_trip(items); } + #[test] + fn option_u16() { + let items: Vec> = vec![None, Some(2u16)]; + + round_trip(items); + } + #[test] fn u8_array_4() { let items: Vec<[u8; 4]> = vec![[0, 0, 0, 0], [1, 0, 0, 0], [1, 2, 3, 4], [1, 2, 0, 4]]; @@ -46,6 +53,17 @@ mod round_trip { round_trip(items); } + #[test] + fn option_vec_h256() { + let items: Vec>> = vec![ + None, + Some(vec![]), + Some(vec![H256::zero(), H256::from([1; 32]), H256::random()]), + ]; + + round_trip(items); + } + #[test] fn vec_u16() { let items: Vec> = vec![ diff --git a/consensus/state_processing/src/common/deposit_data_tree.rs b/consensus/state_processing/src/common/deposit_data_tree.rs index 46f1ed8ccd..aaad96fbd5 100644 --- a/consensus/state_processing/src/common/deposit_data_tree.rs +++ b/consensus/state_processing/src/common/deposit_data_tree.rs @@ -2,12 
+2,14 @@ use eth2_hashing::hash; use int_to_bytes::int_to_bytes32; use merkle_proof::{MerkleTree, MerkleTreeError}; use safe_arith::SafeArith; -use types::Hash256; +use types::{DepositTreeSnapshot, FinalizedExecutionBlock, Hash256}; /// Emulates the eth1 deposit contract merkle tree. +#[derive(PartialEq)] pub struct DepositDataTree { tree: MerkleTree, mix_in_length: usize, + finalized_execution_block: Option, depth: usize, } @@ -17,6 +19,7 @@ impl DepositDataTree { Self { tree: MerkleTree::create(leaves, depth), mix_in_length, + finalized_execution_block: None, depth, } } @@ -38,10 +41,10 @@ impl DepositDataTree { /// /// The Merkle proof is in "bottom-up" order, starting with a leaf node /// and moving up the tree. Its length will be exactly equal to `depth + 1`. - pub fn generate_proof(&self, index: usize) -> (Hash256, Vec) { - let (root, mut proof) = self.tree.generate_proof(index, self.depth); + pub fn generate_proof(&self, index: usize) -> Result<(Hash256, Vec), MerkleTreeError> { + let (root, mut proof) = self.tree.generate_proof(index, self.depth)?; proof.push(Hash256::from_slice(&self.length_bytes())); - (root, proof) + Ok((root, proof)) } /// Add a deposit to the merkle tree. 
@@ -50,4 +53,50 @@ impl DepositDataTree { self.mix_in_length.safe_add_assign(1)?; Ok(()) } + + /// Finalize deposits up to `finalized_execution_block.deposit_count` + pub fn finalize( + &mut self, + finalized_execution_block: FinalizedExecutionBlock, + ) -> Result<(), MerkleTreeError> { + self.tree + .finalize_deposits(finalized_execution_block.deposit_count as usize, self.depth)?; + self.finalized_execution_block = Some(finalized_execution_block); + Ok(()) + } + + /// Get snapshot of finalized deposit tree (if tree is finalized) + pub fn get_snapshot(&self) -> Option { + let finalized_execution_block = self.finalized_execution_block.as_ref()?; + Some(DepositTreeSnapshot { + finalized: self.tree.get_finalized_hashes(), + deposit_root: finalized_execution_block.deposit_root, + deposit_count: finalized_execution_block.deposit_count, + execution_block_hash: finalized_execution_block.block_hash, + execution_block_height: finalized_execution_block.block_height, + }) + } + + /// Create a new Merkle tree from a snapshot + pub fn from_snapshot( + snapshot: &DepositTreeSnapshot, + depth: usize, + ) -> Result { + Ok(Self { + tree: MerkleTree::from_finalized_snapshot( + &snapshot.finalized, + snapshot.deposit_count as usize, + depth, + )?, + mix_in_length: snapshot.deposit_count as usize, + finalized_execution_block: Some(snapshot.into()), + depth, + }) + } + + #[allow(dead_code)] + pub fn print_tree(&self) { + self.tree.print_node(0); + println!("========================================================"); + } } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 68fdbf7990..d1b2ae1823 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "types" -version = "0.2.0" +version = "0.2.1" authors = ["Paul Hauner ", "Age Manning "] edition = "2021" diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit_tree_snapshot.rs new file mode 100644 index 0000000000..21bbab81ff --- 
/dev/null +++ b/consensus/types/src/deposit_tree_snapshot.rs @@ -0,0 +1,83 @@ +use crate::*; +use eth2_hashing::{hash32_concat, ZERO_HASHES}; +use int_to_bytes::int_to_bytes32; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use test_utils::TestRandom; +use DEPOSIT_TREE_DEPTH; + +#[derive(Encode, Decode, Deserialize, Serialize, Clone, Debug, PartialEq, TestRandom)] +pub struct FinalizedExecutionBlock { + pub deposit_root: Hash256, + pub deposit_count: u64, + pub block_hash: Hash256, + pub block_height: u64, +} + +impl From<&DepositTreeSnapshot> for FinalizedExecutionBlock { + fn from(snapshot: &DepositTreeSnapshot) -> Self { + Self { + deposit_root: snapshot.deposit_root, + deposit_count: snapshot.deposit_count, + block_hash: snapshot.execution_block_hash, + block_height: snapshot.execution_block_height, + } + } +} + +#[derive(Encode, Decode, Deserialize, Serialize, Clone, Debug, PartialEq, TestRandom)] +pub struct DepositTreeSnapshot { + pub finalized: Vec, + pub deposit_root: Hash256, + pub deposit_count: u64, + pub execution_block_hash: Hash256, + pub execution_block_height: u64, +} + +impl Default for DepositTreeSnapshot { + fn default() -> Self { + let mut result = Self { + finalized: vec![], + deposit_root: Hash256::default(), + deposit_count: 0, + execution_block_hash: Hash256::zero(), + execution_block_height: 0, + }; + // properly set the empty deposit root + result.deposit_root = result.calculate_root().unwrap(); + result + } +} + +impl DepositTreeSnapshot { + // Calculates the deposit tree root from the hashes in the snapshot + pub fn calculate_root(&self) -> Option { + let mut size = self.deposit_count; + let mut index = self.finalized.len(); + let mut deposit_root = [0; 32]; + for height in 0..DEPOSIT_TREE_DEPTH { + deposit_root = if (size & 1) == 1 { + index = index.checked_sub(1)?; + hash32_concat(self.finalized.get(index)?.as_bytes(), &deposit_root) + } else { + 
hash32_concat(&deposit_root, ZERO_HASHES.get(height)?) + }; + size /= 2; + } + // add mix-in-length + deposit_root = hash32_concat(&deposit_root, &int_to_bytes32(self.deposit_count)); + + Some(Hash256::from_slice(&deposit_root)) + } + pub fn is_valid(&self) -> bool { + self.calculate_root() + .map_or(false, |calculated| self.deposit_root == calculated) + } +} + +#[cfg(test)] +mod tests { + use super::*; + ssz_tests!(DepositTreeSnapshot); +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 7f618dc348..4a6cc57b11 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -36,6 +36,7 @@ pub mod contribution_and_proof; pub mod deposit; pub mod deposit_data; pub mod deposit_message; +pub mod deposit_tree_snapshot; pub mod enr_fork_id; pub mod eth1_data; pub mod eth_spec; @@ -120,6 +121,7 @@ pub use crate::contribution_and_proof::ContributionAndProof; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; pub use crate::deposit_data::DepositData; pub use crate::deposit_message::DepositMessage; +pub use crate::deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock}; pub use crate::enr_fork_id::EnrForkId; pub use crate::eth1_data::Eth1Data; pub use crate::eth_spec::EthSpecId; diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index 20147adb9f..cb50a4ee82 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -256,6 +256,7 @@ pub fn migrate_db( migrate_schema::, _, _, _>>( db, + client_config.eth1.deposit_contract_deploy_block, &client_config.get_data_dir(), from, to, diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 005a74edf6..1f869562d1 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -77,6 +77,7 @@ const HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT: u32 
= 4; +const HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT: u32 = 4; const DOPPELGANGER_SERVICE_NAME: &str = "doppelganger"; @@ -291,6 +292,7 @@ impl ProductionValidatorClient { / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT, get_debug_beacon_states: slot_duration / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, + get_deposit_snapshot: slot_duration / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT, } } else { Timeouts::set_all(slot_duration) From 8600645f65739f15937304676dd37511dbe52cf2 Mon Sep 17 00:00:00 2001 From: Divma Date: Fri, 4 Nov 2022 07:43:43 +0000 Subject: [PATCH 11/37] Fix rust 1.65 lints (#3682) ## Issue Addressed New lints for rust 1.65 ## Proposed Changes Notable change is the identification of parameters that are only used in recursion ## Additional Info na --- account_manager/src/validator/exit.rs | 2 +- account_manager/src/validator/import.rs | 4 ++-- account_manager/src/wallet/create.rs | 2 +- beacon_node/beacon_chain/src/head_tracker.rs | 2 +- beacon_node/beacon_chain/src/migrate.rs | 2 +- beacon_node/beacon_chain/src/schema_change.rs | 24 ++----------------- beacon_node/beacon_chain/src/test_utils.rs | 2 +- .../beacon_chain/src/validator_monitor.rs | 20 ++++------------ beacon_node/client/src/builder.rs | 2 -- beacon_node/http_api/src/lib.rs | 1 + .../lighthouse_network/src/service/utils.rs | 2 +- beacon_node/operation_pool/src/lib.rs | 4 ++-- beacon_node/src/lib.rs | 10 ++------ common/filesystem/src/lib.rs | 2 +- consensus/serde_utils/src/hex_vec.rs | 2 +- consensus/serde_utils/src/u64_hex_be.rs | 2 +- consensus/tree_hash/src/merkle_hasher.rs | 2 +- consensus/types/src/config_and_preset.rs | 4 ++-- consensus/types/src/graffiti.rs | 2 +- database_manager/src/lib.rs | 1 - slasher/src/array.rs | 2 +- slasher/src/database.rs | 10 ++++---- .../execution_engine_integration/src/geth.rs | 2 +- .../src/test_rig.rs | 4 ++-- .../src/transactions.rs | 2 +- validator_client/src/http_api/api_secret.rs | 2 +- 26 files changed, 37 insertions(+), 77 deletions(-) diff --git
a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index ca8cab5bd3..9e5b57a297 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -349,7 +349,7 @@ fn load_voting_keypair( password_file_path: Option<&PathBuf>, stdin_inputs: bool, ) -> Result { - let keystore = Keystore::from_json_file(&voting_keystore_path).map_err(|e| { + let keystore = Keystore::from_json_file(voting_keystore_path).map_err(|e| { format!( "Unable to read keystore JSON {:?}: {:?}", voting_keystore_path, e diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index c581866a25..8dc50a9df1 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -176,7 +176,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin let password = match keystore_password_path.as_ref() { Some(path) => { - let password_from_file: ZeroizeString = fs::read_to_string(&path) + let password_from_file: ZeroizeString = fs::read_to_string(path) .map_err(|e| format!("Unable to read {:?}: {:?}", path, e))? .into(); password_from_file.without_newlines() @@ -256,7 +256,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin .ok_or_else(|| format!("Badly formatted file name: {:?}", src_keystore))?; // Copy the keystore to the new location. - fs::copy(&src_keystore, &dest_keystore) + fs::copy(src_keystore, &dest_keystore) .map_err(|e| format!("Unable to copy keystore: {:?}", e))?; // Register with slashing protection. 
diff --git a/account_manager/src/wallet/create.rs b/account_manager/src/wallet/create.rs index 9ebaeae5f1..accee11b5a 100644 --- a/account_manager/src/wallet/create.rs +++ b/account_manager/src/wallet/create.rs @@ -159,7 +159,7 @@ pub fn create_wallet_from_mnemonic( unknown => return Err(format!("--{} {} is not supported", TYPE_FLAG, unknown)), }; - let mgr = WalletManager::open(&wallet_base_dir) + let mgr = WalletManager::open(wallet_base_dir) .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?; let wallet_password: PlainText = match wallet_password_path { diff --git a/beacon_node/beacon_chain/src/head_tracker.rs b/beacon_node/beacon_chain/src/head_tracker.rs index 84c800f3b7..3fa577ff93 100644 --- a/beacon_node/beacon_chain/src/head_tracker.rs +++ b/beacon_node/beacon_chain/src/head_tracker.rs @@ -45,7 +45,7 @@ impl HeadTracker { /// Returns a `SszHeadTracker`, which contains all necessary information to restore the state /// of `Self` at some later point. pub fn to_ssz_container(&self) -> SszHeadTracker { - SszHeadTracker::from_map(&*self.0.read()) + SszHeadTracker::from_map(&self.0.read()) } /// Creates a new `Self` from the given `SszHeadTracker`, restoring `Self` to the same state of diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 1c0d9c4ed3..66f082742e 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -588,7 +588,7 @@ impl, Cold: ItemStore> BackgroundMigrator( db: Arc>, deposit_contract_deploy_block: u64, - datadir: &Path, from: SchemaVersion, to: SchemaVersion, log: Logger, @@ -42,21 +40,12 @@ pub fn migrate_schema( migrate_schema::( db.clone(), deposit_contract_deploy_block, - datadir, from, next, log.clone(), spec, )?; - migrate_schema::( - db, - deposit_contract_deploy_block, - datadir, - next, - to, - log, - spec, - ) + migrate_schema::(db, deposit_contract_deploy_block, next, to, log, spec) } // Downgrade across multiple 
versions by recursively migrating one step at a time. (_, _) if to.as_u64() + 1 < from.as_u64() => { @@ -64,21 +53,12 @@ pub fn migrate_schema( migrate_schema::( db.clone(), deposit_contract_deploy_block, - datadir, from, next, log.clone(), spec, )?; - migrate_schema::( - db, - deposit_contract_deploy_block, - datadir, - next, - to, - log, - spec, - ) + migrate_schema::(db, deposit_contract_deploy_block, next, to, log, spec) } // diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 3b4a62f5a9..a1c7acf173 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -356,7 +356,7 @@ where let urls: Vec = urls .iter() - .map(|s| SensitiveUrl::parse(*s)) + .map(|s| SensitiveUrl::parse(s)) .collect::>() .unwrap(); diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 06734d3e6e..95f4aadced 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -332,34 +332,22 @@ impl ValidatorMonitor { metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_SLASHED, &[id], - if validator.slashed { 1 } else { 0 }, + i64::from(validator.slashed), ); metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_ACTIVE, &[id], - if validator.is_active_at(current_epoch) { - 1 - } else { - 0 - }, + i64::from(validator.is_active_at(current_epoch)), ); metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_EXITED, &[id], - if validator.is_exited_at(current_epoch) { - 1 - } else { - 0 - }, + i64::from(validator.is_exited_at(current_epoch)), ); metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_WITHDRAWABLE, &[id], - if validator.is_withdrawable_at(current_epoch) { - 1 - } else { - 0 - }, + i64::from(validator.is_withdrawable_at(current_epoch)), ); metrics::set_int_gauge( &metrics::VALIDATOR_ACTIVATION_ELIGIBILITY_EPOCH, diff --git a/beacon_node/client/src/builder.rs 
b/beacon_node/client/src/builder.rs index c89980e6e8..36d6491a56 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -858,7 +858,6 @@ where /// Specifies that the `Client` should use a `HotColdDB` database. pub fn disk_store( mut self, - datadir: &Path, hot_path: &Path, cold_path: &Path, config: StoreConfig, @@ -888,7 +887,6 @@ where migrate_schema::>( db, deposit_contract_deploy_block, - datadir, from, to, log, diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 4267a22f98..46275820ca 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1,3 +1,4 @@ +#![recursion_limit = "256"] //! This crate contains a HTTP server which serves the endpoints listed here: //! //! https://github.com/ethereum/beacon-APIs diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 2aaa46fe8b..8073ae7768 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -269,7 +269,7 @@ pub(crate) fn save_metadata_to_disk( metadata: MetaData, log: &slog::Logger, ) { - let _ = std::fs::create_dir_all(&dir); + let _ = std::fs::create_dir_all(dir); match File::create(dir.join(METADATA_FILENAME)) .and_then(|mut f| f.write_all(&metadata.as_ssz_bytes())) { diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 8c335189c6..4fe5a72545 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -267,7 +267,7 @@ impl OperationPool { &prev_epoch_key, &*all_attestations, state, - &*reward_cache, + &reward_cache, total_active_balance, prev_epoch_validity_filter, spec, @@ -278,7 +278,7 @@ impl OperationPool { &curr_epoch_key, &*all_attestations, state, - &*reward_cache, + &reward_cache, total_active_balance, curr_epoch_validity_filter, spec, diff --git a/beacon_node/src/lib.rs 
b/beacon_node/src/lib.rs index 9fd6882202..650763dcaf 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -61,7 +61,7 @@ impl ProductionBeaconNode { let client_genesis = client_config.genesis.clone(); let store_config = client_config.store.clone(); let log = context.log().clone(); - let datadir = client_config.create_data_dir()?; + let _datadir = client_config.create_data_dir()?; let db_path = client_config.create_db_path()?; let freezer_db_path = client_config.create_freezer_db_path()?; let executor = context.executor.clone(); @@ -84,13 +84,7 @@ impl ProductionBeaconNode { .runtime_context(context) .chain_spec(spec) .http_api_config(client_config.http_api.clone()) - .disk_store( - &datadir, - &db_path, - &freezer_db_path, - store_config, - log.clone(), - )?; + .disk_store(&db_path, &freezer_db_path, store_config, log.clone())?; let builder = if let Some(slasher_config) = client_config.slasher.clone() { let slasher = Arc::new( diff --git a/common/filesystem/src/lib.rs b/common/filesystem/src/lib.rs index 6305671c51..d73b7a355b 100644 --- a/common/filesystem/src/lib.rs +++ b/common/filesystem/src/lib.rs @@ -55,7 +55,7 @@ pub enum Error { /// Creates a file with `600 (-rw-------)` permissions and writes the specified bytes to file. 
pub fn create_with_600_perms>(path: P, bytes: &[u8]) -> Result<(), Error> { let path = path.as_ref(); - let mut file = File::create(&path).map_err(Error::UnableToCreateFile)?; + let mut file = File::create(path).map_err(Error::UnableToCreateFile)?; #[cfg(unix)] { diff --git a/consensus/serde_utils/src/hex_vec.rs b/consensus/serde_utils/src/hex_vec.rs index 60d6494434..f7f4833628 100644 --- a/consensus/serde_utils/src/hex_vec.rs +++ b/consensus/serde_utils/src/hex_vec.rs @@ -10,7 +10,7 @@ where S: Serializer, { let mut hex_string: String = "0x".to_string(); - hex_string.push_str(&hex::encode(&bytes)); + hex_string.push_str(&hex::encode(bytes)); serializer.serialize_str(&hex_string) } diff --git a/consensus/serde_utils/src/u64_hex_be.rs b/consensus/serde_utils/src/u64_hex_be.rs index dc6af0fa4c..6af8a75893 100644 --- a/consensus/serde_utils/src/u64_hex_be.rs +++ b/consensus/serde_utils/src/u64_hex_be.rs @@ -39,7 +39,7 @@ impl<'de> Visitor<'de> for QuantityVisitor { hex::decode(&format!("0{}", stripped)) .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) } else { - hex::decode(&stripped).map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) + hex::decode(stripped).map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) } } } diff --git a/consensus/tree_hash/src/merkle_hasher.rs b/consensus/tree_hash/src/merkle_hasher.rs index 1753eade1b..2acaf1c3b8 100644 --- a/consensus/tree_hash/src/merkle_hasher.rs +++ b/consensus/tree_hash/src/merkle_hasher.rs @@ -368,7 +368,7 @@ mod test { fn context_size() { assert_eq!( mem::size_of::(), - 232, + 224, "Halfnode size should be as expected" ); } diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index e624afe2db..b7ec015ea3 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -40,7 +40,7 @@ impl ConfigAndPreset { let extra_fields = get_extra_fields(spec); if spec.bellatrix_fork_epoch.is_some() - || 
fork_name == None + || fork_name.is_none() || fork_name == Some(ForkName::Merge) { let bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); @@ -65,7 +65,7 @@ impl ConfigAndPreset { /// Get a hashmap of constants to add to the `PresetAndConfig` pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { - let hex_string = |value: &[u8]| format!("0x{}", hex::encode(&value)).into(); + let hex_string = |value: &[u8]| format!("0x{}", hex::encode(value)).into(); let u32_hex = |v: u32| hex_string(&v.to_le_bytes()); let u8_hex = |v: u8| hex_string(&v.to_le_bytes()); hashmap! { diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index 73beb82649..2b0a645cd0 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -27,7 +27,7 @@ impl Graffiti { impl fmt::Display for Graffiti { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", eth2_serde_utils::hex::encode(&self.0)) + write!(f, "{}", eth2_serde_utils::hex::encode(self.0)) } } diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index cb50a4ee82..c0023f3505 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -257,7 +257,6 @@ pub fn migrate_db( migrate_schema::, _, _, _>>( db, client_config.eth1.deposit_contract_deploy_block, - &client_config.get_data_dir(), from, to, log, diff --git a/slasher/src/array.rs b/slasher/src/array.rs index d9cb8a4ec6..4deb389124 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -188,7 +188,7 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn txn.put( Self::select_db(db), - &disk_key.to_be_bytes(), + disk_key.to_be_bytes(), &compressed_value, )?; Ok(()) diff --git a/slasher/src/database.rs b/slasher/src/database.rs index c8046c80dc..49d2b00a4c 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -301,7 +301,7 @@ impl SlasherDB { pub fn store_schema_version(&self, txn: &mut RwTransaction<'_>) -> Result<(), 
Error> { txn.put( &self.databases.metadata_db, - &METADATA_VERSION_KEY, + METADATA_VERSION_KEY, &bincode::serialize(&CURRENT_SCHEMA_VERSION)?, )?; Ok(()) @@ -323,7 +323,7 @@ impl SlasherDB { pub fn store_config(&self, config: &Config, txn: &mut RwTransaction<'_>) -> Result<(), Error> { txn.put( &self.databases.metadata_db, - &METADATA_CONFIG_KEY, + METADATA_CONFIG_KEY, &bincode::serialize(config)?, )?; Ok(()) @@ -367,7 +367,7 @@ impl SlasherDB { txn.put( &self.databases.attesters_db, &AttesterKey::new(validator_index, target_epoch, &self.config), - &CompactAttesterRecord::null().as_bytes(), + CompactAttesterRecord::null().as_bytes(), )?; } } @@ -423,7 +423,7 @@ impl SlasherDB { key: &IndexedAttestationIdKey, value: IndexedAttestationId, ) -> Result<(), Error> { - txn.put(&self.databases.indexed_attestation_id_db, key, &value)?; + txn.put(&self.databases.indexed_attestation_id_db, key, value)?; Ok(()) } @@ -579,7 +579,7 @@ impl SlasherDB { txn.put( &self.databases.attesters_db, &AttesterKey::new(validator_index, target_epoch, &self.config), - &indexed_attestation_id, + indexed_attestation_id, )?; Ok(AttesterSlashingStatus::NotSlashable) diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index 467fd8b430..1b96fa9f3f 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -13,7 +13,7 @@ const GETH_REPO_URL: &str = "https://github.com/ethereum/go-ethereum"; pub fn build_result(repo_dir: &Path) -> Output { Command::new("make") .arg("geth") - .current_dir(&repo_dir) + .current_dir(repo_dir) .output() .expect("failed to make geth") } diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 515e238e97..6e9f37ff1f 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -202,8 +202,8 @@ impl TestRig { .await; // 
We hardcode the accounts here since some EEs start with a default unlocked account - let account1 = ethers_core::types::Address::from_slice(&hex::decode(&ACCOUNT1).unwrap()); - let account2 = ethers_core::types::Address::from_slice(&hex::decode(&ACCOUNT2).unwrap()); + let account1 = ethers_core::types::Address::from_slice(&hex::decode(ACCOUNT1).unwrap()); + let account2 = ethers_core::types::Address::from_slice(&hex::decode(ACCOUNT2).unwrap()); /* * Check the transition config endpoint. diff --git a/testing/execution_engine_integration/src/transactions.rs b/testing/execution_engine_integration/src/transactions.rs index a8c0ab3c15..62b77d5024 100644 --- a/testing/execution_engine_integration/src/transactions.rs +++ b/testing/execution_engine_integration/src/transactions.rs @@ -30,7 +30,7 @@ pub fn transactions(account1: Address, account2: Address) -> Vec(), diff --git a/validator_client/src/http_api/api_secret.rs b/validator_client/src/http_api/api_secret.rs index 484ac50bd3..b42cd11edd 100644 --- a/validator_client/src/http_api/api_secret.rs +++ b/validator_client/src/http_api/api_secret.rs @@ -60,7 +60,7 @@ impl ApiSecret { // Create and write the secret key to file with appropriate permissions create_with_600_perms( &sk_path, - eth2_serde_utils::hex::encode(&sk.serialize()).as_bytes(), + eth2_serde_utils::hex::encode(sk.serialize()).as_bytes(), ) .map_err(|e| { format!( From 0655006e8730d0f4804d67ae37bdbbf608a8aa77 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 7 Nov 2022 06:48:31 +0000 Subject: [PATCH 12/37] Clarify error log when registering validators (#3650) ## Issue Addressed NA ## Proposed Changes Adds clarification to an error log when there is an error submitting a validator registration. There seems to be a few cases where relays return errors during validator registration, including spurious timeouts and when a validator has been very recently activated/made pending. 
Changing this log helps indicate that it's "just another registration error" rather than something more serious. I didn't drop this to a `WARN` since I still have hope we can eliminate these errors completely by chatting with relays and adjusting timeouts. ## Additional Info NA --- beacon_node/http_api/src/lib.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 46275820ca..1ef3c3e2a9 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2627,7 +2627,12 @@ pub fn serve( .await .map(|resp| warp::reply::json(&resp)) .map_err(|e| { - error!(log, "Error from connected relay"; "error" => ?e); + error!( + log, + "Relay error when registering validator(s)"; + "num_registrations" => filtered_registration_data.len(), + "error" => ?e + ); // Forward the HTTP status code if we are able to, otherwise fall back // to a server error. if let eth2::Error::ServerMessage(message) = e { From 253767ebc1af48893ab124c53013460098469728 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 7 Nov 2022 06:48:32 +0000 Subject: [PATCH 13/37] Update stale sections of the book (#3671) ## Issue Addressed Which issue # does this PR address? ## Proposed Changes * Add v3.2 and v3.3 to database migrations table * Remove docs on `--subscribe-all-subnets` and `--import-all-attestations` from redundancy docs * Clarify that the merge has already occurred on the merge migration page --- book/src/database-migrations.md | 2 ++ book/src/merge-migration.md | 14 +++++-------- book/src/redundancy.md | 37 ++++++++++----------------------- 3 files changed, 18 insertions(+), 35 deletions(-) diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index c31e373b48..2b0ac836a4 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -24,6 +24,8 @@ validator client or the slasher**. 
| v2.5.0 | Aug 2022 | v11 | yes | | v3.0.0 | Aug 2022 | v11 | yes | | v3.1.0 | Sep 2022 | v12 | yes | +| v3.2.0 | Oct 2022 | v12 | yes | +| v3.3.0 | TBD | v13 | yes | > **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release > (e.g. v2.3.0). diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index 8596cd942c..c0ba048997 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -1,9 +1,8 @@ # Merge Migration -This document provides detail for users who want to run a merge-ready Lighthouse node. +This document provides detail for users who want to run a Lighthouse node on post-merge Ethereum. -> The merge is occurring on mainnet in September. You _must_ have a merge-ready setup by September 6 -> 2022. +> The merge occurred on mainnet in September 2022. ## Necessary Configuration @@ -27,12 +26,9 @@ engine to a merge-ready version. You must configure your node to be merge-ready before the Bellatrix fork occurs on the network on which your node is operating. -* **Mainnet**: the Bellatrix fork is scheduled for epoch 144896, September 6 2022 11:34 UTC. - You must ensure your node configuration is updated before then in order to continue following - the chain. We recommend updating your configuration now. - -* **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**: the Bellatrix fork has already occurred. - You must have a merge-ready configuration right now. +* **Gnosis**: the Bellatrix fork has not yet been scheduled. +* **Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**: the Bellatrix fork has + already occurred. You must have a merge-ready configuration right now. ## Connecting to an execution engine diff --git a/book/src/redundancy.md b/book/src/redundancy.md index dae7ac51fe..dcd2ecdea1 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -55,42 +55,27 @@ In our previous example, we listed `http://192.168.1.1:5052` as a redundant node. 
Apart from having sufficient resources, the backup node should have the following flags: -- `--staking`: starts the HTTP API server and ensures the execution chain is synced. +- `--http`: starts the HTTP API server. - `--http-address 0.0.0.0`: this allows *any* external IP address to access the HTTP server (a firewall should be configured to deny unauthorized access to port `5052`). This is only required if your backup node is on a different host. -- `--subscribe-all-subnets`: ensures that the beacon node subscribes to *all* - subnets, not just on-demand requests from validators. -- `--import-all-attestations`: ensures that the beacon node performs - aggregation on all seen attestations. +- `--execution-endpoint`: see [Merge Migration](./merge-migration.md). +- `--execution-jwt`: see [Merge Migration](./merge-migration.md). -Subsequently, one could use the following command to provide a backup beacon -node: +For example one could use the following command to provide a backup beacon node: ```bash lighthouse bn \ - --staking \ + --http \ --http-address 0.0.0.0 \ - --subscribe-all-subnets \ - --import-all-attestations + --execution-endpoint http://localhost:8551 \ + --execution-jwt /secrets/jwt.hex ``` -### Resource usage of redundant Beacon Nodes - -The `--subscribe-all-subnets` and `--import-all-attestations` flags typically -cause a significant increase in resource consumption. A doubling in CPU -utilization and RAM consumption is expected. - -The increase in resource consumption is due to the fact that the beacon node is -now processing, validating, aggregating and forwarding *all* attestations, -whereas previously it was likely only doing a fraction of this work. Without -these flags, subscription to attestation subnets and aggregation of -attestations is only performed for validators which [explicitly request -subscriptions][subscribe-api]. - -There are 64 subnets and each validator will result in a subscription to *at -least* one subnet. 
So, using the two aforementioned flags will result in -resource consumption akin to running 64+ validators. +Prior to v3.2.0 fallback beacon nodes also required the `--subscribe-all-subnets` and +`--import-all-attestations` flags. These flags are no longer required as the validator client will +now broadcast subscriptions to all connected beacon nodes by default. This broadcast behaviour +can be disabled using the `--disable-run-on-all` flag for `lighthouse vc`. ## Redundant execution nodes From 84c7d8cc7006a6f1f1bb5729ab222b9f85f72727 Mon Sep 17 00:00:00 2001 From: Divma Date: Mon, 7 Nov 2022 06:48:34 +0000 Subject: [PATCH 14/37] Blocklookup data inconsistencies (#3677) ## Issue Addressed Closes #3649 ## Proposed Changes Add a regression test for the data inconsistency, catching the problem in https://github.com/sigp/lighthouse/pull/3677/commits/31e88c5533be9cf25571dd5ffbdf6e0bdc26f060 [here](https://github.com/sigp/lighthouse/actions/runs/3379894044/jobs/5612044797#step:6:2043). When a chain is sent for processing, move it to a separate collection and now the test works, yay! 
## Additional Info na --- .../network/src/sync/block_lookups/mod.rs | 99 ++++++++++++------- .../src/sync/block_lookups/parent_lookup.rs | 41 ++++++-- .../network/src/sync/block_lookups/tests.rs | 97 +++++++++++++++--- 3 files changed, 181 insertions(+), 56 deletions(-) diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 5c2bc65229..aa2694769c 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -1,4 +1,5 @@ use std::collections::hash_map::Entry; +use std::collections::HashMap; use std::time::Duration; use beacon_chain::{BeaconChainTypes, BlockError}; @@ -13,6 +14,7 @@ use store::{Hash256, SignedBeaconBlock}; use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent}; use crate::metrics; +use self::parent_lookup::PARENT_FAIL_TOLERANCE; use self::{ parent_lookup::{ParentLookup, VerifyError}, single_block_lookup::SingleBlockRequest, @@ -36,8 +38,11 @@ const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; pub(crate) struct BlockLookups { - /// A collection of parent block lookups. - parent_queue: SmallVec<[ParentLookup; 3]>, + /// Parent chain lookups being downloaded. + parent_lookups: SmallVec<[ParentLookup; 3]>, + + processing_parent_lookups: + HashMap, SingleBlockRequest)>, /// A cache of failed chain lookups to prevent duplicate searches. 
failed_chains: LRUTimeCache, @@ -55,7 +60,8 @@ pub(crate) struct BlockLookups { impl BlockLookups { pub fn new(log: Logger) -> Self { Self { - parent_queue: Default::default(), + parent_lookups: Default::default(), + processing_parent_lookups: Default::default(), failed_chains: LRUTimeCache::new(Duration::from_secs( FAILED_CHAINS_CACHE_EXPIRY_SECONDS, )), @@ -78,6 +84,23 @@ impl BlockLookups { return; } + if self.parent_lookups.iter_mut().any(|parent_req| { + parent_req.add_peer(&hash, &peer_id) || parent_req.contains_block(&hash) + }) { + // If the block was already downloaded, or is being downloaded in this moment, do not + // request it. + return; + } + + if self + .processing_parent_lookups + .values() + .any(|(hashes, _last_parent_request)| hashes.contains(&hash)) + { + // we are already processing this block, ignore it. + return; + } + debug!( self.log, "Searching for block"; @@ -118,8 +141,8 @@ impl BlockLookups { // Make sure this block is not already downloaded, and that neither it or its parent is // being searched for. - if self.parent_queue.iter_mut().any(|parent_req| { - parent_req.contains_block(&block) + if self.parent_lookups.iter_mut().any(|parent_req| { + parent_req.contains_block(&block_root) || parent_req.add_peer(&block_root, &peer_id) || parent_req.add_peer(&parent_root, &peer_id) }) { @@ -127,6 +150,15 @@ impl BlockLookups { return; } + if self + .processing_parent_lookups + .values() + .any(|(hashes, _peers)| hashes.contains(&block_root) || hashes.contains(&parent_root)) + { + // we are already processing this block, ignore it. 
+ return; + } + let parent_lookup = ParentLookup::new(block_root, block, peer_id); self.request_parent(parent_lookup, cx); } @@ -207,11 +239,11 @@ impl BlockLookups { cx: &mut SyncNetworkContext, ) { let mut parent_lookup = if let Some(pos) = self - .parent_queue + .parent_lookups .iter() .position(|request| request.pending_response(id)) { - self.parent_queue.remove(pos) + self.parent_lookups.remove(pos) } else { if block.is_some() { debug!(self.log, "Response for a parent lookup request that was not found"; "peer_id" => %peer_id); @@ -233,13 +265,13 @@ impl BlockLookups { ) .is_ok() { - self.parent_queue.push(parent_lookup) + self.parent_lookups.push(parent_lookup) } } Ok(None) => { // Request finished successfully, nothing else to do. It will be removed after the // processing result arrives. - self.parent_queue.push(parent_lookup); + self.parent_lookups.push(parent_lookup); } Err(e) => match e { VerifyError::RootMismatch @@ -276,7 +308,7 @@ impl BlockLookups { metrics::set_gauge( &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_queue.len() as i64, + self.parent_lookups.len() as i64, ); } @@ -324,11 +356,11 @@ impl BlockLookups { /* Check disconnection for parent lookups */ while let Some(pos) = self - .parent_queue + .parent_lookups .iter_mut() .position(|req| req.check_peer_disconnected(peer_id).is_err()) { - let parent_lookup = self.parent_queue.remove(pos); + let parent_lookup = self.parent_lookups.remove(pos); trace!(self.log, "Parent lookup's peer disconnected"; &parent_lookup); self.request_parent(parent_lookup, cx); } @@ -342,11 +374,11 @@ impl BlockLookups { cx: &mut SyncNetworkContext, ) { if let Some(pos) = self - .parent_queue + .parent_lookups .iter() .position(|request| request.pending_response(id)) { - let mut parent_lookup = self.parent_queue.remove(pos); + let mut parent_lookup = self.parent_lookups.remove(pos); parent_lookup.download_failed(); trace!(self.log, "Parent lookup request failed"; &parent_lookup); self.request_parent(parent_lookup, 
cx); @@ -355,7 +387,7 @@ impl BlockLookups { }; metrics::set_gauge( &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_queue.len() as i64, + self.parent_lookups.len() as i64, ); } @@ -470,7 +502,7 @@ impl BlockLookups { cx: &mut SyncNetworkContext, ) { let (mut parent_lookup, peer_id) = if let Some((pos, peer)) = self - .parent_queue + .parent_lookups .iter() .enumerate() .find_map(|(pos, request)| { @@ -478,7 +510,7 @@ impl BlockLookups { .get_processing_peer(chain_hash) .map(|peer| (pos, peer)) }) { - (self.parent_queue.remove(pos), peer) + (self.parent_lookups.remove(pos), peer) } else { return debug!(self.log, "Process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); }; @@ -520,13 +552,13 @@ impl BlockLookups { ); } }; - let chain_hash = parent_lookup.chain_hash(); - let blocks = parent_lookup.chain_blocks(); + let (chain_hash, blocks, hashes, request) = parent_lookup.parts_for_processing(); let process_id = ChainSegmentProcessId::ParentLookup(chain_hash); match beacon_processor_send.try_send(WorkEvent::chain_segment(process_id, blocks)) { Ok(_) => { - self.parent_queue.push(parent_lookup); + self.processing_parent_lookups + .insert(chain_hash, (hashes, request)); } Err(e) => { error!( @@ -580,7 +612,7 @@ impl BlockLookups { metrics::set_gauge( &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_queue.len() as i64, + self.parent_lookups.len() as i64, ); } @@ -590,14 +622,11 @@ impl BlockLookups { result: BatchProcessResult, cx: &mut SyncNetworkContext, ) { - let parent_lookup = if let Some(pos) = self - .parent_queue - .iter() - .position(|request| request.chain_hash() == chain_hash) - { - self.parent_queue.remove(pos) - } else { - return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); + let request = match self.processing_parent_lookups.remove(&chain_hash) { + Some((_hashes, request)) => request, + None => { + return debug!(self.log, "Chain process 
response for a parent lookup request that was not found"; "chain_hash" => %chain_hash, "result" => ?result) + } }; debug!(self.log, "Parent chain processed"; "chain_hash" => %chain_hash, "result" => ?result); @@ -609,8 +638,8 @@ impl BlockLookups { imported_blocks: _, penalty, } => { - self.failed_chains.insert(parent_lookup.chain_hash()); - for &peer_id in parent_lookup.used_peers() { + self.failed_chains.insert(chain_hash); + for peer_id in request.used_peers { cx.report_peer(peer_id, penalty, "parent_chain_failure") } } @@ -621,7 +650,7 @@ impl BlockLookups { metrics::set_gauge( &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_queue.len() as i64, + self.parent_lookups.len() as i64, ); } @@ -697,14 +726,14 @@ impl BlockLookups { } Ok(_) => { debug!(self.log, "Requesting parent"; &parent_lookup); - self.parent_queue.push(parent_lookup) + self.parent_lookups.push(parent_lookup) } } // We remove and add back again requests so we want this updated regardless of outcome. metrics::set_gauge( &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_queue.len() as i64, + self.parent_lookups.len() as i64, ); } @@ -715,6 +744,6 @@ impl BlockLookups { /// Drops all the parent chain requests and returns how many requests were dropped. pub fn drop_parent_chain_requests(&mut self) -> usize { - self.parent_queue.drain(..).len() + self.parent_lookups.drain(..).len() } } diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index 38ad59ebc4..a2c2f1d1ce 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -24,7 +24,7 @@ pub(crate) struct ParentLookup { /// The root of the block triggering this parent request. chain_hash: Hash256, /// The blocks that have currently been downloaded. - downloaded_blocks: Vec>>, + downloaded_blocks: Vec>, /// Request of the last parent. 
current_parent_request: SingleBlockRequest, /// Id of the last parent request. @@ -53,10 +53,10 @@ pub enum RequestError { } impl ParentLookup { - pub fn contains_block(&self, block: &SignedBeaconBlock) -> bool { + pub fn contains_block(&self, block_root: &Hash256) -> bool { self.downloaded_blocks .iter() - .any(|d_block| d_block.as_ref() == block) + .any(|(root, _d_block)| root == block_root) } pub fn new( @@ -68,7 +68,7 @@ impl ParentLookup { Self { chain_hash: block_root, - downloaded_blocks: vec![block], + downloaded_blocks: vec![(block_root, block)], current_parent_request, current_parent_request_id: None, } @@ -100,7 +100,8 @@ impl ParentLookup { pub fn add_block(&mut self, block: Arc>) { let next_parent = block.parent_root(); - self.downloaded_blocks.push(block); + let current_root = self.current_parent_request.hash; + self.downloaded_blocks.push((current_root, block)); self.current_parent_request.hash = next_parent; self.current_parent_request.state = single_block_lookup::State::AwaitingDownload; self.current_parent_request_id = None; @@ -110,6 +111,32 @@ impl ParentLookup { self.current_parent_request_id == Some(req_id) } + /// Consumes the parent request and destructures it into it's parts. + #[allow(clippy::type_complexity)] + pub fn parts_for_processing( + self, + ) -> ( + Hash256, + Vec>>, + Vec, + SingleBlockRequest, + ) { + let ParentLookup { + chain_hash, + downloaded_blocks, + current_parent_request, + current_parent_request_id: _, + } = self; + let block_count = downloaded_blocks.len(); + let mut blocks = Vec::with_capacity(block_count); + let mut hashes = Vec::with_capacity(block_count); + for (hash, block) in downloaded_blocks { + blocks.push(block); + hashes.push(hash); + } + (chain_hash, blocks, hashes, current_parent_request) + } + /// Get the parent lookup's chain hash. 
pub fn chain_hash(&self) -> Hash256 { self.chain_hash @@ -125,10 +152,6 @@ impl ParentLookup { self.current_parent_request_id = None; } - pub fn chain_blocks(&mut self) -> Vec>> { - std::mem::take(&mut self.downloaded_blocks) - } - /// Verifies that the received block is what we requested. If so, parent lookup now waits for /// the processing result of the block. pub fn verify_block( diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 64a1a6e836..8ade622f8d 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -259,7 +259,7 @@ fn test_single_block_lookup_becomes_parent_request() { assert_eq!(bl.single_block_lookups.len(), 0); rig.expect_parent_request(); rig.expect_empty_network(); - assert_eq!(bl.parent_queue.len(), 1); + assert_eq!(bl.parent_lookups.len(), 1); } #[test] @@ -287,7 +287,7 @@ fn test_parent_lookup_happy_path() { was_non_empty: true, }; bl.parent_chain_processed(chain_hash, process_result, &mut cx); - assert_eq!(bl.parent_queue.len(), 0); + assert_eq!(bl.parent_lookups.len(), 0); } #[test] @@ -324,7 +324,7 @@ fn test_parent_lookup_wrong_response() { was_non_empty: true, }; bl.parent_chain_processed(chain_hash, process_result, &mut cx); - assert_eq!(bl.parent_queue.len(), 0); + assert_eq!(bl.parent_lookups.len(), 0); } #[test] @@ -356,7 +356,7 @@ fn test_parent_lookup_empty_response() { was_non_empty: true, }; bl.parent_chain_processed(chain_hash, process_result, &mut cx); - assert_eq!(bl.parent_queue.len(), 0); + assert_eq!(bl.parent_lookups.len(), 0); } #[test] @@ -387,7 +387,7 @@ fn test_parent_lookup_rpc_failure() { was_non_empty: true, }; bl.parent_chain_processed(chain_hash, process_result, &mut cx); - assert_eq!(bl.parent_queue.len(), 0); + assert_eq!(bl.parent_lookups.len(), 0); } #[test] @@ -419,11 +419,11 @@ fn test_parent_lookup_too_many_attempts() { } } if i < parent_lookup::PARENT_FAIL_TOLERANCE 
{ - assert_eq!(bl.parent_queue[0].failed_attempts(), dbg!(i)); + assert_eq!(bl.parent_lookups[0].failed_attempts(), dbg!(i)); } } - assert_eq!(bl.parent_queue.len(), 0); + assert_eq!(bl.parent_lookups.len(), 0); } #[test] @@ -450,11 +450,11 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() { rig.expect_penalty(); } if i < parent_lookup::PARENT_FAIL_TOLERANCE { - assert_eq!(bl.parent_queue[0].failed_attempts(), dbg!(i)); + assert_eq!(bl.parent_lookups[0].failed_attempts(), dbg!(i)); } } - assert_eq!(bl.parent_queue.len(), 0); + assert_eq!(bl.parent_lookups.len(), 0); assert!(!bl.failed_chains.contains(&block_hash)); assert!(!bl.failed_chains.contains(&parent.canonical_root())); } @@ -491,7 +491,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { } assert!(bl.failed_chains.contains(&block_hash)); - assert_eq!(bl.parent_queue.len(), 0); + assert_eq!(bl.parent_lookups.len(), 0); } #[test] @@ -545,7 +545,7 @@ fn test_parent_lookup_disconnection() { &mut cx, ); bl.peer_disconnected(&peer_id, &mut cx); - assert!(bl.parent_queue.is_empty()); + assert!(bl.parent_lookups.is_empty()); } #[test] @@ -598,5 +598,78 @@ fn test_parent_lookup_ignored_response() { // Return an Ignored result. The request should be dropped bl.parent_block_processed(chain_hash, BlockProcessResult::Ignored, &mut cx); rig.expect_empty_network(); - assert_eq!(bl.parent_queue.len(), 0); + assert_eq!(bl.parent_lookups.len(), 0); +} + +/// This is a regression test. 
+#[test] +fn test_same_chain_race_condition() { + let (mut bl, mut cx, mut rig) = TestRig::test_setup(Some(Level::Debug)); + + #[track_caller] + fn parent_lookups_consistency(bl: &BlockLookups) { + let hashes: Vec<_> = bl + .parent_lookups + .iter() + .map(|req| req.chain_hash()) + .collect(); + let expected = hashes.len(); + assert_eq!( + expected, + hashes + .into_iter() + .collect::>() + .len(), + "duplicated chain hashes in parent queue" + ) + } + // if we use one or two blocks it will match on the hash or the parent hash, so make a longer + // chain. + let depth = 4; + let mut blocks = Vec::>>::with_capacity(depth); + while blocks.len() < depth { + let parent = blocks + .last() + .map(|b| b.canonical_root()) + .unwrap_or_else(Hash256::random); + let block = Arc::new(rig.block_with_parent(parent)); + blocks.push(block); + } + + let peer_id = PeerId::random(); + let trigger_block = blocks.pop().unwrap(); + let chain_hash = trigger_block.canonical_root(); + bl.search_parent(chain_hash, trigger_block.clone(), peer_id, &mut cx); + + for (i, block) in blocks.into_iter().rev().enumerate() { + let id = rig.expect_parent_request(); + // the block + bl.parent_lookup_response(id, peer_id, Some(block.clone()), D, &mut cx); + // the stream termination + bl.parent_lookup_response(id, peer_id, None, D, &mut cx); + // the processing request + rig.expect_block_process(); + // the processing result + if i + 2 == depth { + // one block was removed + bl.parent_block_processed(chain_hash, BlockError::BlockIsAlreadyKnown.into(), &mut cx) + } else { + bl.parent_block_processed(chain_hash, BlockError::ParentUnknown(block).into(), &mut cx) + } + parent_lookups_consistency(&bl) + } + + // Processing succeeds, now the rest of the chain should be sent for processing. + rig.expect_parent_chain_process(); + + // Try to get this block again while the chain is being processed. We should not request it again. 
+ let peer_id = PeerId::random(); + bl.search_parent(chain_hash, trigger_block, peer_id, &mut cx); + parent_lookups_consistency(&bl); + + let process_result = BatchProcessResult::Success { + was_non_empty: true, + }; + bl.parent_chain_processed(chain_hash, process_result, &mut cx); + assert_eq!(bl.parent_lookups.len(), 0); } From 9d6209725f3fa18236d0fbc72504dc22cf456ed9 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Tue, 8 Nov 2022 01:58:18 +0000 Subject: [PATCH 15/37] Added Merkle Proof Generation for Beacon State (#3674) ## Issue Addressed This PR addresses partially #3651 ## Proposed Changes This PR adds the following methods: * a new method to trait `TreeHash`, `hash_tree_leaves` which returns all the Merkle leaves of the ssz object. * a new method to `BeaconState`: `compute_merkle_proof` which generates a specific merkle proof for given depth and index by using the `hash_tree_leaves` as leaves function. ## Additional Info Now here is some rationale on why I decided to go down this route: adding a new function to commonly used trait is a pain but was necessary to make sure we have all merkle leaves for every object, that is why I just added `hash_tree_leaves` in the trait and not `compute_merkle_proof` as well. although it would make sense it gives us code duplication/harder review time and we just need it from one specific object in one specific usecase so not worth the effort YET. In my humble opinion. 
Co-authored-by: Michael Sproul --- Cargo.lock | 1 + consensus/types/Cargo.toml | 1 + consensus/types/src/beacon_state.rs | 59 +++++ .../types/src/beacon_state/tree_hash_cache.rs | 204 ++++++++---------- consensus/types/src/light_client_bootstrap.rs | 10 +- .../types/src/light_client_finality_update.rs | 7 +- consensus/types/src/light_client_update.rs | 13 +- testing/ef_tests/src/cases.rs | 2 + .../src/cases/merkle_proof_validity.rs | 83 +++++++ testing/ef_tests/src/handler.rs | 24 +++ testing/ef_tests/tests/tests.rs | 5 + 11 files changed, 277 insertions(+), 132 deletions(-) create mode 100644 testing/ef_tests/src/cases/merkle_proof_validity.rs diff --git a/Cargo.lock b/Cargo.lock index 6d65ccb48c..c759c46f36 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6908,6 +6908,7 @@ dependencies = [ "lazy_static", "log", "maplit", + "merkle_proof", "parking_lot 0.12.1", "rand 0.8.5", "rand_xorshift", diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index d1b2ae1823..1ccc8dba8b 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -9,6 +9,7 @@ name = "benches" harness = false [dependencies] +merkle_proof = { path = "../../consensus/merkle_proof" } bls = { path = "../../crypto/bls" } compare_fields = { path = "../../common/compare_fields" } compare_fields_derive = { path = "../../common/compare_fields_derive" } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index a5d00cdf2d..a6b913bcb9 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -124,6 +124,8 @@ pub enum Error { current_epoch: Epoch, epoch: Epoch, }, + IndexNotSupported(usize), + MerkleTreeError(merkle_proof::MerkleTreeError), } /// Control whether an epoch-indexed field can be indexed at the next epoch or not. @@ -1669,6 +1671,57 @@ impl BeaconState { }; Ok(sync_committee) } + + pub fn compute_merkle_proof( + &mut self, + generalized_index: usize, + ) -> Result, Error> { + // 1. 
Convert generalized index to field index. + let field_index = match generalized_index { + light_client_update::CURRENT_SYNC_COMMITTEE_INDEX + | light_client_update::NEXT_SYNC_COMMITTEE_INDEX => { + // Sync committees are top-level fields, subtract off the generalized indices + // for the internal nodes. Result should be 22 or 23, the field offset of the committee + // in the `BeaconState`: + // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate + generalized_index + .checked_sub(tree_hash_cache::NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES) + .ok_or(Error::IndexNotSupported(generalized_index))? + } + light_client_update::FINALIZED_ROOT_INDEX => { + // Finalized root is the right child of `finalized_checkpoint`, divide by two to get + // the generalized index of `state.finalized_checkpoint`. + let finalized_checkpoint_generalized_index = generalized_index / 2; + // Subtract off the internal nodes. Result should be 105/2 - 32 = 20 which matches + // position of `finalized_checkpoint` in `BeaconState`. + finalized_checkpoint_generalized_index + .checked_sub(tree_hash_cache::NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES) + .ok_or(Error::IndexNotSupported(generalized_index))? + } + _ => return Err(Error::IndexNotSupported(generalized_index)), + }; + + // 2. Get all `BeaconState` leaves. + let cache = self.tree_hash_cache_mut().take(); + let leaves = if let Some(mut cache) = cache { + cache.recalculate_tree_hash_leaves(self)? + } else { + return Err(Error::TreeHashCacheNotInitialized); + }; + + // 3. Make deposit tree. + // Use the depth of the `BeaconState` fields (i.e. `log2(32) = 5`). + let depth = light_client_update::CURRENT_SYNC_COMMITTEE_PROOF_LEN; + let tree = merkle_proof::MerkleTree::create(&leaves, depth); + let (_, mut proof) = tree.generate_proof(field_index, depth)?; + + // 4. If we're proving the finalized root, patch in the finalized epoch to complete the proof. 
+ if generalized_index == light_client_update::FINALIZED_ROOT_INDEX { + proof.insert(0, self.finalized_checkpoint().epoch.tree_hash_root()); + } + + Ok(proof) + } } impl From for Error { @@ -1701,6 +1754,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: merkle_proof::MerkleTreeError) -> Error { + Error::MerkleTreeError(e) + } +} + impl From for Error { fn from(e: ArithError) -> Error { Error::ArithError(e) diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index e67d4096dd..2fc56bdc01 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -18,7 +18,7 @@ use tree_hash::{mix_in_length, MerkleHasher, TreeHash}; /// /// This constant is set with the assumption that there are `> 16` and `<= 32` fields on the /// `BeaconState`. **Tree hashing will fail if this value is set incorrectly.** -const NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES: usize = 32; +pub const NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES: usize = 32; /// The number of nodes in the Merkle tree of a validator record. const NODES_PER_VALIDATOR: usize = 15; @@ -210,6 +210,90 @@ impl BeaconTreeHashCacheInner { } } + pub fn recalculate_tree_hash_leaves( + &mut self, + state: &BeaconState, + ) -> Result, Error> { + let mut leaves = vec![ + // Genesis data leaves. + state.genesis_time().tree_hash_root(), + state.genesis_validators_root().tree_hash_root(), + // Current fork data leaves. + state.slot().tree_hash_root(), + state.fork().tree_hash_root(), + state.latest_block_header().tree_hash_root(), + // Roots leaves. + state + .block_roots() + .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.block_roots)?, + state + .state_roots() + .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.state_roots)?, + state + .historical_roots() + .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.historical_roots)?, + // Eth1 Data leaves. 
+ state.eth1_data().tree_hash_root(), + self.eth1_data_votes.recalculate_tree_hash_root(state)?, + state.eth1_deposit_index().tree_hash_root(), + // Validator leaves. + self.validators + .recalculate_tree_hash_root(state.validators())?, + state + .balances() + .recalculate_tree_hash_root(&mut self.balances_arena, &mut self.balances)?, + state + .randao_mixes() + .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.randao_mixes)?, + state + .slashings() + .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)?, + ]; + // Participation + if let BeaconState::Base(state) = state { + leaves.push(state.previous_epoch_attestations.tree_hash_root()); + leaves.push(state.current_epoch_attestations.tree_hash_root()); + } else { + leaves.push( + self.previous_epoch_participation + .recalculate_tree_hash_root(&ParticipationList::new( + state.previous_epoch_participation()?, + ))?, + ); + leaves.push( + self.current_epoch_participation + .recalculate_tree_hash_root(&ParticipationList::new( + state.current_epoch_participation()?, + ))?, + ); + } + // Checkpoint leaves + leaves.push(state.justification_bits().tree_hash_root()); + leaves.push(state.previous_justified_checkpoint().tree_hash_root()); + leaves.push(state.current_justified_checkpoint().tree_hash_root()); + leaves.push(state.finalized_checkpoint().tree_hash_root()); + // Inactivity & light-client sync committees (Altair and later). + if let Ok(inactivity_scores) = state.inactivity_scores() { + leaves.push( + self.inactivity_scores + .recalculate_tree_hash_root(inactivity_scores)?, + ); + } + if let Ok(current_sync_committee) = state.current_sync_committee() { + leaves.push(current_sync_committee.tree_hash_root()); + } + + if let Ok(next_sync_committee) = state.next_sync_committee() { + leaves.push(next_sync_committee.tree_hash_root()); + } + + // Execution payload (merge and later). 
+ if let Ok(payload_header) = state.latest_execution_payload_header() { + leaves.push(payload_header.tree_hash_root()); + } + Ok(leaves) + } + /// Updates the cache and returns the tree hash root for the given `state`. /// /// The provided `state` should be a descendant of the last `state` given to this function, or @@ -246,121 +330,9 @@ impl BeaconTreeHashCacheInner { let mut hasher = MerkleHasher::with_leaves(NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES); - hasher.write(state.genesis_time().tree_hash_root().as_bytes())?; - hasher.write(state.genesis_validators_root().tree_hash_root().as_bytes())?; - hasher.write(state.slot().tree_hash_root().as_bytes())?; - hasher.write(state.fork().tree_hash_root().as_bytes())?; - hasher.write(state.latest_block_header().tree_hash_root().as_bytes())?; - hasher.write( - state - .block_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.block_roots)? - .as_bytes(), - )?; - hasher.write( - state - .state_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.state_roots)? - .as_bytes(), - )?; - hasher.write( - state - .historical_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.historical_roots)? - .as_bytes(), - )?; - hasher.write(state.eth1_data().tree_hash_root().as_bytes())?; - hasher.write( - self.eth1_data_votes - .recalculate_tree_hash_root(state)? - .as_bytes(), - )?; - hasher.write(state.eth1_deposit_index().tree_hash_root().as_bytes())?; - hasher.write( - self.validators - .recalculate_tree_hash_root(state.validators())? - .as_bytes(), - )?; - hasher.write( - state - .balances() - .recalculate_tree_hash_root(&mut self.balances_arena, &mut self.balances)? - .as_bytes(), - )?; - hasher.write( - state - .randao_mixes() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.randao_mixes)? - .as_bytes(), - )?; - hasher.write( - state - .slashings() - .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)? 
- .as_bytes(), - )?; - - // Participation - if let BeaconState::Base(state) = state { - hasher.write( - state - .previous_epoch_attestations - .tree_hash_root() - .as_bytes(), - )?; - hasher.write(state.current_epoch_attestations.tree_hash_root().as_bytes())?; - } else { - hasher.write( - self.previous_epoch_participation - .recalculate_tree_hash_root(&ParticipationList::new( - state.previous_epoch_participation()?, - ))? - .as_bytes(), - )?; - hasher.write( - self.current_epoch_participation - .recalculate_tree_hash_root(&ParticipationList::new( - state.current_epoch_participation()?, - ))? - .as_bytes(), - )?; - } - - hasher.write(state.justification_bits().tree_hash_root().as_bytes())?; - hasher.write( - state - .previous_justified_checkpoint() - .tree_hash_root() - .as_bytes(), - )?; - hasher.write( - state - .current_justified_checkpoint() - .tree_hash_root() - .as_bytes(), - )?; - hasher.write(state.finalized_checkpoint().tree_hash_root().as_bytes())?; - - // Inactivity & light-client sync committees (Altair and later). - if let Ok(inactivity_scores) = state.inactivity_scores() { - hasher.write( - self.inactivity_scores - .recalculate_tree_hash_root(inactivity_scores)? - .as_bytes(), - )?; - } - - if let Ok(current_sync_committee) = state.current_sync_committee() { - hasher.write(current_sync_committee.tree_hash_root().as_bytes())?; - } - - if let Ok(next_sync_committee) = state.next_sync_committee() { - hasher.write(next_sync_committee.tree_hash_root().as_bytes())?; - } - - // Execution payload (merge and later). 
- if let Ok(payload_header) = state.latest_execution_payload_header() { - hasher.write(payload_header.tree_hash_root().as_bytes())?; + let leaves = self.recalculate_tree_hash_leaves(state)?; + for leaf in leaves { + hasher.write(leaf.as_bytes())?; } let root = hasher.finish()?; diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 406136d542..d2a46c04a4 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -21,17 +21,15 @@ pub struct LightClientBootstrap { } impl LightClientBootstrap { - pub fn from_beacon_state(beacon_state: BeaconState) -> Result { + pub fn from_beacon_state(beacon_state: &mut BeaconState) -> Result { let mut header = beacon_state.latest_block_header().clone(); header.state_root = beacon_state.tree_hash_root(); + let current_sync_committee_branch = + beacon_state.compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)?; Ok(LightClientBootstrap { header, current_sync_committee: beacon_state.current_sync_committee()?.clone(), - /// TODO(Giulio2002): Generate Merkle Proof, this is just empty hashes - current_sync_committee_branch: FixedVector::new(vec![ - Hash256::zero(); - CURRENT_SYNC_COMMITTEE_PROOF_LEN - ])?, + current_sync_committee_branch: FixedVector::new(current_sync_committee_branch)?, }) } } diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index c93d15a1a0..fe26c0fa3e 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -31,7 +31,7 @@ impl LightClientFinalityUpdate { chain_spec: ChainSpec, beacon_state: BeaconState, block: BeaconBlock, - attested_state: BeaconState, + attested_state: &mut BeaconState, finalized_block: BeaconBlock, ) -> Result { let altair_fork_epoch = chain_spec @@ -60,11 +60,12 @@ impl LightClientFinalityUpdate { if finalized_header.tree_hash_root() != 
beacon_state.finalized_checkpoint().root { return Err(Error::InvalidFinalizedBlock); } - // TODO(Giulio2002): compute proper merkle proofs. + + let finality_branch = attested_state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?; Ok(Self { attested_header: attested_header, finalized_header: finalized_header, - finality_branch: FixedVector::new(vec![Hash256::zero(); FINALIZED_ROOT_PROOF_LEN])?, + finality_branch: FixedVector::new(finality_branch)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block.slot(), }) diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 38609cf1bc..7d01f39bfc 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -77,7 +77,7 @@ impl LightClientUpdate { chain_spec: ChainSpec, beacon_state: BeaconState, block: BeaconBlock, - attested_state: BeaconState, + attested_state: &mut BeaconState, finalized_block: BeaconBlock, ) -> Result { let altair_fork_epoch = chain_spec @@ -114,16 +114,15 @@ impl LightClientUpdate { if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root { return Err(Error::InvalidFinalizedBlock); } - // TODO(Giulio2002): compute proper merkle proofs. 
+ let next_sync_committee_branch = + attested_state.compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)?; + let finality_branch = attested_state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?; Ok(Self { attested_header, next_sync_committee: attested_state.next_sync_committee()?.clone(), - next_sync_committee_branch: FixedVector::new(vec![ - Hash256::zero(); - NEXT_SYNC_COMMITTEE_PROOF_LEN - ])?, + next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?, finalized_header, - finality_branch: FixedVector::new(vec![Hash256::zero(); FINALIZED_ROOT_PROOF_LEN])?, + finality_branch: FixedVector::new(finality_branch)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block.slot(), }) diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index ae70f1e07e..216912a4f1 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -18,6 +18,7 @@ mod fork; mod fork_choice; mod genesis_initialization; mod genesis_validity; +mod merkle_proof_validity; mod operations; mod rewards; mod sanity_blocks; @@ -41,6 +42,7 @@ pub use epoch_processing::*; pub use fork::ForkTest; pub use genesis_initialization::*; pub use genesis_validity::*; +pub use merkle_proof_validity::*; pub use operations::*; pub use rewards::RewardsTest; pub use sanity_blocks::*; diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs new file mode 100644 index 0000000000..3a6f4acf1e --- /dev/null +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -0,0 +1,83 @@ +use super::*; +use crate::decode::{ssz_decode_state, yaml_decode_file}; +use serde_derive::Deserialize; +use std::path::Path; +use tree_hash::Hash256; +use types::{BeaconState, EthSpec, ForkName}; + +#[derive(Debug, Clone, Deserialize)] +pub struct Metadata { + #[serde(rename(deserialize = "description"))] + _description: String, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct MerkleProof { + pub leaf: Hash256, + pub 
leaf_index: usize, + pub branch: Vec, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct MerkleProofValidity { + pub metadata: Option, + pub state: BeaconState, + pub merkle_proof: MerkleProof, +} + +impl LoadCase for MerkleProofValidity { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let spec = &testing_spec::(fork_name); + let state = ssz_decode_state(&path.join("state.ssz_snappy"), spec)?; + let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?; + // Metadata does not exist in these tests but it is left like this just in case. + let meta_path = path.join("meta.yaml"); + let metadata = if meta_path.exists() { + Some(yaml_decode_file(&meta_path)?) + } else { + None + }; + + Ok(Self { + metadata, + state, + merkle_proof, + }) + } +} + +impl Case for MerkleProofValidity { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let mut state = self.state.clone(); + state.initialize_tree_hash_cache(); + let proof = match state.compute_merkle_proof(self.merkle_proof.leaf_index) { + Ok(proof) => proof, + Err(_) => { + return Err(Error::FailedToParseTest( + "Could not retrieve merkle proof".to_string(), + )) + } + }; + let proof_len = proof.len(); + let branch_len = self.merkle_proof.branch.len(); + if proof_len != branch_len { + return Err(Error::NotEqual(format!( + "Branches not equal in length computed: {}, expected {}", + proof_len, branch_len + ))); + } + + for (i, proof_leaf) in proof.iter().enumerate().take(proof_len) { + let expected_leaf = self.merkle_proof.branch[i]; + if *proof_leaf != expected_leaf { + return Err(Error::NotEqual(format!( + "Leaves not equal in merke proof computed: {}, expected: {}", + hex::encode(proof_leaf), + hex::encode(expected_leaf) + ))); + } + } + Ok(()) + } +} diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index dd5ed82da7..13f70fea71 100644 --- a/testing/ef_tests/src/handler.rs +++ 
b/testing/ef_tests/src/handler.rs @@ -617,6 +617,30 @@ impl Handler for GenesisInitializationHandler { } } +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct MerkleProofValidityHandler(PhantomData); + +impl Handler for MerkleProofValidityHandler { + type Case = cases::MerkleProofValidity; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "light_client" + } + + fn handler_name(&self) -> String { + "single_merkle_proof".into() + } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + fork_name != ForkName::Base + } +} + #[derive(Derivative)] #[derivative(Default(bound = ""))] pub struct OperationsHandler(PhantomData<(E, O)>); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 28c57028cf..87a6bec71b 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -465,6 +465,11 @@ fn genesis_validity() { // Note: there are no genesis validity tests for mainnet } +#[test] +fn merkle_proof_validity() { + MerkleProofValidityHandler::::default().run(); +} + #[test] fn rewards() { for handler in &["basic", "leak", "random"] { From 266d7652854ba7dd22947396a1b6c7fcfe6eed50 Mon Sep 17 00:00:00 2001 From: tim gretler Date: Wed, 9 Nov 2022 05:37:09 +0000 Subject: [PATCH 16/37] Register blocks in validator monitor (#3635) ## Issue Addressed Closes #3460 ## Proposed Changes `blocks` and `block_min_delay` are never updated in the epoch summary Co-authored-by: Michael Sproul --- .../beacon_chain/src/validator_monitor.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 95f4aadced..f9203f74bf 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -109,6 +109,11 @@ impl EpochSummary { } } + pub fn register_block(&mut self, delay: Duration) { + 
self.blocks += 1; + Self::update_if_lt(&mut self.block_min_delay, delay); + } + pub fn register_unaggregated_attestation(&mut self, delay: Duration) { self.attestations += 1; Self::update_if_lt(&mut self.attestation_min_delay, delay); @@ -613,13 +618,6 @@ impl ValidatorMonitor { Ok(()) } - fn get_validator_id(&self, validator_index: u64) -> Option<&str> { - self.indices - .get(&validator_index) - .and_then(|pubkey| self.validators.get(pubkey)) - .map(|validator| validator.id.as_str()) - } - fn get_validator(&self, validator_index: u64) -> Option<&MonitoredValidator> { self.indices .get(&validator_index) @@ -685,7 +683,9 @@ impl ValidatorMonitor { block_root: Hash256, slot_clock: &S, ) { - if let Some(id) = self.get_validator_id(block.proposer_index()) { + let epoch = block.slot().epoch(T::slots_per_epoch()); + if let Some(validator) = self.get_validator(block.proposer_index()) { + let id = &validator.id; let delay = get_block_delay_ms(seen_timestamp, block, slot_clock); metrics::inc_counter_vec(&metrics::VALIDATOR_MONITOR_BEACON_BLOCK_TOTAL, &[src, id]); @@ -704,6 +704,8 @@ impl ValidatorMonitor { "src" => src, "validator" => %id, ); + + validator.with_epoch_summary(epoch, |summary| summary.register_block(delay)); } } From d99bfcf1a5647744be471111f2316d0e4b2a3dcb Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 11 Nov 2022 00:38:27 +0000 Subject: [PATCH 17/37] Blinded block and RANDAO APIs (#3571) ## Issue Addressed https://github.com/ethereum/beacon-APIs/pull/241 https://github.com/ethereum/beacon-APIs/pull/242 ## Proposed Changes Implement two new endpoints for fetching blinded blocks and RANDAO mixes. 
Co-authored-by: realbigsean --- beacon_node/http_api/src/lib.rs | 79 ++++++++++++++++++++ beacon_node/http_api/tests/tests.rs | 110 ++++++++++++++++++++++++++++ common/eth2/src/lib.rs | 97 ++++++++++++++++++++++++ common/eth2/src/types.rs | 10 +++ 4 files changed, 296 insertions(+) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 1ef3c3e2a9..01cc63ecea 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -891,6 +891,37 @@ pub fn serve( }, ); + // GET beacon/states/{state_id}/randao?epoch + let get_beacon_state_randao = beacon_states_path + .clone() + .and(warp::path("randao")) + .and(warp::query::()) + .and(warp::path::end()) + .and_then( + |state_id: StateId, chain: Arc>, query: api_types::RandaoQuery| { + blocking_json_task(move || { + let (randao, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let epoch = query.epoch.unwrap_or_else(|| state.current_epoch()); + let randao = *state.get_randao_mix(epoch).map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "epoch out of range: {e:?}" + )) + })?; + Ok((randao, execution_optimistic)) + }, + )?; + + Ok( + api_types::GenericResponse::from(api_types::RandaoMix { randao }) + .add_execution_optimistic(execution_optimistic), + ) + }) + }, + ); + // GET beacon/headers // // Note: this endpoint only returns information about blocks in the canonical chain. 
Given that @@ -1167,6 +1198,51 @@ pub fn serve( }) }); + // GET beacon/blinded_blocks/{block_id} + let get_beacon_blinded_block = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(block_id_or_err) + .and(chain_filter.clone()) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + .and_then( + |block_id: BlockId, + chain: Arc>, + accept_header: Option| { + blocking_task(move || { + let (block, execution_optimistic) = block_id.blinded_block(&chain)?; + let fork_name = block + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .header("Content-Type", "application/octet-stream") + .body(block.as_ssz_bytes().into()) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => { + // Post as a V2 endpoint so we return the fork version. + execution_optimistic_fork_versioned_response( + V2, + fork_name, + execution_optimistic, + block, + ) + .map(|res| warp::reply::json(&res).into_response()) + } + } + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ); + /* * beacon/pool */ @@ -3164,10 +3240,12 @@ pub fn serve( .or(get_beacon_state_validators.boxed()) .or(get_beacon_state_committees.boxed()) .or(get_beacon_state_sync_committees.boxed()) + .or(get_beacon_state_randao.boxed()) .or(get_beacon_headers.boxed()) .or(get_beacon_headers_block_id.boxed()) .or(get_beacon_block.boxed()) .or(get_beacon_block_attestations.boxed()) + .or(get_beacon_blinded_block.boxed()) .or(get_beacon_block_root.boxed()) .or(get_beacon_pool_attestations.boxed()) .or(get_beacon_pool_attester_slashings.boxed()) @@ -3212,6 +3290,7 @@ pub fn serve( .or(get_lighthouse_merge_readiness.boxed()) .or(get_events.boxed()), ) + .boxed() .or(warp::post().and( post_beacon_blocks .boxed() diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs 
index ff664d6ff0..2e795e522d 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -745,6 +745,36 @@ impl ApiTester { self } + pub async fn test_beacon_states_randao(self) -> Self { + for state_id in self.interesting_state_ids() { + let mut state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| state); + + let epoch_opt = state_opt.as_ref().map(|state| state.current_epoch()); + let result = self + .client + .get_beacon_states_randao(state_id.0, epoch_opt) + .await + .unwrap() + .map(|res| res.data); + + if result.is_none() && state_opt.is_none() { + continue; + } + + let state = state_opt.as_mut().expect("result should be none"); + let randao_mix = state + .get_randao_mix(state.slot().epoch(E::slots_per_epoch())) + .unwrap(); + + assert_eq!(result.unwrap().randao, *randao_mix); + } + + self + } + pub async fn test_beacon_headers_all_slots(self) -> Self { for slot in 0..CHAIN_LENGTH { let slot = Slot::from(slot); @@ -1016,6 +1046,82 @@ impl ApiTester { self } + pub async fn test_beacon_blinded_blocks(self) -> Self { + for block_id in self.interesting_block_ids() { + let expected = block_id + .blinded_block(&self.chain) + .ok() + .map(|(block, _execution_optimistic)| block); + + if let CoreBlockId::Slot(slot) = block_id.0 { + if expected.is_none() { + assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); + } else { + assert!(!SKIPPED_SLOTS.contains(&slot.as_u64())); + } + } + + // Check the JSON endpoint. + let json_result = self + .client + .get_beacon_blinded_blocks(block_id.0) + .await + .unwrap(); + + if let (Some(json), Some(expected)) = (&json_result, &expected) { + assert_eq!(&json.data, expected, "{:?}", block_id); + assert_eq!( + json.version, + Some(expected.fork_name(&self.chain.spec).unwrap()) + ); + } else { + assert_eq!(json_result, None); + assert_eq!(expected, None); + } + + // Check the SSZ endpoint. 
+ let ssz_result = self + .client + .get_beacon_blinded_blocks_ssz(block_id.0, &self.chain.spec) + .await + .unwrap(); + assert_eq!(ssz_result.as_ref(), expected.as_ref(), "{:?}", block_id); + + // Check that version headers are provided. + let url = self + .client + .get_beacon_blinded_blocks_path(block_id.0) + .unwrap(); + + let builders: Vec RequestBuilder> = vec![ + |b| b, + |b| b.accept(Accept::Ssz), + |b| b.accept(Accept::Json), + |b| b.accept(Accept::Any), + ]; + + for req_builder in builders { + let raw_res = self + .client + .get_response(url.clone(), req_builder) + .await + .optional() + .unwrap(); + if let (Some(raw_res), Some(expected)) = (&raw_res, &expected) { + assert_eq!( + raw_res.fork_name_from_header().unwrap(), + Some(expected.fork_name(&self.chain.spec).unwrap()) + ); + } else { + assert!(raw_res.is_none()); + assert_eq!(expected, None); + } + } + } + + self + } + pub async fn test_beacon_blocks_attestations(self) -> Self { for block_id in self.interesting_block_ids() { let result = self @@ -3696,6 +3802,8 @@ async fn beacon_get() { .await .test_beacon_states_validator_id() .await + .test_beacon_states_randao() + .await .test_beacon_headers_all_slots() .await .test_beacon_headers_all_parents() @@ -3704,6 +3812,8 @@ async fn beacon_get() { .await .test_beacon_blocks() .await + .test_beacon_blinded_blocks() + .await .test_beacon_blocks_attestations() .await .test_beacon_blocks_root() diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index a2fb082a35..58b4c88b3c 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -518,6 +518,29 @@ impl BeaconNodeHttpClient { self.get(path).await } + /// `GET beacon/states/{state_id}/randao?epoch` + pub async fn get_beacon_states_randao( + &self, + state_id: StateId, + epoch: Option, + ) -> Result>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("randao"); + + if let Some(epoch) = epoch { + path.query_pairs_mut() + .append_pair("epoch", &epoch.to_string()); + } + + self.get_opt(path).await + } + /// `GET beacon/states/{state_id}/validators/{validator_id}` /// /// Returns `Ok(None)` on a 404 error. @@ -636,6 +659,17 @@ impl BeaconNodeHttpClient { Ok(path) } + /// Path for `v1/beacon/blinded_blocks/{block_id}` + pub fn get_beacon_blinded_blocks_path(&self, block_id: BlockId) -> Result { + let mut path = self.eth_path(V1)?; + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blinded_blocks") + .push(&block_id.to_string()); + Ok(path) + } + /// `GET v2/beacon/blocks` /// /// Returns `Ok(None)` on a 404 error. @@ -680,6 +714,51 @@ impl BeaconNodeHttpClient { })) } + /// `GET v1/beacon/blinded_blocks/{block_id}` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_blinded_blocks( + &self, + block_id: BlockId, + ) -> Result>>, Error> + { + let path = self.get_beacon_blinded_blocks_path(block_id)?; + let response = match self.get_response(path, |b| b).await.optional()? { + Some(res) => res, + None => return Ok(None), + }; + + // If present, use the fork provided in the headers to decode the block. Gracefully handle + // missing and malformed fork names by falling back to regular deserialisation. 
+ let (block, version, execution_optimistic) = match response.fork_name_from_header() { + Ok(Some(fork_name)) => { + let (data, (version, execution_optimistic)) = + map_fork_name_with!(fork_name, SignedBlindedBeaconBlock, { + let ExecutionOptimisticForkVersionedResponse { + version, + execution_optimistic, + data, + } = response.json().await?; + (data, (version, execution_optimistic)) + }); + (data, version, execution_optimistic) + } + Ok(None) | Err(_) => { + let ExecutionOptimisticForkVersionedResponse { + version, + execution_optimistic, + data, + } = response.json().await?; + (data, version, execution_optimistic) + } + }; + Ok(Some(ExecutionOptimisticForkVersionedResponse { + version, + execution_optimistic, + data: block, + })) + } + /// `GET v1/beacon/blocks` (LEGACY) /// /// Returns `Ok(None)` on a 404 error. @@ -714,6 +793,24 @@ impl BeaconNodeHttpClient { .transpose() } + /// `GET beacon/blinded_blocks/{block_id}` as SSZ + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_blinded_blocks_ssz( + &self, + block_id: BlockId, + spec: &ChainSpec, + ) -> Result>, Error> { + let path = self.get_beacon_blinded_blocks_path(block_id)?; + + self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_beacon_blocks_ssz) + .await? + .map(|bytes| { + SignedBlindedBeaconBlock::from_ssz_bytes(&bytes, spec).map_err(Error::InvalidSsz) + }) + .transpose() + } + /// `GET beacon/blocks/{block_id}/root` /// /// Returns `Ok(None)` on a 404 error. 
diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index e657358003..7012972460 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -455,6 +455,11 @@ pub struct SyncCommitteesQuery { pub epoch: Option, } +#[derive(Serialize, Deserialize)] +pub struct RandaoQuery { + pub epoch: Option, +} + #[derive(Serialize, Deserialize)] pub struct AttestationPoolQuery { pub slot: Option, @@ -486,6 +491,11 @@ pub struct SyncCommitteeByValidatorIndices { pub validator_aggregates: Vec, } +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RandaoMix { + pub randao: Hash256, +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(transparent)] pub struct SyncSubcommittee { From c591fcd20179a8fd8cb3c601d949513a193c3351 Mon Sep 17 00:00:00 2001 From: GeemoCandama Date: Fri, 11 Nov 2022 00:38:28 +0000 Subject: [PATCH 18/37] add checkpoint-sync-url-timeout flag (#3710) ## Issue Addressed #3702 Which issue # does this PR address? #3702 ## Proposed Changes Added checkpoint-sync-url-timeout flag to cli. Added timeout field to ClientGenesis::CheckpointSyncUrl to utilize timeout set ## Additional Info Please provide any additional information. For example, future considerations or information useful for reviewers. 
Co-authored-by: GeemoCandama <104614073+GeemoCandama@users.noreply.github.com> Co-authored-by: Michael Sproul --- beacon_node/beacon_chain/src/chain_config.rs | 3 +++ beacon_node/client/src/builder.rs | 11 ++++++----- beacon_node/src/cli.rs | 8 ++++++++ beacon_node/src/config.rs | 2 ++ lighthouse/tests/beacon_node.rs | 19 +++++++++++++++++++ 5 files changed, 38 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 5e16a29cf3..286cc17a96 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -45,6 +45,8 @@ pub struct ChainConfig { pub paranoid_block_proposal: bool, /// Whether to strictly count unrealized justified votes. pub count_unrealized_full: CountUnrealizedFull, + /// Optionally set timeout for calls to checkpoint sync endpoint. + pub checkpoint_sync_url_timeout: u64, } impl Default for ChainConfig { @@ -65,6 +67,7 @@ impl Default for ChainConfig { always_reset_payload_statuses: false, paranoid_block_proposal: false, count_unrealized_full: CountUnrealizedFull::default(), + checkpoint_sync_url_timeout: 60, } } } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 36d6491a56..75b865407e 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -40,9 +40,6 @@ use types::{ /// Interval between polling the eth1 node for genesis information. pub const ETH1_GENESIS_UPDATE_INTERVAL_MILLIS: u64 = 7_000; -/// Timeout for checkpoint sync HTTP requests. -pub const CHECKPOINT_SYNC_HTTP_TIMEOUT: Duration = Duration::from_secs(60); - /// Builds a `Client` instance. 
/// /// ## Notes @@ -273,8 +270,12 @@ where "remote_url" => %url, ); - let remote = - BeaconNodeHttpClient::new(url, Timeouts::set_all(CHECKPOINT_SYNC_HTTP_TIMEOUT)); + let remote = BeaconNodeHttpClient::new( + url, + Timeouts::set_all(Duration::from_secs( + config.chain.checkpoint_sync_url_timeout, + )), + ); let slots_per_epoch = TEthSpec::slots_per_epoch(); let deposit_snapshot = if config.sync_eth1_chain { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 81a7c6bbeb..16a6794f43 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -714,6 +714,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .conflicts_with("checkpoint-state") ) + .arg( + Arg::with_name("checkpoint-sync-url-timeout") + .long("checkpoint-sync-url-timeout") + .help("Set the timeout for checkpoint sync calls to remote beacon node HTTP endpoint.") + .value_name("SECONDS") + .takes_value(true) + .default_value("60") + ) .arg( Arg::with_name("reconstruct-historic-states") .long("reconstruct-historic-states") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 3b94c31290..6af753afea 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -441,6 +441,8 @@ pub fn get_config( .extend_from_slice(boot_nodes) } } + client_config.chain.checkpoint_sync_url_timeout = + clap_utils::parse_required::(cli_args, "checkpoint-sync-url-timeout")?; client_config.genesis = if let Some(genesis_state_bytes) = eth2_network_config.genesis_state_bytes.clone() diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index b1498f109d..f24ba6895e 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -132,6 +132,25 @@ fn fork_choice_before_proposal_timeout_zero() { .with_config(|config| assert_eq!(config.chain.fork_choice_before_proposal_timeout_ms, 0)); } +#[test] +fn checkpoint_sync_url_timeout_flag() { + CommandLineTest::new() + .flag("checkpoint-sync-url-timeout", Some("300")) + 
.run_with_zero_port() + .with_config(|config| { + assert_eq!(config.chain.checkpoint_sync_url_timeout, 300); + }); +} + +#[test] +fn checkpoint_sync_url_timeout_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.chain.checkpoint_sync_url_timeout, 60); + }); +} + #[test] fn paranoid_block_proposal_default() { CommandLineTest::new() From 3be41006a6a0d4ddf44179f1d86ccf0e3e2d0100 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 11 Nov 2022 11:03:18 +0000 Subject: [PATCH 19/37] Add --light-client-server flag and state cache utils (#3714) ## Issue Addressed Part of https://github.com/sigp/lighthouse/issues/3651. ## Proposed Changes Add a flag for enabling the light client server, which should be checked before gossip/RPC traffic is processed (e.g. https://github.com/sigp/lighthouse/pull/3693, https://github.com/sigp/lighthouse/pull/3711). The flag is available at runtime from `beacon_chain.config.enable_light_client_server`. Additionally, a new method `BeaconChain::with_mutable_state_for_block` is added which I envisage being used for computing light client updates. Unfortunately its performance will be quite poor on average because it will only run quickly with access to the tree hash cache. Each slot the tree hash cache is only available for a brief window of time between the head block being processed and the state advance at 9s in the slot. When the state advance happens the cache is moved and mutated to get ready for the next slot, which makes it no longer useful for merkle proofs related to the head block. Rather than spend more time trying to optimise this I think we should continue prototyping with this code, and I'll make sure `tree-states` is ready to ship before we enable the light client server in prod (cf. https://github.com/sigp/lighthouse/pull/3206). 
## Additional Info I also fixed a bug in the implementation of `BeaconState::compute_merkle_proof` whereby the tree hash cache was moved with `.take()` but never put back with `.restore()`. --- beacon_node/beacon_chain/src/beacon_chain.rs | 40 +++++++++++++++++++ beacon_node/beacon_chain/src/chain_config.rs | 3 ++ .../beacon_chain/src/snapshot_cache.rs | 21 ++++++++++ beacon_node/src/cli.rs | 7 ++++ beacon_node/src/config.rs | 3 ++ consensus/types/src/beacon_state.rs | 12 +++--- lighthouse/tests/beacon_node.rs | 15 +++++++ testing/ef_tests/check_all_files_accessed.py | 2 - .../src/cases/merkle_proof_validity.rs | 4 ++ 9 files changed, 99 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b23dd30de0..6f409fdadc 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -997,6 +997,46 @@ impl BeaconChain { Ok(self.store.get_state(state_root, slot)?) } + /// Run a function with mutable access to a state for `block_root`. + /// + /// The primary purpose of this function is to borrow a state with its tree hash cache + /// from the snapshot cache *without moving it*. This means that calls to this function should + /// be kept to an absolute minimum, because holding the snapshot cache lock has the ability + /// to delay block import. + /// + /// If there is no appropriate state in the snapshot cache then one will be loaded from disk. + /// If no state is found on disk then `Ok(None)` will be returned. + /// + /// The 2nd parameter to the closure is a bool indicating whether the snapshot cache was used, + /// which can inform logging/metrics. + /// + /// NOTE: the medium-term plan is to delete this function and the snapshot cache in favour + /// of `tree-states`, where all caches are CoW and everything is good in the world. 
+ pub fn with_mutable_state_for_block>( + &self, + block: &SignedBeaconBlock, + block_root: Hash256, + f: F, + ) -> Result, Error> + where + F: FnOnce(&mut BeaconState, bool) -> Result, + { + if let Some(state) = self + .snapshot_cache + .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .ok_or(Error::SnapshotCacheLockTimeout)? + .borrow_unadvanced_state_mut(block_root) + { + let cache_hit = true; + f(state, cache_hit).map(Some) + } else if let Some(mut state) = self.get_state(&block.state_root(), Some(block.slot()))? { + let cache_hit = false; + f(&mut state, cache_hit).map(Some) + } else { + Ok(None) + } + } + /// Return the sync committee at `slot + 1` from the canonical chain. /// /// This is useful when dealing with sync committee messages, because messages are signed diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 286cc17a96..f970c5607e 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -47,6 +47,8 @@ pub struct ChainConfig { pub count_unrealized_full: CountUnrealizedFull, /// Optionally set timeout for calls to checkpoint sync endpoint. pub checkpoint_sync_url_timeout: u64, + /// Whether to enable the light client server protocol. + pub enable_light_client_server: bool, } impl Default for ChainConfig { @@ -68,6 +70,7 @@ impl Default for ChainConfig { paranoid_block_proposal: false, count_unrealized_full: CountUnrealizedFull::default(), checkpoint_sync_url_timeout: 60, + enable_light_client_server: false, } } } diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index 40b73451cb..33447bc2ef 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -298,6 +298,27 @@ impl SnapshotCache { }) } + /// Borrow the state corresponding to `block_root` if it exists in the cache *unadvanced*. 
+ /// + /// Care must be taken not to mutate the state in an invalid way. This function should only + /// be used to mutate the *caches* of the state, for example the tree hash cache when + /// calculating a light client merkle proof. + pub fn borrow_unadvanced_state_mut( + &mut self, + block_root: Hash256, + ) -> Option<&mut BeaconState> { + self.snapshots + .iter_mut() + .find(|snapshot| { + // If the pre-state exists then state advance has already taken the state for + // `block_root` and mutated its tree hash cache. Rather than re-building it while + // holding the snapshot cache lock (>1 second), prefer to return `None` from this + // function and force the caller to load it from disk. + snapshot.beacon_block_root == block_root && snapshot.pre_state.is_none() + }) + .map(|snapshot| &mut snapshot.beacon_state) + } + /// If there is a snapshot with `block_root`, clone it and return the clone. pub fn get_cloned( &self, diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 16a6794f43..b00d56513c 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -868,4 +868,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Useful if you intend to run a non-validating beacon node.") .takes_value(false) ) + .arg( + Arg::with_name("light-client-server") + .long("light-client-server") + .help("Act as a full node supporting light clients on the p2p network \ + [experimental]") + .takes_value(false) + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 6af753afea..99e0af6e4c 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -705,6 +705,9 @@ pub fn get_config( client_config.chain.builder_fallback_disable_checks = cli_args.is_present("builder-fallback-disable-checks"); + // Light client server config. 
+ client_config.chain.enable_light_client_server = cli_args.is_present("light-client-server"); + Ok(client_config) } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index a6b913bcb9..79625c12e3 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1702,12 +1702,12 @@ impl BeaconState { }; // 2. Get all `BeaconState` leaves. - let cache = self.tree_hash_cache_mut().take(); - let leaves = if let Some(mut cache) = cache { - cache.recalculate_tree_hash_leaves(self)? - } else { - return Err(Error::TreeHashCacheNotInitialized); - }; + let mut cache = self + .tree_hash_cache_mut() + .take() + .ok_or(Error::TreeHashCacheNotInitialized)?; + let leaves = cache.recalculate_tree_hash_leaves(self)?; + self.tree_hash_cache_mut().restore(cache); // 3. Make deposit tree. // Use the depth of the `BeaconState` fields (i.e. `log2(32) = 5`). diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index f24ba6895e..d69361a3a4 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1580,3 +1580,18 @@ fn sync_eth1_chain_disable_deposit_contract_sync_flag() { .run_with_zero_port() .with_config(|config| assert_eq!(config.sync_eth1_chain, false)); } + +#[test] +fn light_client_server_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.enable_light_client_server, false)); +} + +#[test] +fn light_client_server_enabled() { + CommandLineTest::new() + .flag("light-client-server", None) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.enable_light_client_server, true)); +} diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 158e875810..892b9a3770 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -39,8 +39,6 @@ excluded_paths = [ 
"tests/.*/.*/ssz_static/LightClientOptimistic", # LightClientFinalityUpdate "tests/.*/.*/ssz_static/LightClientFinalityUpdate", - # Merkle-proof tests for light clients - "tests/.*/.*/merkle/single_proof", # Capella tests are disabled for now. "tests/.*/capella", # One of the EF researchers likes to pack the tarballs on a Mac diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index 3a6f4acf1e..a57abc2e07 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -78,6 +78,10 @@ impl Case for MerkleProofValidity { ))); } } + + // Tree hash cache should still be initialized (not dropped). + assert!(state.tree_hash_cache().is_initialized()); + Ok(()) } } From 5dba89e43b1f1ea08af60c6b1476b8f838a63466 Mon Sep 17 00:00:00 2001 From: tim gretler Date: Sun, 13 Nov 2022 22:40:43 +0000 Subject: [PATCH 20/37] Sync committee sign bn fallback (#3624) ## Issue Addressed Closes #3612 ## Proposed Changes - Iterates through BNs until it finds a non-optimistic head. A slight change in error behavior: - Previously: `spawn_contribution_tasks` did not return an error for a non-optimistic block head. It returned `Ok(())` logged a warning. - Now: `spawn_contribution_tasks` returns an error if it cannot find a non-optimistic block head. The caller of `spawn_contribution_tasks` then logs the error as a critical error. Co-authored-by: Michael Sproul --- .../src/sync_committee_service.rs | 49 ++++++++++--------- 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs index 5b95945302..3647396ed5 100644 --- a/validator_client/src/sync_committee_service.rs +++ b/validator_client/src/sync_committee_service.rs @@ -174,39 +174,40 @@ impl SyncCommitteeService { return Ok(()); } - // Fetch `block_root` and `execution_optimistic` for `SyncCommitteeContribution`. 
+ // Fetch `block_root` with non optimistic execution for `SyncCommitteeContribution`. let response = self .beacon_nodes - .first_success(RequireSynced::Yes, OfflineOnFailure::Yes,|beacon_node| async move { - beacon_node.get_beacon_blocks_root(BlockId::Head).await - }) - .await - .map_err(|e| e.to_string())? - .ok_or_else(|| format!("No block root found for slot {}", slot))?; + .first_success( + RequireSynced::Yes, + OfflineOnFailure::Yes, + |beacon_node| async move { + match beacon_node.get_beacon_blocks_root(BlockId::Head).await { + Ok(Some(block)) if block.execution_optimistic == Some(false) => { + Ok(block) + } + Ok(Some(_)) => { + Err(format!("To sign sync committee messages for slot {slot} a non-optimistic head block is required")) + } + Ok(None) => Err(format!("No block root found for slot {}", slot)), + Err(e) => Err(e.to_string()), + } + }, + ) + .await; - let block_root = response.data.root; - if let Some(execution_optimistic) = response.execution_optimistic { - if execution_optimistic { + let block_root = match response { + Ok(block) => block.data.root, + Err(errs) => { warn!( log, - "Refusing to sign sync committee messages for optimistic head block"; + "Refusing to sign sync committee messages for an optimistic head block or \ + a block head with unknown optimistic status"; + "errors" => errs.to_string(), "slot" => slot, ); return Ok(()); } - } else if let Some(bellatrix_fork_epoch) = self.duties_service.spec.bellatrix_fork_epoch { - // If the slot is post Bellatrix, do not sign messages when we cannot verify the - // optimistic status of the head block. - if slot.epoch(E::slots_per_epoch()) > bellatrix_fork_epoch { - warn!( - log, - "Refusing to sign sync committee messages for a head block with an unknown \ - optimistic status"; - "slot" => slot, - ); - return Ok(()); - } - } + }; // Spawn one task to publish all of the sync committee signatures. 
let validator_duties = slot_duties.duties; From 9bd6d9ce7a9b4a6fba70253b20cd06b6eeff6660 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sun, 13 Nov 2022 22:40:44 +0000 Subject: [PATCH 21/37] CI gardening maintenance (#3706) ## Issue Addressed Closes https://github.com/sigp/lighthouse/issues/3656 ## Proposed Changes * Replace `set-output` by `$GITHUB_OUTPUT` usage * Avoid rate-limits when installing `protoc` by making authenticated requests (continuation of https://github.com/sigp/lighthouse/pull/3621) * Upgrade all Ubuntu 18.04 usage to 22.04 (18.04 is end of life) * Upgrade macOS-latest to explicit macOS-12 to silence warning * Use `actions/checkout@v3` and `actions/cache@v3` to avoid deprecated NodeJS v12 ## Additional Info Can't silence the NodeJS warnings entirely due to https://github.com/sigp/lighthouse/issues/3705. Can fix that in future. --- .github/workflows/docker-antithesis.yml | 2 +- .github/workflows/docker.yml | 12 ++-- .github/workflows/linkcheck.yml | 2 +- .github/workflows/local-testnet.yml | 8 +-- .github/workflows/publish-crate.yml | 4 +- .github/workflows/release.yml | 8 +-- .github/workflows/test-suite.yml | 92 +++++++++++++++++-------- 7 files changed, 81 insertions(+), 47 deletions(-) diff --git a/.github/workflows/docker-antithesis.yml b/.github/workflows/docker-antithesis.yml index 40de0bd0a5..84f5541a3c 100644 --- a/.github/workflows/docker-antithesis.yml +++ b/.github/workflows/docker-antithesis.yml @@ -17,7 +17,7 @@ jobs: build-docker: runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Update Rust run: rustup update stable - name: Dockerhub login diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 8d72319c60..13b8411695 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -22,7 +22,7 @@ jobs: # `unstable`, but for now we keep the two parts of the version separate for backwards # compatibility. 
extract-version: - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 steps: - name: Extract version (if stable) if: github.event.ref == 'refs/heads/stable' @@ -44,7 +44,7 @@ jobs: VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }} build-docker-single-arch: name: build-docker-${{ matrix.binary }} - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 strategy: matrix: binary: [aarch64, @@ -61,7 +61,7 @@ jobs: VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Update Rust run: rustup update stable - name: Dockerhub login @@ -102,7 +102,7 @@ jobs: --push build-docker-multiarch: name: build-docker-multiarch${{ matrix.modernity }} - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 needs: [build-docker-single-arch, extract-version] strategy: matrix: @@ -123,13 +123,13 @@ jobs: --amend ${IMAGE_NAME}:${VERSION}-amd64${VERSION_SUFFIX}${{ matrix.modernity }}; docker manifest push ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }} build-docker-lcli: - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 needs: [extract-version] env: VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Dockerhub login run: | echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml index 30a891febf..4d4e92ae14 100644 --- a/.github/workflows/linkcheck.yml +++ b/.github/workflows/linkcheck.yml @@ -15,7 +15,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Create docker network run: docker network create book diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 170bd9e212..b916ffee65 100644 --- 
a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -12,11 +12,11 @@ jobs: strategy: matrix: os: - - ubuntu-18.04 - - macos-latest + - ubuntu-22.04 + - macos-12 runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable @@ -28,7 +28,7 @@ jobs: run: npm install ganache@latest --global # https://github.com/actions/cache/blob/main/examples.md#rust---cargo - - uses: actions/cache@v2 + - uses: actions/cache@v3 id: cache-cargo with: path: | diff --git a/.github/workflows/publish-crate.yml b/.github/workflows/publish-crate.yml index a7fda90f74..736057f785 100644 --- a/.github/workflows/publish-crate.yml +++ b/.github/workflows/publish-crate.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Extract tag - run: echo "::set-output name=TAG::$(echo ${GITHUB_REF#refs/tags/})" + run: echo "TAG=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT id: extract_tag outputs: TAG: ${{ steps.extract_tag.outputs.TAG }} @@ -30,7 +30,7 @@ jobs: env: TAG: ${{ needs.extract-tag.outputs.TAG }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Update Rust run: rustup update stable - name: Cargo login diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 6edb1f76c1..957d016dc6 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Extract version - run: echo "::set-output name=VERSION::$(echo ${GITHUB_REF#refs/tags/})" + run: echo "VERSION=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT id: extract_version outputs: VERSION: ${{ steps.extract_version.outputs.VERSION }} @@ -62,7 +62,7 @@ jobs: needs: extract-version steps: - name: Checkout sources - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Build toolchain uses: actions-rs/toolchain@v1 with: @@ -199,7 +199,7 @@ jobs: steps: # This is 
necessary for generating the changelog. It has to come before "Download Artifacts" or else it deletes the artifacts. - name: Checkout sources - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 @@ -216,7 +216,7 @@ jobs: - name: Generate Full Changelog id: changelog - run: echo "::set-output name=CHANGELOG::$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})" + run: echo "CHANGELOG=$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})" >> $GITHUB_OUTPUT - name: Create Release Draft env: diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index a3e9625b50..423f3deca2 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -24,12 +24,12 @@ jobs: extract-msrv: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Extract Minimum Supported Rust Version (MSRV) run: | metadata=$(cargo metadata --no-deps --format-version 1) msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version') - echo "::set-output name=MSRV::$msrv" + echo "MSRV=$msrv" >> $GITHUB_OUTPUT id: extract_msrv outputs: MSRV: ${{ steps.extract_msrv.outputs.MSRV }} @@ -37,7 +37,7 @@ jobs: name: cargo-fmt runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Check formatting with cargo fmt @@ -47,11 +47,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache run: sudo npm install -g ganache - name: Run tests in release @@ -61,7 +63,7 @@ jobs: runs-on: windows-2019 needs: 
cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Use Node.js @@ -89,11 +91,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run beacon_chain tests for all known forks run: make test-beacon-chain op-pool-tests: @@ -101,11 +105,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run operation_pool tests for all known forks run: make test-op-pool slasher-tests: @@ -113,7 +119,7 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Run slasher tests for all supported backends @@ -123,11 +129,13 @@ jobs: runs-on: ubuntu-22.04 needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache run: sudo npm install -g ganache - name: Run tests in debug @@ -137,11 +145,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run state_transition_vectors in release. 
run: make run-state-transition-tests ef-tests-ubuntu: @@ -149,11 +159,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run consensus-spec-tests with blst, milagro and fake_crypto run: make test-ef dockerfile-ubuntu: @@ -161,7 +173,7 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Build the root Dockerfile @@ -173,11 +185,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache run: sudo npm install -g ganache - name: Run the beacon chain sim that starts from an eth1 contract @@ -187,11 +201,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache run: sudo npm install -g ganache - name: Run the beacon chain sim and go through the merge transition @@ -201,11 +217,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache run: sudo npm install -g ganache - name: Run the beacon chain sim without an eth1 connection @@ -215,11 
+233,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache run: sudo npm install -g ganache - name: Run the syncing simulator @@ -229,11 +249,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache run: sudo npm install -g ganache - name: Install lighthouse and lcli @@ -253,17 +275,19 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 - - uses: actions/setup-go@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-go@v3 with: go-version: '1.17' - - uses: actions/setup-dotnet@v1 + - uses: actions/setup-dotnet@v3 with: dotnet-version: '6.0.201' - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run exec engine integration tests in release run: make test-exec-engine check-benchmarks: @@ -271,11 +295,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Typecheck benchmark code without running it run: make check-benches check-consensus: @@ -283,7 +309,7 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Typecheck 
consensus code in strict mode @@ -293,11 +319,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Lint code for quality and style with Clippy run: make lint - name: Certify Cargo.lock freshness @@ -308,7 +336,7 @@ jobs: needs: cargo-fmt continue-on-error: true steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Install SigP Clippy fork run: | cd .. @@ -319,6 +347,8 @@ jobs: cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run Clippy with the disallowed-from-async lint run: make nightly-lint check-msrv: @@ -326,11 +356,13 @@ jobs: runs-on: ubuntu-latest needs: [cargo-fmt, extract-msrv] steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }}) run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }} - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run cargo check run: cargo check --workspace arbitrary-check: @@ -338,7 +370,7 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Validate state_processing feature arbitrary-fuzz @@ -348,7 +380,7 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Run cargo audit to identify known security vulnerabilities reported to the RustSec Advisory Database @@ -358,7 +390,7 
@@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose run: CARGO_HOME=$(readlink -f $HOME) make vendor cargo-udeps: @@ -366,13 +398,15 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Install Rust (${{ env.PINNED_NIGHTLY }}) run: rustup toolchain install $PINNED_NIGHTLY # NOTE: cargo-udeps version is pinned until this issue is resolved: # https://github.com/est31/cargo-udeps/issues/135 - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install cargo-udeps run: cargo install cargo-udeps --locked --force --version 0.1.30 - name: Create Cargo config dir From 230168deffc0adcae90e53256d42942b3ae97d93 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 15 Nov 2022 05:21:26 +0000 Subject: [PATCH 22/37] Health Endpoints for UI (#3668) This PR adds some health endpoints for the beacon node and the validator client. Specifically it adds the endpoint: `/lighthouse/ui/health` These are not entirely stable yet. But provide a base for modification for our UI. These also may have issues with various platforms and may need modification. 
--- Cargo.lock | 948 +++++++++++------- Cargo.toml | 1 + beacon_node/client/src/config.rs | 13 +- beacon_node/http_api/Cargo.toml | 3 + beacon_node/http_api/src/lib.rs | 65 ++ beacon_node/http_api/tests/common.rs | 2 + .../src/service/behaviour.rs | 1 - beacon_node/src/config.rs | 25 +- book/src/api-lighthouse.md | 37 + book/src/api-vc-endpoints.md | 40 + common/system_health/Cargo.toml | 13 + common/system_health/src/lib.rs | 241 +++++ database_manager/src/lib.rs | 7 +- lighthouse/tests/beacon_node.rs | 4 +- testing/node_test_rig/src/lib.rs | 2 +- validator_client/Cargo.toml | 3 + validator_client/src/http_api/mod.rs | 51 + 17 files changed, 1087 insertions(+), 369 deletions(-) create mode 100644 common/system_health/Cargo.toml create mode 100644 common/system_health/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index c759c46f36..588a76e78f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -111,16 +111,16 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", "once_cell", "version_check", ] [[package]] name = "aho-corasick" -version = "0.7.18" +version = "0.7.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" dependencies = [ "memchr", ] @@ -130,6 +130,15 @@ name = "amcl" version = "0.3.0" source = "git+https://github.com/sigp/milagro_bls?tag=v1.4.2#16655aa033175a90c10ef02aa144e2835de23aec" +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + [[package]] name = "ansi_term" version = "0.12.1" @@ -141,24 +150,24 @@ dependencies = [ [[package]] 
name = "anyhow" -version = "1.0.58" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb07d2053ccdbe10e2af2995a2f116c1330396493dc1269f6a91d0ae82e19704" +checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" [[package]] name = "arbitrary" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a7924531f38b1970ff630f03eb20a2fde69db5c590c93b0f3482e95dcc5fd60" +checksum = "29d47fbf90d5149a107494b15a7dc8d69b351be2db3bb9691740e88ec17fd880" dependencies = [ "derive_arbitrary", ] [[package]] name = "arc-swap" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f" +checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164" [[package]] name = "arrayref" @@ -201,9 +210,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.56" +version = "0.1.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96cf8829f67d2eab0b2dfa42c5d0ef737e0724e4a82b01b3e292456202b19716" +checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" dependencies = [ "proc-macro2", "quote", @@ -285,9 +294,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.16" +version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e3356844c4d6a6d6467b8da2cffb4a2820be256f50a3a386c9d152bab31043" +checksum = "acee9fd5073ab6b045a275b3e709c163dd36c90685219cb21804a147b58dba43" dependencies = [ "async-trait", "axum-core", @@ -297,7 +306,7 @@ dependencies = [ "http", "http-body", "hyper", - "itoa 1.0.2", + "itoa 1.0.4", "matchit", "memchr", "mime", @@ -316,9 +325,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.2.8" +version = "0.2.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f0c0a60006f2a293d82d571f635042a72edf927539b7685bd62d361963839b" +checksum = "37e5939e02c56fecd5c017c37df4238c0a839fa76b7f97acdd7efb804fd181cc" dependencies = [ "async-trait", "bytes", @@ -353,15 +362,15 @@ checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64ct" -version = "1.5.1" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bdca834647821e0b13d9539a8634eb62d3501b6b6c2cec1722786ee6671b851" +checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" [[package]] name = "beacon-api-client" @@ -535,7 +544,7 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388" dependencies = [ - "digest 0.10.3", + "digest 0.10.5", ] [[package]] @@ -550,9 +559,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" +checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" dependencies = [ "generic-array", ] @@ -662,15 +671,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.10.0" +version = "3.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3" +checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" [[package]] name = "byte-slice-cast" -version = "1.2.1" +version = 
"1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87c5fdd0166095e1d463fc6cc01aa8ce547ad77a4e84d42eb6762b084e28067e" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "byteorder" @@ -680,9 +689,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0b3de4a0c5e67e16066a0715723abd91edc2f9001d09c46e1dca929351e130e" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" dependencies = [ "serde", ] @@ -731,9 +740,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.73" +version = "1.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" +checksum = "581f5dba903aac52ea3feb5ec4810848460ee833876f1f9b0fdeab1f19091574" [[package]] name = "cexpr" @@ -777,14 +786,16 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "bfd4d1b31faaa3a89d7934dbded3111da0d2ef28e3ebccdb4f0179f5929d1ef1" dependencies = [ - "libc", + "iana-time-zone", + "js-sys", "num-integer", "num-traits", "time 0.1.44", + "wasm-bindgen", "winapi", ] @@ -799,9 +810,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.3.3" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a050e2153c5be08febd6734e29298e844fdb0fa21aeddd63b4eb7baa106c69b" +checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" dependencies = [ "glob", "libc", @@ -871,7 +882,7 @@ dependencies = [ "slot_clock", "store", "task_executor", - "time 
0.3.11", + "time 0.3.16", "timer", "tokio", "types", @@ -879,13 +890,23 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.48" +version = "0.1.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ad8cef104ac57b68b89df3208164d228503abbdce70f6880ffa3d970e7443a" +checksum = "db34956e100b30725f2eb215f90d4871051239535632f84fea3bc92722c66b7c" dependencies = [ "cc", ] +[[package]] +name = "codespan-reporting" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +dependencies = [ + "termcolor", + "unicode-width", +] + [[package]] name = "compare_fields" version = "0.2.0" @@ -1025,26 +1046,24 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.10" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1" +checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" dependencies = [ "autocfg 1.1.0", "cfg-if", "crossbeam-utils", "memoffset", - "once_cell", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51887d4adc7b564537b15adcfb307936f8075dfcd5f00dde9a9f1d29383682bc" +checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" dependencies = [ "cfg-if", - "once_cell", ] [[package]] @@ -1055,12 +1074,12 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f2b443d17d49dad5ef0ede301c3179cc923b8822f3393b4d2c28c269dd4a122" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ "generic-array", - "rand_core 0.6.3", + "rand_core 
0.6.4", "subtle", "zeroize", ] @@ -1128,11 +1147,11 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.2.2" +version = "3.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b37feaa84e6861e00a1f5e5aa8da3ee56d605c9992d33e082786754828e20865" +checksum = "1d91974fbbe88ec1df0c24a4f00f99583667a7e2e6272b2b92d294d81e462173" dependencies = [ - "nix 0.24.2", + "nix 0.25.0", "winapi", ] @@ -1157,11 +1176,55 @@ checksum = "4033478fbf70d6acf2655ac70da91ee65852d69daf7a67bf7a2f518fb47aafcf" dependencies = [ "byteorder", "digest 0.9.0", - "rand_core 0.6.3", + "rand_core 0.6.4", "subtle", "zeroize", ] +[[package]] +name = "cxx" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b7d4e43b25d3c994662706a1d4fcfc32aaa6afd287502c111b237093bb23f3a" +dependencies = [ + "cc", + "cxxbridge-flags", + "cxxbridge-macro", + "link-cplusplus", +] + +[[package]] +name = "cxx-build" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84f8829ddc213e2c1368e51a2564c552b65a8cb6a28f31e576270ac81d5e5827" +dependencies = [ + "cc", + "codespan-reporting", + "once_cell", + "proc-macro2", + "quote", + "scratch", + "syn", +] + +[[package]] +name = "cxxbridge-flags" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e72537424b474af1460806647c41d4b6d35d09ef7fe031c5c2fa5766047cc56a" + +[[package]] +name = "cxxbridge-macro" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "309e4fb93eed90e1e14bea0da16b209f81813ba9fc7830c20ed151dd7bc0a4d7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "darling" version = "0.13.4" @@ -1249,9 +1312,9 @@ checksum = "b72465f46d518f6015d9cf07f7f3013a95dd6b9c2747c3d65ae0cce43929d14f" [[package]] name = "delay_map" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "6716ce9729be9628979ae1ff63e8bc8b7ad53b5472a2633bf079607a55328d36" +checksum = "9c4d75d3abfe4830dcbf9bcb1b926954e121669f74dd1ca7aa0183b1755d83f6" dependencies = [ "futures", "tokio-util 0.6.10", @@ -1266,7 +1329,7 @@ dependencies = [ "hex", "reqwest", "serde_json", - "sha2 0.10.2", + "sha2 0.10.6", "tree_hash", "types", ] @@ -1294,9 +1357,9 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9a577516173adb681466d517d39bd468293bc2c2a16439375ef0f35bba45f3d" +checksum = "4903dff04948f22033ca30232ab8eca2c3fc4c913a8b6a34ee5199699814817f" dependencies = [ "proc-macro2", "quote", @@ -1327,11 +1390,11 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" +checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" dependencies = [ - "block-buffer 0.10.2", + "block-buffer 0.10.3", "crypto-common", "subtle", ] @@ -1422,9 +1485,9 @@ dependencies = [ [[package]] name = "dtoa" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5caaa75cbd2b960ff1e5392d2cfb1f44717fffe12fc1f32b7b5d1267f99732a6" +checksum = "f8a6eee2d5d0d113f015688310da018bd1d864d86bd567c8fca9c266889e1bfa" [[package]] name = "ecdsa" @@ -1494,9 +1557,9 @@ dependencies = [ [[package]] name = "either" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f107b87b6afc2a64fd13cac55fe06d6c8859f12d4b14cbcdd2c67d0976781be" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" [[package]] name = "elliptic-curve" @@ -1507,12 +1570,12 @@ dependencies = [ "base16ct", "crypto-bigint", "der", - "digest 0.10.3", + "digest 0.10.5", "ff", "generic-array", "group", 
"pkcs8", - "rand_core 0.6.3", + "rand_core 0.6.4", "sec1", "subtle", "zeroize", @@ -1543,7 +1606,7 @@ dependencies = [ "rand 0.8.5", "rlp", "serde", - "sha3 0.10.1", + "sha3 0.10.6", "zeroize", ] @@ -1571,9 +1634,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" +checksum = "c90bf5f19754d10198ccb95b70664fc925bd1fc090a0fd9a6ebc54acc8cd6272" dependencies = [ "atty", "humantime", @@ -1702,7 +1765,7 @@ dependencies = [ "lazy_static", "ring", "rustc-hex", - "sha2 0.10.2", + "sha2 0.10.6", "wasm-bindgen-test", ] @@ -1729,7 +1792,7 @@ dependencies = [ "hex", "num-bigint-dig", "ring", - "sha2 0.10.2", + "sha2 0.10.6", "zeroize", ] @@ -1858,9 +1921,9 @@ dependencies = [ [[package]] name = "ethabi" -version = "17.1.0" +version = "17.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f186de076b3e77b8e6d73c99d1b52edc2a229e604f4b5eb6992c06c11d79d537" +checksum = "e4966fba78396ff92db3b817ee71143eccd98acf0f876b8d600e585a670c5d1b" dependencies = [ "ethereum-types 0.13.1", "hex", @@ -1868,7 +1931,7 @@ dependencies = [ "regex", "serde", "serde_json", - "sha3 0.10.1", + "sha3 0.10.6", "thiserror", "uint", ] @@ -1959,7 +2022,7 @@ dependencies = [ "bytes", "chrono", "elliptic-curve", - "ethabi 17.1.0", + "ethabi 17.2.0", "fastrlp", "generic-array", "hex", @@ -1990,7 +2053,7 @@ dependencies = [ "futures-core", "futures-timer", "futures-util", - "getrandom 0.2.7", + "getrandom 0.2.8", "hashers", "hex", "http", @@ -2126,9 +2189,9 @@ dependencies = [ [[package]] name = "fastrlp-derive" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1fa41ebc231af281098b11ad4a4f6182ec9096902afffe948034a20d4e1385a" +checksum = "d9e9158c1d8f0a7a716c9191562eaabba70268ba64972ef4871ce8d66fd08872" dependencies = [ "bytes", 
"proc-macro2", @@ -2138,11 +2201,11 @@ dependencies = [ [[package]] name = "ff" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df689201f395c6b90dfe87127685f8dbfc083a5e779e613575d8bd7314300c3e" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ - "rand_core 0.6.3", + "rand_core 0.6.4", "subtle", ] @@ -2237,11 +2300,10 @@ dependencies = [ [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ - "matches", "percent-encoding", ] @@ -2269,9 +2331,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" dependencies = [ "futures-channel", "futures-core", @@ -2284,9 +2346,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" dependencies = [ "futures-core", "futures-sink", @@ -2294,15 +2356,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" [[package]] name = 
"futures-executor" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" dependencies = [ "futures-core", "futures-task", @@ -2312,15 +2374,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" [[package]] name = "futures-macro" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" dependencies = [ "proc-macro2", "quote", @@ -2329,26 +2391,26 @@ dependencies = [ [[package]] name = "futures-rustls" -version = "0.22.1" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e01fe9932a224b72b45336d96040aa86386d674a31d0af27d800ea7bc8ca97fe" +checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" dependencies = [ "futures-io", - "rustls 0.20.6", + "rustls 0.20.7", "webpki 0.22.0", ] [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = 
"2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" [[package]] name = "futures-timer" @@ -2358,9 +2420,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" dependencies = [ "futures-channel", "futures-core", @@ -2385,9 +2447,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.5" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", @@ -2429,9 +2491,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if", "js-sys", @@ -2486,20 +2548,20 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "group" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7391856def869c1c81063a03457c676fbcd419709c3dfb33d8d319de484b154d" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ "ff", - "rand_core 0.6.3", + "rand_core 0.6.4", "subtle", ] [[package]] name = "h2" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" +checksum = 
"5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" dependencies = [ "bytes", "fnv", @@ -2510,7 +2572,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.3", + "tokio-util 0.7.4", "tracing", ] @@ -2558,9 +2620,9 @@ dependencies = [ [[package]] name = "headers" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d" +checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ "base64", "bitflags", @@ -2569,7 +2631,7 @@ dependencies = [ "http", "httpdate", "mime", - "sha-1 0.10.0", + "sha1", ] [[package]] @@ -2643,7 +2705,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.3", + "digest 0.10.5", ] [[package]] @@ -2676,7 +2738,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa 1.0.2", + "itoa 1.0.4", ] [[package]] @@ -2702,6 +2764,7 @@ version = "0.1.0" dependencies = [ "beacon_chain", "bs58", + "directory", "environment", "eth1", "eth2", @@ -2726,6 +2789,8 @@ dependencies = [ "slot_clock", "state_processing", "store", + "sysinfo", + "system_health", "task_executor", "tokio", "tokio-stream", @@ -2759,9 +2824,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -2777,9 +2842,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.20" +version = "0.14.22" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" +checksum = "abfba89e19b959ca163c7752ba59d737c1ceea53a5d31a149c805446fc958064" dependencies = [ "bytes", "futures-channel", @@ -2790,7 +2855,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.2", + "itoa 1.0.4", "pin-project-lite 0.2.9", "socket2", "tokio", @@ -2807,7 +2872,7 @@ checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" dependencies = [ "http", "hyper", - "rustls 0.20.6", + "rustls 0.20.7", "tokio", "tokio-rustls 0.23.4", ] @@ -2825,6 +2890,30 @@ dependencies = [ "tokio-native-tls", ] +[[package]] +name = "iana-time-zone" +version = "0.1.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "winapi", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +dependencies = [ + "cxx", + "cxx-build", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -2842,6 +2931,16 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "if-addrs" version = "0.6.7" @@ -2901,7 +3000,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", ] [[package]] @@ -2993,9 
+3092,9 @@ checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" [[package]] name = "itertools" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] @@ -3008,15 +3107,15 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" +checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" [[package]] name = "js-sys" -version = "0.3.59" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "258451ab10b34f8af53416d1fdab72c22e805f0c92a1136d59470ec0b11138b2" +checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" dependencies = [ "wasm-bindgen", ] @@ -3059,8 +3158,8 @@ dependencies = [ "cfg-if", "ecdsa", "elliptic-curve", - "sha2 0.10.2", - "sha3 0.10.1", + "sha2 0.10.6", + "sha3 0.10.6", ] [[package]] @@ -3095,7 +3194,7 @@ dependencies = [ "clap_utils", "deposit_contract", "directory", - "env_logger 0.9.0", + "env_logger 0.9.1", "environment", "eth1_test_rig", "eth2", @@ -3145,9 +3244,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.126" +version = "0.2.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" +checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" [[package]] name = "libflate" @@ -3181,9 +3280,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.2" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"33a33a362ce288760ec6a508b94caaec573ae7d3bbbd91b87aa0bad4456839db" +checksum = "292a948cd991e376cf75541fe5b97a1081d713c618b4f1b9500f8844e49eb565" [[package]] name = "libmdbx" @@ -3209,7 +3308,7 @@ dependencies = [ "bytes", "futures", "futures-timer", - "getrandom 0.2.7", + "getrandom 0.2.8", "instant", "lazy_static", "libp2p-core", @@ -3259,7 +3358,7 @@ dependencies = [ "prost-build", "rand 0.8.5", "rw-stream-sink", - "sha2 0.10.2", + "sha2 0.10.6", "smallvec", "thiserror", "unsigned-varint 0.7.1", @@ -3303,7 +3402,7 @@ dependencies = [ "prost-build", "rand 0.7.3", "regex", - "sha2 0.10.2", + "sha2 0.10.6", "smallvec", "unsigned-varint 0.7.1", "wasm-timer", @@ -3376,7 +3475,7 @@ dependencies = [ "prost", "prost-build", "rand 0.8.5", - "sha2 0.10.2", + "sha2 0.10.6", "snow", "static_assertions", "x25519-dalek", @@ -3422,9 +3521,9 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.30.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f02622b9dd150011b4eeec387f8bd013189a2f27da08ba363e7c6e606d77a48" +checksum = "a0eddc4497a8b5a506013c40e8189864f9c3a00db2b25671f428ae9007f3ba32" dependencies = [ "heck", "quote", @@ -3563,7 +3662,7 @@ dependencies = [ "clap_utils", "database_manager", "directory", - "env_logger 0.9.0", + "env_logger 0.9.1", "environment", "eth1", "eth2_hashing", @@ -3627,7 +3726,7 @@ dependencies = [ "regex", "serde", "serde_derive", - "sha2 0.10.2", + "sha2 0.10.6", "slog", "slog-async", "slog-term", @@ -3656,6 +3755,15 @@ dependencies = [ "target_info", ] +[[package]] +name = "link-cplusplus" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369" +dependencies = [ + "cc", +] + [[package]] name = "linked-hash-map" version = "0.5.6" @@ -3685,9 +3793,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.7" +version = "0.4.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" dependencies = [ "autocfg 1.1.0", "scopeguard", @@ -3888,23 +3996,23 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc" +checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" +checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -3953,14 +4061,14 @@ dependencies = [ [[package]] name = "multihash" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3db354f401db558759dfc1e568d010a5d4146f4d3f637be1275ec4a3cf09689" +checksum = "1c346cf9999c631f002d8f977c4eaeaa0e6386f16007202308d0b3757522c2cc" dependencies = [ "core2", - "digest 0.10.3", + "digest 0.10.5", "multihash-derive", - "sha2 0.10.2", + "sha2 0.10.6", "unsigned-varint 0.7.1", ] @@ -4093,10 +4201,11 @@ dependencies = [ [[package]] name = "nix" -version = "0.24.2" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" +checksum = "e322c04a9e3440c327fca7b6c8a63e6890a32fa2ad689db972425f07e0d22abb" dependencies = [ + "autocfg 1.1.0", "bitflags", "cfg-if", "libc", @@ -4139,6 +4248,25 @@ 
dependencies = [ "minimal-lexical", ] +[[package]] +name = "ntapi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc51db7b362b205941f71232e56c625156eb9a929f8cf74a428fd5bc094a4afc" +dependencies = [ + "winapi", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + [[package]] name = "num-bigint" version = "0.4.3" @@ -4229,9 +4357,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.13.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1" +checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" [[package]] name = "oneshot_broadcast" @@ -4254,9 +4382,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.41" +version = "0.10.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "618febf65336490dfcf20b73f885f5651a0c89c64c2d4a8c3662585a70bf5bd0" +checksum = "12fc0523e3bd51a692c8850d075d74dc062ccf251c0110668cbd921917118a13" dependencies = [ "bitflags", "cfg-if", @@ -4295,9 +4423,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.75" +version = "0.9.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f" +checksum = "b03b84c3b2d099b81f0953422b4d4ad58761589d0229b5506356afca05a3670a" dependencies = [ "autocfg 1.1.0", "cc", @@ -4330,6 +4458,12 @@ dependencies = [ "types", ] +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + 
[[package]] name = "p256" version = "0.11.1" @@ -4338,7 +4472,7 @@ checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" dependencies = [ "ecdsa", "elliptic-curve", - "sha2 0.10.2", + "sha2 0.10.6", ] [[package]] @@ -4357,9 +4491,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.1.5" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9182e4a71cae089267ab03e67c99368db7cd877baf50f931e5d6d4b71e195ac0" +checksum = "366e44391a8af4cfd6002ef6ba072bae071a96aafca98d7d448a34c5dca38b6a" dependencies = [ "arrayvec", "bitvec 1.0.1", @@ -4411,7 +4545,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.3", + "parking_lot_core 0.9.4", ] [[package]] @@ -4430,22 +4564,22 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" +checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] name = "paste" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" +checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" [[package]] name = "pbkdf2" @@ -4482,15 +4616,15 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" 
[[package]] name = "pest" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b0560d531d1febc25a3c9398a62a71256c0178f2e3443baedd9ad4bb8c9deb4" +checksum = "dbc7bc69c062e492337d74d59b120c274fd3d261b6bf6d3207d499b4b379c41a" dependencies = [ "thiserror", "ucd-trie", @@ -4518,18 +4652,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", @@ -4566,9 +4700,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "platforms" @@ -4578,9 +4712,9 @@ checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "plotters" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "716b4eeb6c4a1d3ecc956f75b43ec2e8e8ba80026413e70a3f41fd3313d3492b" +checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97" dependencies = [ "num-traits", "plotters-backend", @@ -4597,9 +4731,9 @@ checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" [[package]] name = "plotters-svg" 
-version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0918736323d1baff32ee0eade54984f6f201ad7e97d5cfb5d6ab4a358529615" +checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f" dependencies = [ "plotters-backend", ] @@ -4661,10 +4795,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.1.3" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" dependencies = [ + "once_cell", "thiserror", "toml", ] @@ -4701,9 +4836,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.42" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c278e965f1d8cf32d6e0e96de3d3e79712178ae67986d9cf9151f51e95aac89b" +checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" dependencies = [ "unicode-ident", ] @@ -4722,9 +4857,9 @@ dependencies = [ [[package]] name = "prometheus" -version = "0.13.1" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cface98dfa6d645ea4c789839f176e4b072265d085bfcc48eaa8d137f58d3c39" +checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" dependencies = [ "cfg-if", "fnv", @@ -4737,12 +4872,12 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c473049631c233933d6286c88bbb7be30e62ec534cf99a9ae0079211f7fa603" +checksum = "83cd1b99916654a69008fd66b4f9397fbe08e6e51dfe23d4417acf5d3b8cb87c" dependencies = [ "dtoa", - "itoa 1.0.2", + "itoa 1.0.4", "parking_lot 0.12.1", "prometheus-client-derive-text-encode", ] @@ -4838,9 +4973,9 @@ dependencies = [ 
[[package]] name = "protobuf" -version = "2.27.1" +version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" [[package]] name = "psutil" @@ -4903,9 +5038,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804" +checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" dependencies = [ "proc-macro2", ] @@ -4964,7 +5099,7 @@ checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha 0.3.1", - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -4984,7 +5119,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -4998,11 +5133,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", ] [[package]] @@ -5020,7 +5155,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -5062,7 +5197,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ 
- "getrandom 0.2.7", + "getrandom 0.2.8", "redox_syscall", "thiserror", ] @@ -5104,9 +5239,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.11" +version = "0.11.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b75aa69a3f06bbcc66ede33af2af253c6f7a86b1ca0033f60c580a27074fbf92" +checksum = "431949c384f4e2ae07605ccaa56d1d9d2ecdb5cadd4f9577ccfab29f2e5149fc" dependencies = [ "base64", "bytes", @@ -5121,13 +5256,13 @@ dependencies = [ "hyper-tls", "ipnet", "js-sys", - "lazy_static", "log", "mime", "native-tls", + "once_cell", "percent-encoding", "pin-project-lite 0.2.9", - "rustls 0.20.6", + "rustls 0.20.7", "rustls-pemfile", "serde", "serde_json", @@ -5135,7 +5270,7 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls 0.23.4", - "tokio-util 0.7.3", + "tokio-util 0.7.4", "tower-service", "url", "wasm-bindgen", @@ -5189,9 +5324,9 @@ checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" [[package]] name = "rlp" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "999508abb0ae792aabed2460c45b89106d97fe4adac593bdaef433c2605847b5" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" dependencies = [ "bytes", "rustc-hex", @@ -5286,7 +5421,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.12", + "semver 1.0.14", ] [[package]] @@ -5304,9 +5439,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.6" +version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033" +checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" dependencies = [ "log", "ring", @@ -5316,18 +5451,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" 
-version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9" +checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" dependencies = [ "base64", ] [[package]] name = "rustversion" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24c8ad4f0c00e1eb5bc7614d236a7f1300e3dbd76b68cac8e06fb00b015ad8d8" +checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8" [[package]] name = "rw-stream-sink" @@ -5342,9 +5477,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" +checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" [[package]] name = "safe_arith" @@ -5381,7 +5516,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" dependencies = [ "lazy_static", - "windows-sys", + "windows-sys 0.36.1", ] [[package]] @@ -5395,9 +5530,9 @@ dependencies = [ [[package]] name = "scoped-tls" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" [[package]] name = "scopeguard" @@ -5405,6 +5540,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "scratch" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898" + [[package]] name = 
"scrypt" version = "0.7.0" @@ -5471,9 +5612,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.6.1" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" +checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" dependencies = [ "bitflags", "core-foundation", @@ -5512,9 +5653,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.12" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2333e6df6d6598f2b1974829f853c2b4c5f4a6e503c10af918081aa6f8564e1" +checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" [[package]] name = "semver-parser" @@ -5547,9 +5688,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.140" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc855a42c7967b7c369eb5860f7164ef1f6f81c20c7cc1141f2a604e18723b03" +checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" dependencies = [ "serde_derive", ] @@ -5576,9 +5717,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.140" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f2122636b9fe3b81f1cb25099fcf2d3f542cdb1d45940d56c713158884a05da" +checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" dependencies = [ "proc-macro2", "quote", @@ -5587,20 +5728,20 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.82" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82c2c1fdcd807d1098552c5b9a36e425e42e9fbd7c6a37a8425f390f781f7fa7" +checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" dependencies = [ - "itoa 1.0.2", + "itoa 1.0.4", "ryu", "serde", ] [[package]] name = "serde_repr" -version = "0.1.8" +version = "0.1.9" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ad84e47328a31223de7fed7a4f5087f2d6ddfe586cf3ca25b7a165bc0a5aed" +checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" dependencies = [ "proc-macro2", "quote", @@ -5614,7 +5755,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.2", + "itoa 1.0.4", "ryu", "serde", ] @@ -5674,7 +5815,18 @@ checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.3", + "digest 0.10.5", +] + +[[package]] +name = "sha1" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.5", ] [[package]] @@ -5692,13 +5844,13 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.2" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" +checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.3", + "digest 0.10.5", ] [[package]] @@ -5715,11 +5867,11 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.1" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881bf8156c87b6301fc5ca6b27f11eeb2761224c7081e69b409d5a1951a70c86" +checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" dependencies = [ - "digest 0.10.3", + "digest 0.10.5", "keccak", ] @@ -5749,12 +5901,12 @@ dependencies = [ [[package]] name = "signature" -version = "1.6.3" +version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"deb766570a2825fa972bceff0d195727876a9cdf2460ab2e52d455dc2de47fd9" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.10.3", - "rand_core 0.6.3", + "digest 0.10.5", + "rand_core 0.6.4", ] [[package]] @@ -5766,7 +5918,7 @@ dependencies = [ "num-bigint", "num-traits", "thiserror", - "time 0.3.11", + "time 0.3.16", ] [[package]] @@ -5774,7 +5926,7 @@ name = "simulator" version = "0.2.0" dependencies = [ "clap", - "env_logger 0.9.0", + "env_logger 0.9.1", "eth1", "eth1_test_rig", "execution_layer", @@ -5892,7 +6044,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.11", + "time 0.3.16", ] [[package]] @@ -5937,7 +6089,7 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.11", + "time 0.3.16", ] [[package]] @@ -5976,9 +6128,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "snap" @@ -5996,18 +6148,18 @@ dependencies = [ "blake2", "chacha20poly1305", "curve25519-dalek 4.0.0-pre.1", - "rand_core 0.6.3", + "rand_core 0.6.4", "ring", "rustc_version 0.4.0", - "sha2 0.10.2", + "sha2 0.10.6", "subtle", ] [[package]] name = "socket2" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ "libc", "winapi", @@ -6078,7 +6230,7 @@ dependencies = [ "beacon_chain", "bls", "derivative", - "env_logger 0.9.0", + "env_logger 0.9.1", "eth2_hashing", "eth2_ssz", "eth2_ssz_derive", @@ -6163,9 +6315,9 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.24.2" +version = "0.24.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4faebde00e8ff94316c01800f9054fd2ba77d30d9e922541913051d1d978918b" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck", "proc-macro2", @@ -6205,9 +6357,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.98" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" +checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" dependencies = [ "proc-macro2", "quote", @@ -6232,6 +6384,34 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "sysinfo" +version = "0.26.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c375d5fd899e32847b8566e10598d6e9f1d9b55ec6de3cdf9e7da4bdc51371bc" +dependencies = [ + "cfg-if", + "core-foundation-sys", + "libc", + "ntapi", + "once_cell", + "rayon", + "winapi", +] + +[[package]] +name = "system_health" +version = "0.1.0" +dependencies = [ + "lighthouse_network", + "parking_lot 0.12.1", + "serde", + "serde_derive", + "serde_json", + "sysinfo", + "types", +] + [[package]] name = "take_mut" version = "0.2.2" @@ -6331,18 +6511,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.31" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" +checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.31" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" +checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" dependencies = [ "proc-macro2", "quote", @@ -6380,21 +6560,32 @@ dependencies = [ 
[[package]] name = "time" -version = "0.3.11" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c91f41dcb2f096c05f0873d667dceec1087ce5bcf984ec8ffb19acddbb3217" +checksum = "0fab5c8b9980850e06d92ddbe3ab839c062c801f3927c0fb8abd6fc8e918fbca" dependencies = [ - "itoa 1.0.2", + "itoa 1.0.4", "libc", "num_threads", + "serde", + "time-core", "time-macros", ] [[package]] -name = "time-macros" -version = "0.2.4" +name = "time-core" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" +checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" + +[[package]] +name = "time-macros" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bb801831d812c562ae7d2bfb531f26e66e4e1f6b17307ba4149c5064710e5b" +dependencies = [ + "time-core", +] [[package]] name = "timer" @@ -6462,9 +6653,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.20.1" +version = "1.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a8325f63a7d4774dd041e363b2409ed1c5cbbd0f867795e661df066b2b0a581" +checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" dependencies = [ "autocfg 1.1.0", "bytes", @@ -6472,7 +6663,6 @@ dependencies = [ "memchr", "mio", "num_cpus", - "once_cell", "parking_lot 0.12.1", "pin-project-lite 0.2.9", "signal-hook-registry", @@ -6529,21 +6719,21 @@ version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "rustls 0.20.6", + "rustls 0.20.7", "tokio", "webpki 0.22.0", ] [[package]] name = "tokio-stream" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" +checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" dependencies = [ "futures-core", "pin-project-lite 0.2.9", "tokio", - "tokio-util 0.7.3", + "tokio-util 0.7.4", ] [[package]] @@ -6567,7 +6757,7 @@ checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" dependencies = [ "futures-util", "log", - "rustls 0.20.6", + "rustls 0.20.7", "tokio", "tokio-rustls 0.23.4", "tungstenite 0.17.3", @@ -6593,9 +6783,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc463cd8deddc3770d20f9852143d50bf6094e640b485cb2e189a2099085ff45" +checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ "bytes", "futures-core", @@ -6651,9 +6841,9 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" @@ -6663,9 +6853,9 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.35" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", @@ -6676,9 +6866,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" +checksum = 
"4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2", "quote", @@ -6687,9 +6877,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.28" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ "once_cell", "valuable", @@ -6718,12 +6908,12 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60db860322da191b40952ad9affe65ea23e7dd6a5c442c2c42865810c6ab8e6b" +checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" dependencies = [ - "ansi_term", "matchers", + "nu-ansi-term", "once_cell", "regex", "sharded-slab", @@ -6790,7 +6980,7 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "idna", + "idna 0.2.3", "ipnet", "lazy_static", "log", @@ -6860,7 +7050,7 @@ dependencies = [ "httparse", "log", "rand 0.8.5", - "rustls 0.20.6", + "rustls 0.20.7", "sha-1 0.10.0", "thiserror", "url", @@ -6935,15 +7125,15 @@ dependencies = [ [[package]] name = "ucd-trie" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89570599c4fe5585de2b388aab47e99f7fa4e9238a1399f707a02e356058141c" +checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" [[package]] name = "uint" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f03af7ccf01dd611cc450a0d10dbc9b745770d096473e2faf0ca6e2d66d1e0" +checksum = "a45526d29728d135c2900b0d30573fe3ee79fceb12ef534c7bb30e810a91b601" dependencies = [ "arbitrary", "byteorder", @@ -6975,30 +7165,30 @@ checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] 
name = "unicode-ident" -version = "1.0.2" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15c61ba63f9235225a22310255a29b806b907c9b8c964bcbd0a2c70f3f2deea7" +checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" [[package]] name = "unicode-normalization" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854cbdc4f7bc6ae19c820d44abdc3277ac3e1b2b93db20a636825d9322fb60e6" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-width" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "unicode-xid" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "universal-hash" @@ -7042,13 +7232,12 @@ version = "0.1.0" [[package]] name = "url" -version = "2.2.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", - "idna", - "matches", + "idna 0.3.0", "percent-encoding", ] @@ -7064,7 +7253,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", "serde", ] @@ -7110,6 +7299,8 @@ dependencies = [ "slashing_protection", "slog", 
"slot_clock", + "sysinfo", + "system_health", "task_executor", "tempfile", "tokio", @@ -7257,9 +7448,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7652e3f6c4706c8d9cd54832c4a4ccb9b5336e2c3bd154d5cccfbf1c1f5f7d" +checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -7267,9 +7458,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "662cd44805586bd52971b9586b1df85cdbbd9112e4ef4d8f41559c334dc6ac3f" +checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" dependencies = [ "bumpalo", "log", @@ -7282,9 +7473,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.32" +version = "0.4.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa76fb221a1f8acddf5b54ace85912606980ad661ac7a503b4570ffd3a624dad" +checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" dependencies = [ "cfg-if", "js-sys", @@ -7294,9 +7485,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b260f13d3012071dfb1512849c033b1925038373aea48ced3012c09df952c602" +checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7304,9 +7495,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be8e654bdd9b79216c2929ab90721aa82faf65c48cdf08bdc4e7f51357b80da" +checksum = 
"07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" dependencies = [ "proc-macro2", "quote", @@ -7317,15 +7508,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6598dd0bd3c7d51095ff6531a5b23e02acdc81804e30d8f07afb77b7215a140a" +checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" [[package]] name = "wasm-bindgen-test" -version = "0.3.32" +version = "0.3.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "513df541345bb9fcc07417775f3d51bbb677daf307d8035c0afafd87dc2e6599" +checksum = "09d2fff962180c3fadf677438054b1db62bee4aa32af26a45388af07d1287e1d" dependencies = [ "console_error_panic_hook", "js-sys", @@ -7337,9 +7528,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.32" +version = "0.3.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6150d36a03e90a3cf6c12650be10626a9902d70c5270fd47d7a47e5389a10d56" +checksum = "4683da3dfc016f704c9f82cf401520c4f1cb3ee440f7f52b3d6ac29506a49ca7" dependencies = [ "proc-macro2", "quote", @@ -7362,9 +7553,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.59" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed055ab27f941423197eb86b2035720b1a3ce40504df082cac2ecc6ed73335a1" +checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" dependencies = [ "js-sys", "wasm-bindgen", @@ -7386,7 +7577,7 @@ dependencies = [ "futures-timer", "headers", "hex", - "idna", + "idna 0.2.3", "jsonrpc-core", "log", "once_cell", @@ -7464,22 +7655,22 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1c760f0d366a6c24a02ed7816e23e691f5d92291f94d15e836006fd11b04daf" +checksum = 
"368bfe657969fb01238bb756d351dcade285e0f6fcbd36dcb23359a5169975be" dependencies = [ "webpki 0.22.0", ] [[package]] name = "which" -version = "4.2.5" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c4fb54e6113b6a8772ee41c3404fb0301ac79604489467e0a9ce1f3e97c24ae" +checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" dependencies = [ "either", - "lazy_static", "libc", + "once_cell", ] [[package]] @@ -7543,43 +7734,100 @@ version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" dependencies = [ - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_msvc", + "windows_aarch64_msvc 0.36.1", + "windows_i686_gnu 0.36.1", + "windows_i686_msvc 0.36.1", + "windows_x86_64_gnu 0.36.1", + "windows_x86_64_msvc 0.36.1", ] +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc 0.42.0", + "windows_i686_gnu 0.42.0", + "windows_i686_msvc 0.42.0", + "windows_x86_64_gnu 0.42.0", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc 0.42.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" + [[package]] name = "windows_aarch64_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" + [[package]] name = "windows_i686_gnu" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" +[[package]] +name = "windows_i686_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" + [[package]] name = "windows_i686_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" +[[package]] +name = "windows_i686_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" + [[package]] name = "windows_x86_64_gnu" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" + [[package]] name = "windows_x86_64_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" + [[package]] name = "winreg" version = "0.7.0" @@ -7668,9 +7916,9 @@ 
dependencies = [ [[package]] name = "yamux" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c0608f53c1dc0bad505d03a34bbd49fbf2ad7b51eb036123e896365532745a1" +checksum = "e5d9ba232399af1783a58d8eb26f6b5006fbefe2dc9ef36bd283324792d03ea5" dependencies = [ "futures", "log", diff --git a/Cargo.toml b/Cargo.toml index 02cf4d9436..e254400e88 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,6 +37,7 @@ members = [ "common/oneshot_broadcast", "common/sensitive_url", "common/slot_clock", + "common/system_health", "common/task_executor", "common/target_check", "common/test_random_derive", diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 5e43c1eaad..0a2997762a 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -42,7 +42,7 @@ pub enum ClientGenesis { /// The core configuration of a Lighthouse beacon node. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { - pub data_dir: PathBuf, + data_dir: PathBuf, /// Name of the directory inside the data directory where the main "hot" DB is located. pub db_name: String, /// Path where the freezer database will be located. @@ -103,6 +103,17 @@ impl Default for Config { } impl Config { + /// Updates the data directory for the Client. + pub fn set_data_dir(&mut self, data_dir: PathBuf) { + self.data_dir = data_dir.clone(); + self.http_api.data_dir = data_dir; + } + + /// Gets the config's data_dir. + pub fn data_dir(&self) -> &PathBuf { + &self.data_dir + } + /// Get the database path without initialising it. 
pub fn get_db_path(&self) -> PathBuf { self.get_data_dir().join(&self.db_name) diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index fedd66c540..077e3aa7cd 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -33,6 +33,9 @@ safe_arith = {path = "../../consensus/safe_arith"} task_executor = { path = "../../common/task_executor" } lru = "0.7.7" tree_hash = "0.4.1" +sysinfo = "0.26.5" +system_health = { path = "../../common/system_health" } +directory = { path = "../../common/directory" } [dev-dependencies] store = { path = "../store" } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 01cc63ecea..7f6852f364 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -26,12 +26,14 @@ use beacon_chain::{ BeaconChainTypes, ProduceBlockVerification, WhenSlotSkipped, }; pub use block_id::BlockId; +use directory::DEFAULT_ROOT_DIR; use eth2::types::{ self as api_types, EndpointVersion, SkipRandaoVerification, ValidatorId, ValidatorStatus, }; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; +use parking_lot::RwLock; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; @@ -43,6 +45,8 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; use std::pin::Pin; use std::sync::Arc; +use sysinfo::{System, SystemExt}; +use system_health::observe_system_health_bn; use tokio::sync::mpsc::{Sender, UnboundedSender}; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ @@ -110,6 +114,7 @@ pub struct Config { pub tls_config: Option, pub allow_sync_stalled: bool, pub spec_fork_name: Option, + pub data_dir: PathBuf, } impl Default for Config { @@ -122,6 +127,7 @@ impl Default for Config { tls_config: None, 
allow_sync_stalled: false, spec_fork_name: None, + data_dir: PathBuf::from(DEFAULT_ROOT_DIR), } } } @@ -323,6 +329,10 @@ pub fn serve( } }); + // Create a `warp` filter for the data_dir. + let inner_data_dir = ctx.config.data_dir.clone(); + let data_dir_filter = warp::any().map(move || inner_data_dir.clone()); + // Create a `warp` filter that provides access to the beacon chain. let inner_ctx = ctx.clone(); let chain_filter = @@ -431,6 +441,37 @@ pub fn serve( let inner_ctx = ctx.clone(); let log_filter = warp::any().map(move || inner_ctx.log.clone()); + // Create a `warp` filter that provides access to local system information. + let system_info = Arc::new(RwLock::new(sysinfo::System::new())); + { + // grab write access for initialisation + let mut system_info = system_info.write(); + system_info.refresh_disks_list(); + system_info.refresh_networks_list(); + system_info.refresh_cpu_specifics(sysinfo::CpuRefreshKind::everything()); + system_info.refresh_cpu(); + } // end lock + + let system_info_filter = + warp::any() + .map(move || system_info.clone()) + .map(|sysinfo: Arc>| { + { + // refresh stats + let mut sysinfo_lock = sysinfo.write(); + sysinfo_lock.refresh_memory(); + sysinfo_lock.refresh_cpu_specifics(sysinfo::CpuRefreshKind::everything()); + sysinfo_lock.refresh_cpu(); + sysinfo_lock.refresh_system(); + sysinfo_lock.refresh_networks(); + sysinfo_lock.refresh_disks(); + } // end lock + sysinfo + }); + + let app_start = std::time::Instant::now(); + let app_start_filter = warp::any().map(move || app_start); + /* * * Start of HTTP method definitions. 
@@ -2822,6 +2863,29 @@ pub fn serve( }) }); + // GET lighthouse/ui/health + let get_lighthouse_ui_health = warp::path("lighthouse") + .and(warp::path("ui")) + .and(warp::path("health")) + .and(warp::path::end()) + .and(system_info_filter) + .and(app_start_filter) + .and(data_dir_filter) + .and(network_globals.clone()) + .and_then( + |sysinfo, app_start: std::time::Instant, data_dir, network_globals| { + blocking_json_task(move || { + let app_uptime = app_start.elapsed().as_secs() as u64; + Ok(api_types::GenericResponse::from(observe_system_health_bn( + sysinfo, + data_dir, + app_uptime, + network_globals, + ))) + }) + }, + ); + // GET lighthouse/syncing let get_lighthouse_syncing = warp::path("lighthouse") .and(warp::path("syncing")) @@ -3271,6 +3335,7 @@ pub fn serve( .or(get_validator_aggregate_attestation.boxed()) .or(get_validator_sync_committee_contribution.boxed()) .or(get_lighthouse_health.boxed()) + .or(get_lighthouse_ui_health.boxed()) .or(get_lighthouse_syncing.boxed()) .or(get_lighthouse_nat.boxed()) .or(get_lighthouse_peers.boxed()) diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index eaf91ce9df..ec1448df7b 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -2,6 +2,7 @@ use beacon_chain::{ test_utils::{BeaconChainHarness, EphemeralHarnessType}, BeaconChain, BeaconChainTypes, }; +use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; use http_api::{Config, Context}; use lighthouse_network::{ @@ -142,6 +143,7 @@ pub async fn create_api_server_on_port( allow_origin: None, tls_config: None, allow_sync_stalled: false, + data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), spec_fork_name: None, }, chain: Some(chain.clone()), diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs index 8327293a74..3adc940a6a 100644 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ 
b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -8,7 +8,6 @@ use libp2p::gossipsub::subscription_filter::{ }; use libp2p::gossipsub::Gossipsub as BaseGossipsub; use libp2p::identify::Identify; -use libp2p::swarm::NetworkBehaviour; use libp2p::NetworkBehaviour; use types::EthSpec; diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 99e0af6e4c..406074fe38 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -34,13 +34,13 @@ pub fn get_config( let spec = &context.eth2_config.spec; let log = context.log(); - let mut client_config = ClientConfig { - data_dir: get_data_dir(cli_args), - ..Default::default() - }; + let mut client_config = ClientConfig::default(); + + // Update the client's data directory + client_config.set_data_dir(get_data_dir(cli_args)); // If necessary, remove any existing database and configuration - if client_config.data_dir.exists() && cli_args.is_present("purge-db") { + if client_config.data_dir().exists() && cli_args.is_present("purge-db") { // Remove the chain_db. let chain_db = client_config.get_db_path(); if chain_db.exists() { @@ -57,11 +57,11 @@ pub fn get_config( } // Create `datadir` and any non-existing parent directories. 
- fs::create_dir_all(&client_config.data_dir) + fs::create_dir_all(client_config.data_dir()) .map_err(|e| format!("Failed to create data dir: {}", e))?; // logs the chosen data directory - let mut log_dir = client_config.data_dir.clone(); + let mut log_dir = client_config.data_dir().clone(); // remove /beacon from the end log_dir.pop(); info!(log, "Data directory initialised"; "datadir" => log_dir.into_os_string().into_string().expect("Datadir should be a valid os string")); @@ -69,10 +69,13 @@ pub fn get_config( /* * Networking */ + + let data_dir_ref = client_config.data_dir().clone(); + set_network_config( &mut client_config.network, cli_args, - &client_config.data_dir, + &data_dir_ref, log, false, )?; @@ -303,7 +306,7 @@ pub fn get_config( } else if let Some(jwt_secret_key) = cli_args.value_of("execution-jwt-secret-key") { use std::fs::File; use std::io::Write; - secret_file = client_config.data_dir.join(DEFAULT_JWT_FILE); + secret_file = client_config.data_dir().join(DEFAULT_JWT_FILE); let mut jwt_secret_key_file = File::create(secret_file.clone()) .map_err(|e| format!("Error while creating jwt_secret_key file: {:?}", e))?; jwt_secret_key_file @@ -332,7 +335,7 @@ pub fn get_config( clap_utils::parse_optional(cli_args, "suggested-fee-recipient")?; el_config.jwt_id = clap_utils::parse_optional(cli_args, "execution-jwt-id")?; el_config.jwt_version = clap_utils::parse_optional(cli_args, "execution-jwt-version")?; - el_config.default_datadir = client_config.data_dir.clone(); + el_config.default_datadir = client_config.data_dir().clone(); el_config.builder_profit_threshold = clap_utils::parse_required(cli_args, "builder-profit-threshold")?; let execution_timeout_multiplier = @@ -573,7 +576,7 @@ pub fn get_config( let slasher_dir = if let Some(slasher_dir) = cli_args.value_of("slasher-dir") { PathBuf::from(slasher_dir) } else { - client_config.data_dir.join("slasher_db") + client_config.data_dir().join("slasher_db") }; let mut slasher_config = 
slasher::Config::new(slasher_dir); diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index d9c8080b4d..c1ba6a2dcc 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -62,6 +62,43 @@ curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/j ``` +### `/lighthouse/ui/health` + + +```bash +curl -X GET "http://localhost:5052/lighthouse/ui/health" -H "accept: application/json" | jq +``` + +```json +{ + "data": { + "total_memory": 16443219968, + "free_memory": 1283739648, + "used_memory": 5586264064, + "sys_loadavg_1": 0.59, + "sys_loadavg_5": 1.13, + "sys_loadavg_15": 2.41, + "cpu_cores": 4, + "cpu_threads": 8, + "global_cpu_frequency": 3.4, + "disk_bytes_total": 502390845440, + "disk_bytes_free": 9981386752, + "network_name": "wlp0s20f3", + "network_bytes_total_received": 14105556611, + "network_bytes_total_transmit": 3649489389, + "nat_open": true, + "connected_peers": 80, + "sync_state": "Synced", + "system_uptime": 660706, + "app_uptime": 105, + "system_name": "Arch Linux", + "kernel_version": "5.19.13-arch1-1", + "os_version": "Linux rolling Arch Linux", + "host_name": "Computer1" + } +} +``` + ### `/lighthouse/syncing` ```bash diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 9aedf6e249..76cffc0e4f 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -6,6 +6,7 @@ HTTP Path | Description | | --- | -- | [`GET /lighthouse/version`](#get-lighthouseversion) | Get the Lighthouse software version. [`GET /lighthouse/health`](#get-lighthousehealth) | Get information about the host machine. +[`GET /lighthouse/ui/health`](#get-lighthouseuihealth) | Get information about the host machine. Focused for UI applications. [`GET /lighthouse/spec`](#get-lighthousespec) | Get the Ethereum proof-of-stake consensus specification used by the validator. [`GET /lighthouse/auth`](#get-lighthouseauth) | Get the location of the authorization token. 
[`GET /lighthouse/validators`](#get-lighthousevalidators) | List all validators. @@ -77,6 +78,45 @@ Returns information regarding the health of the host machine. } ``` +## `GET /lighthouse/ui/health` + +Returns information regarding the health of the host machine. + +### HTTP Specification + +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/ui/health` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | + +### Example Response Body + +```json +{ + "data": { + "total_memory": 16443219968, + "free_memory": 1283739648, + "used_memory": 5586264064, + "sys_loadavg_1": 0.59, + "sys_loadavg_5": 1.13, + "sys_loadavg_15": 2.41, + "cpu_cores": 4, + "cpu_threads": 8, + "global_cpu_frequency": 3.4, + "disk_bytes_total": 502390845440, + "disk_bytes_free": 9981386752, + "system_uptime": 660706, + "app_uptime": 105, + "system_name": "Arch Linux", + "kernel_version": "5.19.13-arch1-1", + "os_version": "Linux rolling Arch Linux", + "host_name": "Computer1" + } +} +``` + ## `GET /lighthouse/spec` Returns the Ethereum proof-of-stake consensus specification loaded for this validator. 
diff --git a/common/system_health/Cargo.toml b/common/system_health/Cargo.toml new file mode 100644 index 0000000000..0956710b82 --- /dev/null +++ b/common/system_health/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "system_health" +version = "0.1.0" +edition = "2021" + +[dependencies] +lighthouse_network = { path = "../../beacon_node/lighthouse_network" } +types = { path = "../../consensus/types" } +sysinfo = "0.26.5" +serde = "1.0.116" +serde_derive = "1.0.116" +serde_json = "1.0.58" +parking_lot = "0.12.0" diff --git a/common/system_health/src/lib.rs b/common/system_health/src/lib.rs new file mode 100644 index 0000000000..d10540e506 --- /dev/null +++ b/common/system_health/src/lib.rs @@ -0,0 +1,241 @@ +use lighthouse_network::{types::SyncState, NetworkGlobals}; +use parking_lot::RwLock; +use serde::{Deserialize, Serialize}; +use std::path::{Path, PathBuf}; +use std::sync::Arc; +use sysinfo::{CpuExt, DiskExt, NetworkExt, NetworksExt, System, SystemExt}; +use types::EthSpec; + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct SystemHealth { + /// Total memory of the system. + pub total_memory: u64, + /// Total free memory available to the system. + pub free_memory: u64, + /// Total used memory. + pub used_memory: u64, + + /// System load average over 1 minute. + pub sys_loadavg_1: f64, + /// System load average over 5 minutes. + pub sys_loadavg_5: f64, + /// System load average over 15 minutes. + pub sys_loadavg_15: f64, + + /// Total cpu cores. + pub cpu_cores: usize, + /// Total cpu threads. + pub cpu_threads: usize, + /// The global cpu frequency. + pub global_cpu_frequency: f32, + + /// Total capacity of disk. + pub disk_bytes_total: u64, + /// Free space in disk. + pub disk_bytes_free: u64, + + /// System uptime. + pub system_uptime: u64, + /// Application uptime. 
+ pub app_uptime: u64, + /// The System name + pub system_name: String, + /// Kernel version + pub kernel_version: String, + /// OS version + pub os_version: String, + /// Hostname + pub host_name: String, +} + +/// System related health, specific to the UI for the validator client. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct SystemHealthVC { + #[serde(flatten)] + pub system_health: SystemHealth, +} + +/// System related health, specific to the UI for the Beacon Node. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct SystemHealthBN { + #[serde(flatten)] + pub system_health: SystemHealth, + /// The name of the network that uses the most traffic. + pub network_name: String, + /// Total bytes received over the main interface. + pub network_bytes_total_received: u64, + /// Total bytes sent over the main interface. + pub network_bytes_total_transmit: u64, + + /// The current NAT status. + pub nat_open: bool, + /// The current number of connected peers. + pub connected_peers: usize, + /// The current syncing state of the consensus node. + pub sync_state: SyncState, +} + +/// Populates the system health. +fn observe_system_health( + sysinfo: Arc>, + data_dir: PathBuf, + app_uptime: u64, +) -> SystemHealth { + let sysinfo = sysinfo.read(); + let loadavg = sysinfo.load_average(); + + let cpus = sysinfo.cpus(); + + let disks = sysinfo.disks(); + + let system_uptime = sysinfo.uptime(); + + // Helper functions to extract specific data + + // Find fs associated with the data dir location and report this + let (disk_bytes_total, disk_bytes_free) = { + // There is no clean way to find this in an OS-agnostic way. We take a simple approach, + // which is attempt to match the mount_point to the data_dir. If this cannot be done, we + // just fallback to the root fs. 
+ + let mut root_fs_disk = None; + let mut other_matching_fs = None; + + for disk in disks.iter() { + if disk.mount_point() == Path::new("/") + || disk.mount_point() == Path::new("C:\\") + || disk.mount_point() == Path::new("/System/Volumes/Data") + { + // Found the usual default root_fs + root_fs_disk = Some(disk); + continue; + } + + // If we have other file systems, compare these to the data_dir of Lighthouse and + // prioritize these. + if data_dir + .to_str() + .map(|path| { + if let Some(mount_str) = disk.mount_point().to_str() { + path.contains(mount_str) + } else { + false + } + }) + .unwrap_or(false) + { + other_matching_fs = Some(disk); + break; // Don't bother finding other competing fs. + } + } + + // If we found a file system other than the root, report this, otherwise just report the + // root fs + let fs = other_matching_fs.or(root_fs_disk); + + // If the root fs is not known, just add up the total of all known partitions + match fs { + Some(fs) => (fs.total_space(), fs.available_space()), + None => { + // If we can't find a known partition, just add them all up + disks.iter().fold((0, 0), |mut current_sizes, disk| { + current_sizes.0 += disk.total_space(); + current_sizes.1 += disk.available_space(); + current_sizes + }) + } + } + }; + + // Attempt to get the clock speed from the name of the CPU + let cpu_frequency_from_name = cpus.iter().next().and_then(|cpu| { + cpu.brand() + .split_once("GHz") + .and_then(|(result, _)| result.trim().rsplit_once(' ')) + .and_then(|(_, result)| result.parse::().ok()) + }); + + let global_cpu_frequency = match cpu_frequency_from_name { + Some(freq) => freq, + None => { + // Get the frequency from average measured frequencies + let global_cpu_frequency: f32 = + cpus.iter().map(|cpu| cpu.frequency()).sum::() as f32 / cpus.len() as f32; + // Shift to ghz to 1dp + (global_cpu_frequency / 100.0).round() / 10.0 + } + }; + + SystemHealth { + total_memory: sysinfo.total_memory(), + free_memory: sysinfo.free_memory(), + 
used_memory: sysinfo.used_memory(), + sys_loadavg_1: loadavg.one, + sys_loadavg_5: loadavg.five, + sys_loadavg_15: loadavg.fifteen, + cpu_cores: sysinfo.physical_core_count().unwrap_or(0), + cpu_threads: cpus.len(), + global_cpu_frequency, + disk_bytes_total, + disk_bytes_free, + system_uptime, + app_uptime, + system_name: sysinfo.name().unwrap_or_else(|| String::from("")), + kernel_version: sysinfo.kernel_version().unwrap_or_else(|| "".into()), + os_version: sysinfo.long_os_version().unwrap_or_else(|| "".into()), + host_name: sysinfo.host_name().unwrap_or_else(|| "".into()), + } +} + +/// Observes the Validator client system health. +pub fn observe_system_health_vc( + sysinfo: Arc>, + data_dir: PathBuf, + app_uptime: u64, +) -> SystemHealthVC { + SystemHealthVC { + system_health: observe_system_health(sysinfo, data_dir, app_uptime), + } +} + +/// Observes the Beacon Node system health. +pub fn observe_system_health_bn( + sysinfo: Arc>, + data_dir: PathBuf, + app_uptime: u64, + network_globals: Arc>, +) -> SystemHealthBN { + let system_health = observe_system_health(sysinfo.clone(), data_dir, app_uptime); + + // Find the network with the most traffic and assume this is the main network + let sysinfo = sysinfo.read(); + let networks = sysinfo.networks(); + let (network_name, network_bytes_total_received, network_bytes_total_transmit) = networks + .iter() + .max_by_key(|(_name, network)| network.total_received()) + .map(|(name, network)| { + ( + name.clone(), + network.total_received(), + network.total_transmitted(), + ) + }) + .unwrap_or_else(|| (String::from("None"), 0, 0)); + + // Determine if the NAT is open or not. 
+ let nat_open = lighthouse_network::metrics::NAT_OPEN + .as_ref() + .map(|v| v.get()) + .unwrap_or(0) + != 0; + + SystemHealthBN { + system_health, + network_name, + network_bytes_total_received, + network_bytes_total_transmit, + nat_open, + connected_peers: network_globals.connected_peers(), + sync_state: network_globals.sync_state(), + } +} diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index c0023f3505..5d0c12b5f8 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -98,10 +98,9 @@ fn parse_client_config( cli_args: &ArgMatches, _env: &Environment, ) -> Result { - let mut client_config = ClientConfig { - data_dir: get_data_dir(cli_args), - ..Default::default() - }; + let mut client_config = ClientConfig::default(); + + client_config.set_data_dir(get_data_dir(cli_args)); if let Some(freezer_dir) = clap_utils::parse_optional(cli_args, "freezer-dir")? { client_config.freezer_db_path = Some(freezer_dir); diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index d69361a3a4..0d70049250 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -56,7 +56,9 @@ impl CommandLineTestExec for CommandLineTest { fn datadir_flag() { CommandLineTest::new() .run_with_zero_port() - .with_config_and_dir(|config, dir| assert_eq!(config.data_dir, dir.path().join("beacon"))); + .with_config_and_dir(|config, dir| { + assert_eq!(*config.data_dir(), dir.path().join("beacon")) + }); } #[test] diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index 0933bff4c6..d0a4ef9491 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -48,7 +48,7 @@ impl LocalBeaconNode { .tempdir() .expect("should create temp directory for client datadir"); - client_config.data_dir = datadir.path().into(); + client_config.set_data_dir(datadir.path().into()); client_config.network.network_dir = PathBuf::from(datadir.path()).join("network"); 
ProductionBeaconNode::new(context, client_config) diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 8a3c8303a9..ada023f8c5 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -59,3 +59,6 @@ task_executor = { path = "../common/task_executor" } reqwest = { version = "0.11.0", features = ["json","stream"] } url = "2.2.2" malloc_utils = { path = "../common/malloc_utils" } +sysinfo = "0.26.5" +system_health = { path = "../common/system_health" } + diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index e9c7bf69d4..df5d0c606e 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -16,6 +16,7 @@ use eth2::lighthouse_vc::{ types::{self as api_types, GenericResponse, PublicKey, PublicKeyBytes}, }; use lighthouse_version::version_with_platform; +use parking_lot::RwLock; use serde::{Deserialize, Serialize}; use slog::{crit, info, warn, Logger}; use slot_clock::SlotClock; @@ -24,6 +25,8 @@ use std::marker::PhantomData; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; use std::sync::Arc; +use sysinfo::{System, SystemExt}; +use system_health::observe_system_health_vc; use task_executor::TaskExecutor; use types::{ChainSpec, ConfigAndPreset, EthSpec}; use validator_dir::Builder as ValidatorDirBuilder; @@ -183,6 +186,35 @@ pub fn serve( let api_token_path_inner = api_token_path.clone(); let api_token_path_filter = warp::any().map(move || api_token_path_inner.clone()); + // Create a `warp` filter that provides access to local system information. 
+ let system_info = Arc::new(RwLock::new(sysinfo::System::new())); + { + // grab write access for initialisation + let mut system_info = system_info.write(); + system_info.refresh_disks_list(); + system_info.refresh_networks_list(); + } // end lock + + let system_info_filter = + warp::any() + .map(move || system_info.clone()) + .map(|sysinfo: Arc>| { + { + // refresh stats + let mut sysinfo_lock = sysinfo.write(); + sysinfo_lock.refresh_memory(); + sysinfo_lock.refresh_cpu_specifics(sysinfo::CpuRefreshKind::everything()); + sysinfo_lock.refresh_cpu(); + sysinfo_lock.refresh_system(); + sysinfo_lock.refresh_networks(); + sysinfo_lock.refresh_disks(); + } // end lock + sysinfo + }); + + let app_start = std::time::Instant::now(); + let app_start_filter = warp::any().map(move || app_start); + // GET lighthouse/version let get_node_version = warp::path("lighthouse") .and(warp::path("version")) @@ -279,6 +311,24 @@ pub fn serve( }, ); + // GET lighthouse/ui/health + let get_lighthouse_ui_health = warp::path("lighthouse") + .and(warp::path("ui")) + .and(warp::path("health")) + .and(warp::path::end()) + .and(system_info_filter) + .and(app_start_filter) + .and(validator_dir_filter.clone()) + .and(signer.clone()) + .and_then(|sysinfo, app_start: std::time::Instant, val_dir, signer| { + blocking_signed_json_task(signer, move || { + let app_uptime = app_start.elapsed().as_secs() as u64; + Ok(api_types::GenericResponse::from(observe_system_health_vc( + sysinfo, val_dir, app_uptime, + ))) + }) + }); + // POST lighthouse/validators/ let post_validators = warp::path("lighthouse") .and(warp::path("validators")) @@ -894,6 +944,7 @@ pub fn serve( .or(get_lighthouse_spec) .or(get_lighthouse_validators) .or(get_lighthouse_validators_pubkey) + .or(get_lighthouse_ui_health) .or(get_fee_recipient) .or(get_gas_limit) .or(get_std_keystores) From 05178848e50866140c588d5d1fe56a98ed90a3f8 Mon Sep 17 00:00:00 2001 From: Daniel Ramirez Chiquillo Date: Tue, 15 Nov 2022 05:21:36 +0000 Subject: 
[PATCH 23/37] compile with beta compiler on CI (#3717) ## Issue Addressed Closes #3709 ## Proposed Changes Add the job `compile-with-beta-compiler` to `test-suite`. This job has the following steps: 1. Use `actions/checkout@v3`. (Needed to run make in a later step.) 2. Install the dependencies listed in [build from source guide](https://lighthouse-book.sigmaprime.io/installation-source.html). 3. Change the compiler to the current beta version with `rustup override`. 4. Run `make`. --- .github/workflows/test-suite.yml | 11 +++++++++++ bors.toml | 3 ++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 423f3deca2..d536869e45 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -418,3 +418,14 @@ jobs: env: # Allow warnings on Nightly RUSTFLAGS: "" + compile-with-beta-compiler: + name: compile-with-beta-compiler + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Install dependencies + run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler + - name: Use Rust beta + run: rustup override set beta + - name: Run make + run: make diff --git a/bors.toml b/bors.toml index 6edf55bfa3..dbe92c68f4 100644 --- a/bors.toml +++ b/bors.toml @@ -23,7 +23,8 @@ status = [ "check-msrv", "slasher-tests", "syncing-simulator-ubuntu", - "disallowed-from-async-lint" + "disallowed-from-async-lint", + "compile-with-beta-compiler" ] use_squash_merge = true timeout_sec = 10800 From 713b6a18d4492ae39bdaa9f79656934559db781c Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 15 Nov 2022 05:21:48 +0000 Subject: [PATCH 24/37] Simplify GossipTopic -> String conversion (#3722) ## Proposed Changes With a few different changes to the gossip topics in flight (light clients, Capella, 4844, etc) I think this simplification makes sense. I noticed it while plumbing through a new Capella topic. 
--- .../lighthouse_network/src/types/topics.rs | 25 ++----------------- 1 file changed, 2 insertions(+), 23 deletions(-) diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 825b1088b2..47d703c260 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -172,29 +172,8 @@ impl From for Topic { impl From for String { fn from(topic: GossipTopic) -> String { - let encoding = match topic.encoding { - GossipEncoding::SSZSnappy => SSZ_SNAPPY_ENCODING_POSTFIX, - }; - - let kind = match topic.kind { - GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(), - GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(), - GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(), - GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(), - GossipKind::AttesterSlashing => ATTESTER_SLASHING_TOPIC.into(), - GossipKind::Attestation(index) => format!("{}{}", BEACON_ATTESTATION_PREFIX, *index,), - GossipKind::SignedContributionAndProof => SIGNED_CONTRIBUTION_AND_PROOF_TOPIC.into(), - GossipKind::SyncCommitteeMessage(index) => { - format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index) - } - }; - format!( - "/{}/{}/{}/{}", - TOPIC_PREFIX, - hex::encode(topic.fork_digest), - kind, - encoding - ) + // Use the `Display` implementation below. + topic.to_string() } } From 857ef25d28130ab9064ad19e003a0a0a99370100 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 15 Nov 2022 13:25:38 +0000 Subject: [PATCH 25/37] Add metrics for subnet queries (#3721) ## Issue Addressed N/A ## Proposed Changes Add metrics for peers discovered in subnet discv5 queries. 
--- .../lighthouse_network/src/discovery/mod.rs | 17 +++++++++++++++++ beacon_node/lighthouse_network/src/metrics.rs | 13 +++++++++++++ 2 files changed, 30 insertions(+) diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 3535c6bd9a..8e528f09d2 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -834,6 +834,17 @@ impl Discovery { // Map each subnet query's min_ttl to the set of ENR's returned for that subnet. queries.iter().for_each(|query| { + let query_str = match query.subnet { + Subnet::Attestation(_) => "attestation", + Subnet::SyncCommittee(_) => "sync_committee", + }; + + if let Some(v) = metrics::get_int_counter( + &metrics::TOTAL_SUBNET_QUERIES, + &[query_str], + ) { + v.inc(); + } // A subnet query has completed. Add back to the queue, incrementing retries. self.add_subnet_query(query.subnet, query.min_ttl, query.retries + 1); @@ -845,6 +856,12 @@ impl Discovery { .filter(|enr| subnet_predicate(enr)) .map(|enr| enr.peer_id()) .for_each(|peer_id| { + if let Some(v) = metrics::get_int_counter( + &metrics::SUBNET_PEERS_FOUND, + &[query_str], + ) { + v.inc(); + } let other_min_ttl = mapped_results.get_mut(&peer_id); // map peer IDs to the min_ttl furthest in the future diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index 66d7a1f74a..2ee224d5e2 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -112,6 +112,19 @@ lazy_static! 
{ &["client"] ); + pub static ref SUBNET_PEERS_FOUND: Result = + try_create_int_counter_vec( + "discovery_query_peers_found", + "Total number of peers found in attestation subnets and sync subnets", + &["type"] + ); + pub static ref TOTAL_SUBNET_QUERIES: Result = + try_create_int_counter_vec( + "discovery_total_queries", + "Total number of discovery subnet queries", + &["type"] + ); + /* * Inbound/Outbound peers */ From 8a36acdb1a1fcc9cd27026e2a3aefc07e1530af1 Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Mon, 21 Nov 2022 03:15:54 +0000 Subject: [PATCH 26/37] Super small improvement: Remove unnecessary `mut` (#3736) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Issue Addressed Removed some unnecessary `mut`. 🙂 --- account_manager/src/validator/create.rs | 2 +- beacon_node/genesis/tests/tests.rs | 2 +- database_manager/src/lib.rs | 2 +- lcli/src/block_root.rs | 2 +- lcli/src/eth1_genesis.rs | 2 +- lcli/src/skip_slots.rs | 2 +- lcli/src/transition_blocks.rs | 2 +- lighthouse/environment/src/lib.rs | 4 ++-- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index bbd2cbc999..da01121055 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -114,7 +114,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { pub fn cli_run( matches: &ArgMatches, - mut env: Environment, + env: Environment, validator_dir: PathBuf, ) -> Result<(), String> { let spec = env.core_context().eth2_config.spec; diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index 58f28702b0..aaf6a7bea1 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -24,7 +24,7 @@ pub fn new_env() -> Environment { #[test] fn basic() { - let mut env = new_env(); + let env = new_env(); let log = env.core_context().log().clone(); let mut spec = 
env.eth2_config().spec.clone(); diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index 5d0c12b5f8..33accfc057 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -288,7 +288,7 @@ pub fn prune_payloads( } /// Run the database manager, returning an error string if the operation did not succeed. -pub fn run(cli_args: &ArgMatches<'_>, mut env: Environment) -> Result<(), String> { +pub fn run(cli_args: &ArgMatches<'_>, env: Environment) -> Result<(), String> { let client_config = parse_client_config(cli_args, &env)?; let context = env.core_context(); let log = context.log().clone(); diff --git a/lcli/src/block_root.rs b/lcli/src/block_root.rs index 7631872c5c..a47b48a30a 100644 --- a/lcli/src/block_root.rs +++ b/lcli/src/block_root.rs @@ -37,7 +37,7 @@ use types::{EthSpec, FullPayload, SignedBeaconBlock}; const HTTP_TIMEOUT: Duration = Duration::from_secs(5); -pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { +pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), String> { let spec = &T::default_spec(); let executor = env.core_context().executor; diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs index 80bcff9094..34144cd86d 100644 --- a/lcli/src/eth1_genesis.rs +++ b/lcli/src/eth1_genesis.rs @@ -13,7 +13,7 @@ use types::EthSpec; pub const ETH1_GENESIS_UPDATE_INTERVAL: Duration = Duration::from_millis(7_000); pub fn run( - mut env: Environment, + env: Environment, testnet_dir: PathBuf, matches: &ArgMatches<'_>, ) -> Result<(), String> { diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index 8bd9af99ad..49d1dd424d 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -59,7 +59,7 @@ use types::{BeaconState, CloneConfig, EthSpec, Hash256}; const HTTP_TIMEOUT: Duration = Duration::from_secs(10); -pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { +pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), String> { 
let spec = &T::default_spec(); let executor = env.core_context().executor; diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index b25cec81b5..84d0a51765 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -94,7 +94,7 @@ struct Config { exclude_post_block_thc: bool, } -pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { +pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), String> { let spec = &T::default_spec(); let executor = env.core_context().executor; diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 49163b96f4..c5b58581d2 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -380,7 +380,7 @@ impl Environment { } /// Returns a `Context` where no "service" has been added to the logger output. - pub fn core_context(&mut self) -> RuntimeContext { + pub fn core_context(&self) -> RuntimeContext { RuntimeContext { executor: TaskExecutor::new( Arc::downgrade(self.runtime()), @@ -395,7 +395,7 @@ impl Environment { } /// Returns a `Context` where the `service_name` is added to the logger output. 
- pub fn service_context(&mut self, service_name: String) -> RuntimeContext { + pub fn service_context(&self, service_name: String) -> RuntimeContext { RuntimeContext { executor: TaskExecutor::new( Arc::downgrade(self.runtime()), From e3729533a10fd7ef1d528fd5199c90f2da46971e Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 21 Nov 2022 06:29:02 +0000 Subject: [PATCH 27/37] Schedule gnosis merge (#3729) ## Issue Addressed N/A ## Proposed Changes Schedule Gnosis merge - Upstream config PR: https://github.com/gnosischain/configs/pull/3 - Nethermind PR: https://github.com/NethermindEth/nethermind/pull/4901 - Public announcement: https://twitter.com/gnosischain/status/1592589482641223682 ## Additional Info N/A Co-authored-by: Michael Sproul --- .../built_in_network_configs/gnosis/config.yaml | 6 +++--- common/eth2_network_config/src/lib.rs | 11 +++++++++-- consensus/types/src/chain_spec.rs | 15 ++++++--------- 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 7987899c3d..d55ef3f3b5 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -6,8 +6,8 @@ PRESET_BASE: 'gnosis' # Transition # --------------------------------------------------------------- -# TBD, 2**256-2**10 is a placeholder -TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912 +# Estimated on Dec 5, 2022 +TERMINAL_TOTAL_DIFFICULTY: 8626000000000000000000058750000000000000000000 # By default, don't use these params TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 @@ -35,7 +35,7 @@ ALTAIR_FORK_VERSION: 0x01000064 ALTAIR_FORK_EPOCH: 512 # Merge 
BELLATRIX_FORK_VERSION: 0x02000064 -BELLATRIX_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_EPOCH: 385536 # Sharding SHARDING_FORK_VERSION: 0x03000064 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 2bfd003266..7aef784373 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -226,7 +226,7 @@ mod tests { use super::*; use ssz::Encode; use tempfile::Builder as TempBuilder; - use types::{Config, Eth1Data, GnosisEthSpec, Hash256, MainnetEthSpec, GNOSIS}; + use types::{Config, Eth1Data, GnosisEthSpec, Hash256, MainnetEthSpec}; type E = MainnetEthSpec; @@ -250,6 +250,13 @@ mod tests { assert_eq!(spec, config.chain_spec::().unwrap()); } + #[test] + fn gnosis_config_eq_chain_spec() { + let config = Eth2NetworkConfig::from_hardcoded_net(&GNOSIS).unwrap(); + let spec = ChainSpec::gnosis(); + assert_eq!(spec, config.chain_spec::().unwrap()); + } + #[test] fn mainnet_genesis_state() { let config = Eth2NetworkConfig::from_hardcoded_net(&MAINNET).unwrap(); @@ -270,7 +277,7 @@ mod tests { .unwrap_or_else(|_| panic!("{:?}", net.name)); // Ensure we can parse the YAML config to a chain spec. 
- if net.name == GNOSIS { + if net.name == types::GNOSIS { config.chain_spec::().unwrap(); } else { config.chain_spec::().unwrap(); diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index f68e65d7d5..c8333868cd 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -777,7 +777,7 @@ impl ChainSpec { domain_sync_committee_selection_proof: 8, domain_contribution_and_proof: 9, altair_fork_version: [0x01, 0x00, 0x00, 0x64], - altair_fork_epoch: Some(Epoch::new(256)), + altair_fork_epoch: Some(Epoch::new(512)), /* * Merge hard fork params @@ -788,14 +788,11 @@ impl ChainSpec { .expect("pow does not overflow"), proportional_slashing_multiplier_bellatrix: 3, bellatrix_fork_version: [0x02, 0x00, 0x00, 0x64], - bellatrix_fork_epoch: None, - terminal_total_difficulty: Uint256::MAX - .checked_sub(Uint256::from(2u64.pow(10))) - .expect("subtraction does not overflow") - // Add 1 since the spec declares `2**256 - 2**10` and we use - // `Uint256::MAX` which is `2*256- 1`. - .checked_add(Uint256::one()) - .expect("addition does not overflow"), + bellatrix_fork_epoch: Some(Epoch::new(385536)), + terminal_total_difficulty: Uint256::from_dec_str( + "8626000000000000000000058750000000000000000000", + ) + .expect("terminal_total_difficulty is a valid integer"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), safe_slots_to_import_optimistically: 128u64, From b477c4274880de1e39b074b6e51c95dacee5d03f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 21 Nov 2022 06:29:03 +0000 Subject: [PATCH 28/37] Lower deposit finalization error to warning (#3739) ## Issue Addressed Partially addresses #3707 ## Proposed Changes Drop `ERRO` log to `WARN` until we identify the exact conditions that lead to this case. 
Add a message which hopefully reassures users who only see this log once :sweat_smile: Add the block hash to the error message in case it will prove useful in debugging the root cause. --- beacon_node/eth1/src/service.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index f24b746cd4..31082394ba 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -751,10 +751,11 @@ impl Service { let deposit_count_to_finalize = eth1data_to_finalize.deposit_count; if deposit_count_to_finalize > already_finalized { match self.finalize_deposits(eth1data_to_finalize) { - Err(e) => error!( + Err(e) => warn!( self.log, "Failed to finalize deposit cache"; "error" => ?e, + "info" => "this should resolve on its own" ), Ok(()) => info!( self.log, @@ -814,9 +815,10 @@ impl Service { .block_by_hash(ð1_data.block_hash) .cloned() .ok_or_else(|| { - Error::FailedToFinalizeDeposit( - "Finalized block not found in block cache".to_string(), - ) + Error::FailedToFinalizeDeposit(format!( + "Finalized block not found in block cache: {:?}", + eth1_data.block_hash + )) })?; self.inner .deposit_cache From bf533c8e42cc73c35730e285c21df8add0195369 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 23 Nov 2022 23:38:32 +0000 Subject: [PATCH 29/37] v3.3.0 (#3741) ## Issue Addressed NA ## Proposed Changes - Bump versions - Pin the `nethermind` version since our method of getting the latest tags on `master` is giving us an old version (`1.14.1`). - Increase timeout for execution engine startup. 
## Additional Info - [x] ~Awaiting further testing~ --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- testing/execution_engine_integration/src/nethermind.rs | 5 ++++- testing/execution_engine_integration/src/test_rig.rs | 2 +- 8 files changed, 15 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 588a76e78f..12d44d3e16 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -448,7 +448,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "3.2.1" +version = "3.3.0" dependencies = [ "beacon_chain", "clap", @@ -606,7 +606,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "3.2.1" +version = "3.3.0" dependencies = [ "beacon_node", "clap", @@ -3185,7 +3185,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "3.2.1" +version = "3.3.0" dependencies = [ "account_utils", "beacon_chain", @@ -3651,7 +3651,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "3.2.1" +version = "3.3.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index b85aae2f4f..d47f77da93 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "3.2.1" +version = "3.3.0" authors = ["Paul Hauner ", "Age Manning "] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index a48ba211d9..afcbae513b 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v3.2.1-", - fallback = "Lighthouse/v3.2.1" + prefix = "Lighthouse/v3.3.0-", + fallback = 
"Lighthouse/v3.3.0" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index b4f630ae15..638ab46bfb 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "3.2.1" +version = "3.3.0" authors = ["Paul Hauner "] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 864869a149..b0c0580b08 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "3.2.1" +version = "3.3.0" authors = ["Sigma Prime "] edition = "2021" autotests = false diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index 1fe7bf0f05..f643fbd5f2 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ b/testing/execution_engine_integration/src/nethermind.rs @@ -8,7 +8,10 @@ use std::process::{Child, Command, Output}; use tempfile::TempDir; use unused_port::unused_tcp_port; -const NETHERMIND_BRANCH: &str = "master"; +/// We've pinned the Nethermind version since our method of using the `master` branch to +/// find the latest tag isn't working. It appears Nethermind don't always tag on `master`. +/// We should fix this so we always pull the latest version of Nethermind. 
+const NETHERMIND_BRANCH: &str = "release/1.14.6"; const NETHERMIND_REPO_URL: &str = "https://github.com/NethermindEth/nethermind"; fn build_result(repo_dir: &Path) -> Output { diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 6e9f37ff1f..5455b48bce 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -18,7 +18,7 @@ use types::{ Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256, }; -const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(20); +const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(30); struct ExecutionPair { /// The Lighthouse `ExecutionLayer` struct, connected to the `execution_engine` via HTTP. From d5a2de759bc089d122a95e17eccb0e4eebef752a Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Fri, 25 Nov 2022 05:19:00 +0000 Subject: [PATCH 30/37] Added LightClientBootstrap V1 (#3711) ## Issue Addressed Partially addresses #3651 ## Proposed Changes Adds server-side support for light_client_bootstrap_v1 topic ## Additional Info This PR creates a new bootstrap each time, without using a cache. I do not know how necessary a cache is in this case, as this topic is not supposed to be called frequently, and IMHO we can prevent abuse by using the rate limiter alone — but let me know what you think, whether there is any caveat to this, or whether a cache is necessary only for the sake of good practice. 
Co-authored-by: Pawan Dhananjay --- beacon_node/beacon_chain/src/chain_config.rs | 3 - beacon_node/lighthouse_network/src/config.rs | 4 + .../src/peer_manager/mod.rs | 3 + .../src/rpc/codec/ssz_snappy.rs | 18 ++++- .../lighthouse_network/src/rpc/methods.rs | 18 ++++- beacon_node/lighthouse_network/src/rpc/mod.rs | 14 +++- .../lighthouse_network/src/rpc/outbound.rs | 12 ++- .../lighthouse_network/src/rpc/protocol.rs | 76 +++++++------------ .../src/rpc/rate_limiter.rs | 11 +++ .../src/service/api_types.rs | 14 +++- .../lighthouse_network/src/service/mod.rs | 21 ++++- .../network/src/beacon_processor/mod.rs | 45 ++++++++++- .../beacon_processor/worker/rpc_methods.rs | 75 +++++++++++++++++- beacon_node/network/src/router/mod.rs | 4 + beacon_node/network/src/router/processor.rs | 12 +++ beacon_node/src/config.rs | 6 +- lighthouse/tests/beacon_node.rs | 4 +- 17 files changed, 271 insertions(+), 69 deletions(-) diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index f970c5607e..286cc17a96 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -47,8 +47,6 @@ pub struct ChainConfig { pub count_unrealized_full: CountUnrealizedFull, /// Optionally set timeout for calls to checkpoint sync endpoint. pub checkpoint_sync_url_timeout: u64, - /// Whether to enable the light client server protocol. 
- pub enable_light_client_server: bool, } impl Default for ChainConfig { @@ -70,7 +68,6 @@ impl Default for ChainConfig { paranoid_block_proposal: false, count_unrealized_full: CountUnrealizedFull::default(), checkpoint_sync_url_timeout: 60, - enable_light_client_server: false, } } } diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 71566b8778..c8ef8809d4 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -130,6 +130,9 @@ pub struct Config { /// Whether metrics are enabled. pub metrics_enabled: bool, + + /// Whether light client protocols should be enabled. + pub enable_light_client_server: bool, } impl Default for Config { @@ -207,6 +210,7 @@ impl Default for Config { shutdown_after_sync: false, topics: Vec::new(), metrics_enabled: false, + enable_light_client_server: false, } } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 0f29135956..8102fa82a0 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -501,6 +501,7 @@ impl PeerManager { Protocol::Ping => PeerAction::MidToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, + Protocol::LightClientBootstrap => PeerAction::LowToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, @@ -517,6 +518,7 @@ impl PeerManager { Protocol::BlocksByRange => return, Protocol::BlocksByRoot => return, Protocol::Goodbye => return, + Protocol::LightClientBootstrap => return, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, } @@ -531,6 +533,7 @@ impl PeerManager { Protocol::Ping => PeerAction::LowToleranceError, 
Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, + Protocol::LightClientBootstrap => return, Protocol::Goodbye => return, Protocol::MetaData => return, Protocol::Status => return, diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index a46a05a8ce..a4dd602b3f 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -16,8 +16,8 @@ use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; use types::{ - EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockBase, SignedBeaconBlockMerge, + light_client_bootstrap::LightClientBootstrap, EthSpec, ForkContext, ForkName, Hash256, + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockMerge, }; use unsigned_varint::codec::Uvi; @@ -70,6 +70,7 @@ impl Encoder> for SSZSnappyInboundCodec< RPCResponse::Status(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), + RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RPCResponse::Pong(res) => res.data.as_ssz_bytes(), RPCResponse::MetaData(res) => // Encode the correct version of the MetaData response based on the negotiated version. 
@@ -230,6 +231,7 @@ impl Encoder> for SSZSnappyOutboundCodec< OutboundRequest::BlocksByRoot(req) => req.block_roots.as_ssz_bytes(), OutboundRequest::Ping(req) => req.as_ssz_bytes(), OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode + OutboundRequest::LightClientBootstrap(req) => req.as_ssz_bytes(), }; // SSZ encoded bytes should be within `max_packet_size` if bytes.len() > self.max_packet_size { @@ -472,7 +474,11 @@ fn handle_v1_request( Protocol::Ping => Ok(Some(InboundRequest::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), - + Protocol::LightClientBootstrap => Ok(Some(InboundRequest::LightClientBootstrap( + LightClientBootstrapRequest { + root: Hash256::from_ssz_bytes(decoded_buffer)?, + }, + ))), // MetaData requests return early from InboundUpgrade and do not reach the decoder. // Handle this case just for completeness. Protocol::MetaData => { @@ -544,6 +550,9 @@ fn handle_v1_response( Protocol::MetaData => Ok(Some(RPCResponse::MetaData(MetaData::V1( MetaDataV1::from_ssz_bytes(decoded_buffer)?, )))), + Protocol::LightClientBootstrap => Ok(Some(RPCResponse::LightClientBootstrap( + LightClientBootstrap::from_ssz_bytes(decoded_buffer)?, + ))), } } @@ -867,6 +876,9 @@ mod tests { OutboundRequest::MetaData(metadata) => { assert_eq!(decoded, InboundRequest::MetaData(metadata)) } + OutboundRequest::LightClientBootstrap(bootstrap) => { + assert_eq!(decoded, InboundRequest::LightClientBootstrap(bootstrap)) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 26d755a6e0..5da595c3db 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -12,7 +12,9 @@ use std::ops::Deref; use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; -use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{ + light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, 
Hash256, SignedBeaconBlock, Slot, +}; /// Maximum number of blocks in a single request. pub type MaxRequestBlocks = U1024; @@ -243,6 +245,9 @@ pub enum RPCResponse { /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Arc>), + /// A response to a get LIGHTCLIENT_BOOTSTRAP request. + LightClientBootstrap(LightClientBootstrap), + /// A PONG response to a PING request. Pong(Ping), @@ -273,6 +278,12 @@ pub enum RPCCodedResponse { StreamTermination(ResponseTermination), } +/// Request a light_client_bootstrap for lightclients peers. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct LightClientBootstrapRequest { + pub root: Hash256, +} + /// The code assigned to an erroneous `RPCResponse`. #[derive(Debug, Clone, Copy, PartialEq, IntoStaticStr)] #[strum(serialize_all = "snake_case")] @@ -321,6 +332,7 @@ impl RPCCodedResponse { RPCResponse::BlocksByRoot(_) => true, RPCResponse::Pong(_) => false, RPCResponse::MetaData(_) => false, + RPCResponse::LightClientBootstrap(_) => false, }, RPCCodedResponse::Error(_, _) => true, // Stream terminations are part of responses that have chunks @@ -355,6 +367,7 @@ impl RPCResponse { RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, RPCResponse::Pong(_) => Protocol::Ping, RPCResponse::MetaData(_) => Protocol::MetaData, + RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, } } } @@ -390,6 +403,9 @@ impl std::fmt::Display for RPCResponse { } RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), + RPCResponse::LightClientBootstrap(bootstrap) => { + write!(f, "LightClientBootstrap Slot: {}", bootstrap.header.slot) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 7b0092ef71..203a642a8b 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -26,8 +26,8 @@ pub(crate) use 
protocol::{InboundRequest, RPCProtocol}; pub use handler::SubstreamId; pub use methods::{ - BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, MaxRequestBlocks, - RPCResponseErrorCode, ResponseTermination, StatusMessage, MAX_REQUEST_BLOCKS, + BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest, + MaxRequestBlocks, RPCResponseErrorCode, ResponseTermination, StatusMessage, MAX_REQUEST_BLOCKS, }; pub(crate) use outbound::OutboundRequest; pub use protocol::{max_rpc_size, Protocol, RPCError}; @@ -108,18 +108,24 @@ pub struct RPC { /// Queue of events to be processed. events: Vec, RPCHandler>>, fork_context: Arc, + enable_light_client_server: bool, /// Slog logger for RPC behaviour. log: slog::Logger, } impl RPC { - pub fn new(fork_context: Arc, log: slog::Logger) -> Self { + pub fn new( + fork_context: Arc, + enable_light_client_server: bool, + log: slog::Logger, + ) -> Self { let log = log.new(o!("service" => "libp2p_rpc")); let limiter = RPCRateLimiterBuilder::new() .n_every(Protocol::MetaData, 2, Duration::from_secs(5)) .n_every(Protocol::Ping, 2, Duration::from_secs(10)) .n_every(Protocol::Status, 5, Duration::from_secs(15)) .one_every(Protocol::Goodbye, Duration::from_secs(10)) + .one_every(Protocol::LightClientBootstrap, Duration::from_secs(10)) .n_every( Protocol::BlocksByRange, methods::MAX_REQUEST_BLOCKS, @@ -132,6 +138,7 @@ impl RPC { limiter, events: Vec::new(), fork_context, + enable_light_client_server, log, } } @@ -188,6 +195,7 @@ where RPCProtocol { fork_context: self.fork_context.clone(), max_rpc_size: max_rpc_size(&self.fork_context), + enable_light_client_server: self.enable_light_client_server, phantom: PhantomData, }, (), diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 7d5acc4364..774303800e 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -38,6 +38,7 @@ pub enum 
OutboundRequest { Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), + LightClientBootstrap(LightClientBootstrapRequest), Ping(Ping), MetaData(PhantomData), } @@ -84,9 +85,12 @@ impl OutboundRequest { ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), ], + // Note: This match arm is technically unreachable as we only respond to light client requests + // that we generate from the beacon state. + // We do not make light client rpc requests from the beacon node + OutboundRequest::LightClientBootstrap(_) => vec![], } } - /* These functions are used in the handler for stream management */ /// Number of responses expected for this request. @@ -98,6 +102,7 @@ impl OutboundRequest { OutboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64, OutboundRequest::Ping(_) => 1, OutboundRequest::MetaData(_) => 1, + OutboundRequest::LightClientBootstrap(_) => 1, } } @@ -110,6 +115,7 @@ impl OutboundRequest { OutboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, OutboundRequest::Ping(_) => Protocol::Ping, OutboundRequest::MetaData(_) => Protocol::MetaData, + OutboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap, } } @@ -121,6 +127,7 @@ impl OutboundRequest { // variants that have `multiple_responses()` can have values. 
OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, + OutboundRequest::LightClientBootstrap(_) => unreachable!(), OutboundRequest::Status(_) => unreachable!(), OutboundRequest::Goodbye(_) => unreachable!(), OutboundRequest::Ping(_) => unreachable!(), @@ -178,6 +185,9 @@ impl std::fmt::Display for OutboundRequest { OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), OutboundRequest::MetaData(_) => write!(f, "MetaData request"), + OutboundRequest::LightClientBootstrap(bootstrap) => { + write!(f, "Lightclient Bootstrap: {}", bootstrap.root) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 81960214b1..1f40f81971 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -153,6 +153,8 @@ pub enum Protocol { Ping, /// The `MetaData` protocol name. MetaData, + /// The `LightClientBootstrap` protocol name. + LightClientBootstrap, } /// RPC Versions @@ -179,6 +181,7 @@ impl std::fmt::Display for Protocol { Protocol::BlocksByRoot => "beacon_blocks_by_root", Protocol::Ping => "ping", Protocol::MetaData => "metadata", + Protocol::LightClientBootstrap => "light_client_bootstrap", }; f.write_str(repr) } @@ -207,6 +210,7 @@ impl std::fmt::Display for Version { pub struct RPCProtocol { pub fork_context: Arc, pub max_rpc_size: usize, + pub enable_light_client_server: bool, pub phantom: PhantomData, } @@ -216,7 +220,7 @@ impl UpgradeInfo for RPCProtocol { /// The list of supported RPC protocols for Lighthouse. 
fn protocol_info(&self) -> Self::InfoIter { - vec![ + let mut supported_protocols = vec![ ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy), // V2 variants have higher preference then V1 @@ -227,7 +231,15 @@ impl UpgradeInfo for RPCProtocol { ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), - ] + ]; + if self.enable_light_client_server { + supported_protocols.push(ProtocolId::new( + Protocol::LightClientBootstrap, + Version::V1, + Encoding::SSZSnappy, + )); + } + supported_protocols } } @@ -289,6 +301,10 @@ impl ProtocolId { ::ssz_fixed_len(), ::ssz_fixed_len(), ), + Protocol::LightClientBootstrap => RpcLimits::new( + ::ssz_fixed_len(), + ::ssz_fixed_len(), + ), Protocol::MetaData => RpcLimits::new(0, 0), // Metadata requests are empty } } @@ -312,6 +328,10 @@ impl ProtocolId { as Encode>::ssz_fixed_len(), as Encode>::ssz_fixed_len(), ), + Protocol::LightClientBootstrap => RpcLimits::new( + ::ssz_fixed_len(), + ::ssz_fixed_len(), + ), } } @@ -417,57 +437,13 @@ pub enum InboundRequest { Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), + LightClientBootstrap(LightClientBootstrapRequest), Ping(Ping), MetaData(PhantomData), } -impl UpgradeInfo for InboundRequest { - type Info = ProtocolId; - type InfoIter = Vec; - - // add further protocols as we support more encodings/versions - fn protocol_info(&self) -> Self::InfoIter { - self.supported_protocols() - } -} - /// Implements the encoding per supported protocol for `RPCRequest`. 
impl InboundRequest { - pub fn supported_protocols(&self) -> Vec { - match self { - // add more protocols when versions/encodings are supported - InboundRequest::Status(_) => vec![ProtocolId::new( - Protocol::Status, - Version::V1, - Encoding::SSZSnappy, - )], - InboundRequest::Goodbye(_) => vec![ProtocolId::new( - Protocol::Goodbye, - Version::V1, - Encoding::SSZSnappy, - )], - InboundRequest::BlocksByRange(_) => vec![ - // V2 has higher preference when negotiating a stream - ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), - ], - InboundRequest::BlocksByRoot(_) => vec![ - // V2 has higher preference when negotiating a stream - ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), - ], - InboundRequest::Ping(_) => vec![ProtocolId::new( - Protocol::Ping, - Version::V1, - Encoding::SSZSnappy, - )], - InboundRequest::MetaData(_) => vec![ - ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), - ], - } - } - /* These functions are used in the handler for stream management */ /// Number of responses expected for this request. 
@@ -479,6 +455,7 @@ impl InboundRequest { InboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64, InboundRequest::Ping(_) => 1, InboundRequest::MetaData(_) => 1, + InboundRequest::LightClientBootstrap(_) => 1, } } @@ -491,6 +468,7 @@ impl InboundRequest { InboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, InboundRequest::Ping(_) => Protocol::Ping, InboundRequest::MetaData(_) => Protocol::MetaData, + InboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap, } } @@ -506,6 +484,7 @@ impl InboundRequest { InboundRequest::Goodbye(_) => unreachable!(), InboundRequest::Ping(_) => unreachable!(), InboundRequest::MetaData(_) => unreachable!(), + InboundRequest::LightClientBootstrap(_) => unreachable!(), } } } @@ -609,6 +588,9 @@ impl std::fmt::Display for InboundRequest { InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), InboundRequest::MetaData(_) => write!(f, "MetaData request"), + InboundRequest::LightClientBootstrap(bootstrap) => { + write!(f, "LightClientBootstrap: {}", bootstrap.root) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 70b14c33de..6ba9f6e941 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -73,6 +73,8 @@ pub struct RPCRateLimiter { bbrange_rl: Limiter, /// BlocksByRoot rate limiter. bbroots_rl: Limiter, + /// LightClientBootstrap rate limiter. + lcbootstrap_rl: Limiter, } /// Error type for non conformant requests @@ -98,6 +100,8 @@ pub struct RPCRateLimiterBuilder { bbrange_quota: Option, /// Quota for the BlocksByRoot protocol. bbroots_quota: Option, + /// Quota for the LightClientBootstrap protocol. 
+ lcbootstrap_quota: Option, } impl RPCRateLimiterBuilder { @@ -116,6 +120,7 @@ impl RPCRateLimiterBuilder { Protocol::Goodbye => self.goodbye_quota = q, Protocol::BlocksByRange => self.bbrange_quota = q, Protocol::BlocksByRoot => self.bbroots_quota = q, + Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, } self } @@ -155,6 +160,9 @@ impl RPCRateLimiterBuilder { let bbrange_quota = self .bbrange_quota .ok_or("BlocksByRange quota not specified")?; + let lcbootstrap_quote = self + .lcbootstrap_quota + .ok_or("LightClientBootstrap quota not specified")?; // create the rate limiters let ping_rl = Limiter::from_quota(ping_quota)?; @@ -163,6 +171,7 @@ impl RPCRateLimiterBuilder { let goodbye_rl = Limiter::from_quota(goodbye_quota)?; let bbroots_rl = Limiter::from_quota(bbroots_quota)?; let bbrange_rl = Limiter::from_quota(bbrange_quota)?; + let lcbootstrap_rl = Limiter::from_quota(lcbootstrap_quote)?; // check for peers to prune every 30 seconds, starting in 30 seconds let prune_every = tokio::time::Duration::from_secs(30); @@ -176,6 +185,7 @@ impl RPCRateLimiterBuilder { goodbye_rl, bbroots_rl, bbrange_rl, + lcbootstrap_rl, init_time: Instant::now(), }) } @@ -199,6 +209,7 @@ impl RPCRateLimiter { Protocol::Goodbye => &mut self.goodbye_rl, Protocol::BlocksByRange => &mut self.bbrange_rl, Protocol::BlocksByRoot => &mut self.bbroots_rl, + Protocol::LightClientBootstrap => &mut self.lcbootstrap_rl, }; check(limiter) } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index e5d81737cf..849a86f51b 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -1,12 +1,12 @@ use std::sync::Arc; use libp2p::core::connection::ConnectionId; -use types::{EthSpec, SignedBeaconBlock}; +use types::{light_client_bootstrap::LightClientBootstrap, EthSpec, SignedBeaconBlock}; use crate::rpc::{ methods::{ - BlocksByRangeRequest, 
BlocksByRootRequest, OldBlocksByRangeRequest, RPCCodedResponse, - RPCResponse, ResponseTermination, StatusMessage, + BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, + OldBlocksByRangeRequest, RPCCodedResponse, RPCResponse, ResponseTermination, StatusMessage, }, OutboundRequest, SubstreamId, }; @@ -34,6 +34,8 @@ pub enum Request { BlocksByRange(BlocksByRangeRequest), /// A request blocks root request. BlocksByRoot(BlocksByRootRequest), + /// A light client bootstrap request. + LightClientBootstrap(LightClientBootstrapRequest), } impl std::convert::From for OutboundRequest { @@ -47,6 +49,7 @@ impl std::convert::From for OutboundRequest { step: 1, }) } + Request::LightClientBootstrap(b) => OutboundRequest::LightClientBootstrap(b), Request::Status(s) => OutboundRequest::Status(s), } } @@ -66,6 +69,8 @@ pub enum Response { BlocksByRange(Option>>), /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Option>>), + /// A response to a LightClientBootstrap request. + LightClientBootstrap(LightClientBootstrap), } impl std::convert::From> for RPCCodedResponse { @@ -80,6 +85,9 @@ impl std::convert::From> for RPCCodedResponse RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), }, Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), + Response::LightClientBootstrap(b) => { + RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b)) + } } } } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 97d96d171d..a6f1ce20ad 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -259,7 +259,11 @@ impl Network { (gossipsub, update_gossipsub_scores) }; - let eth2_rpc = RPC::new(ctx.fork_context.clone(), log.clone()); + let eth2_rpc = RPC::new( + ctx.fork_context.clone(), + config.enable_light_client_server, + log.clone(), + ); let discovery = { // Build and start the discovery 
sub-behaviour @@ -978,6 +982,9 @@ impl Network { Request::Status(_) => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["status"]) } + Request::LightClientBootstrap(_) => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["light_client_bootstrap"]) + } Request::BlocksByRange { .. } => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_range"]) } @@ -1247,6 +1254,14 @@ impl Network { ); Some(event) } + InboundRequest::LightClientBootstrap(req) => { + let event = self.build_request( + peer_request_id, + peer_id, + Request::LightClientBootstrap(req), + ); + Some(event) + } } } Ok(RPCReceived::Response(id, resp)) => { @@ -1274,6 +1289,10 @@ impl Network { RPCResponse::BlocksByRoot(resp) => { self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } + // Should never be reached + RPCResponse::LightClientBootstrap(bootstrap) => { + self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap)) + } } } Ok(RPCReceived::EndOfStream(id, termination)) => { diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index f477878ac0..aa4286b9cd 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -45,6 +45,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock}; use derivative::Derivative; use futures::stream::{Stream, StreamExt}; use futures::task::Poll; +use lighthouse_network::rpc::LightClientBootstrapRequest; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, StatusMessage}, Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, @@ -156,6 +157,10 @@ const MAX_BLOCKS_BY_RANGE_QUEUE_LEN: usize = 1_024; /// will be stored before we start dropping them. 
const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024; +/// The maximum number of queued `LightClientBootstrapRequest` objects received from the network RPC that +/// will be stored before we start dropping them. +const MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN: usize = 1_024; + /// The name of the manager tokio task. const MANAGER_TASK_NAME: &str = "beacon_processor_manager"; @@ -195,6 +200,7 @@ pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const STATUS_PROCESSING: &str = "status_processing"; pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; +pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap"; pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; @@ -557,6 +563,22 @@ impl WorkEvent { } } + /// Create a new work event to process `LightClientBootstrap`s from the RPC network. + pub fn lightclient_bootstrap_request( + peer_id: PeerId, + request_id: PeerRequestId, + request: LightClientBootstrapRequest, + ) -> Self { + Self { + drop_during_sync: true, + work: Work::LightClientBootstrapRequest { + peer_id, + request_id, + request, + }, + } + } + /// Get a `str` representation of the type of work this `WorkEvent` contains. pub fn work_type(&self) -> &'static str { self.work.str_id() @@ -733,6 +755,11 @@ pub enum Work { request_id: PeerRequestId, request: BlocksByRootRequest, }, + LightClientBootstrapRequest { + peer_id: PeerId, + request_id: PeerRequestId, + request: LightClientBootstrapRequest, + }, } impl Work { @@ -755,6 +782,7 @@ impl Work { Work::Status { .. } => STATUS_PROCESSING, Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST, Work::BlocksByRootsRequest { .. } => BLOCKS_BY_ROOTS_REQUEST, + Work::LightClientBootstrapRequest { .. } => LIGHT_CLIENT_BOOTSTRAP_REQUEST, Work::UnknownBlockAttestation { .. 
} => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, } @@ -898,7 +926,7 @@ impl BeaconProcessor { let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN); let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN); let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN); - + let mut lcbootstrap_queue = FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN); // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // receive them back once they are ready (`ready_work_rx`). let (ready_work_tx, ready_work_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); @@ -1137,6 +1165,8 @@ impl BeaconProcessor { } else if let Some(item) = backfill_chain_segment.pop() { self.spawn_worker(item, toolbox); // This statement should always be the final else statement. + } else if let Some(item) = lcbootstrap_queue.pop() { + self.spawn_worker(item, toolbox); } else { // Let the journal know that a worker is freed and there's nothing else // for it to do. @@ -1237,6 +1267,9 @@ impl BeaconProcessor { Work::BlocksByRootsRequest { .. } => { bbroots_queue.push(work, work_id, &self.log) } + Work::LightClientBootstrapRequest { .. } => { + lcbootstrap_queue.push(work, work_id, &self.log) + } Work::UnknownBlockAttestation { .. } => { unknown_block_attestation_queue.push(work) } @@ -1594,6 +1627,16 @@ impl BeaconProcessor { request, ) }), + /* + * Processing of lightclient bootstrap requests from other peers. 
+ */ + Work::LightClientBootstrapRequest { + peer_id, + request_id, + request, + } => task_spawner.spawn_blocking(move || { + worker.handle_light_client_bootstrap(peer_id, request_id, request) + }), Work::UnknownBlockAttestation { message_id, peer_id, diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 37aee01716..3e354a70d2 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -11,7 +11,7 @@ use slog::{debug, error}; use slot_clock::SlotClock; use std::sync::Arc; use task_executor::TaskExecutor; -use types::{Epoch, EthSpec, Hash256, Slot}; +use types::{light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, Slot}; use super::Worker; @@ -204,6 +204,79 @@ impl Worker { ) } + /// Handle a `LightClientBootstrap` request from the peer. + pub fn handle_light_client_bootstrap( + self, + peer_id: PeerId, + request_id: PeerRequestId, + request: LightClientBootstrapRequest, + ) { + let block_root = request.root; + let state_root = match self.chain.get_blinded_block(&block_root) { + Ok(signed_block) => match signed_block { + Some(signed_block) => signed_block.state_root(), + None => { + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Bootstrap not available".into(), + request_id, + ); + return; + } + }, + Err(_) => { + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Bootstrap not available".into(), + request_id, + ); + return; + } + }; + let mut beacon_state = match self.chain.get_state(&state_root, None) { + Ok(beacon_state) => match beacon_state { + Some(state) => state, + None => { + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Bootstrap not available".into(), + request_id, + ); + return; + } + }, + Err(_) => { + self.send_error_response( + peer_id, + 
RPCResponseErrorCode::ResourceUnavailable, + "Bootstrap not available".into(), + request_id, + ); + return; + } + }; + let bootstrap = match LightClientBootstrap::from_beacon_state(&mut beacon_state) { + Ok(bootstrap) => bootstrap, + Err(_) => { + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Bootstrap not available".into(), + request_id, + ); + return; + } + }; + self.send_response( + peer_id, + Response::LightClientBootstrap(bootstrap), + request_id, + ) + } + /// Handle a `BlocksByRange` request from the peer. pub fn handle_blocks_by_range_request( self, diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index 03b877506f..5df308f259 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -168,6 +168,9 @@ impl Router { Request::BlocksByRoot(request) => self .processor .on_blocks_by_root_request(peer_id, id, request), + Request::LightClientBootstrap(request) => self + .processor + .on_lightclient_bootstrap(peer_id, id, request), } } @@ -192,6 +195,7 @@ impl Router { self.processor .on_blocks_by_root_response(peer_id, request_id, beacon_block); } + Response::LightClientBootstrap(_) => unreachable!(), } } diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index ce11cbdcef..3c9a4a81fb 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -160,6 +160,18 @@ impl Processor { )) } + /// Handle a `LightClientBootstrap` request from the peer. + pub fn on_lightclient_bootstrap( + &mut self, + peer_id: PeerId, + request_id: PeerRequestId, + request: LightClientBootstrapRequest, + ) { + self.send_beacon_processor_work(BeaconWorkEvent::lightclient_bootstrap_request( + peer_id, request_id, request, + )) + } + /// Handle a `BlocksByRange` request from the peer. 
pub fn on_blocks_by_range_request( &mut self, diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 406074fe38..472708ecb8 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -708,9 +708,6 @@ pub fn get_config( client_config.chain.builder_fallback_disable_checks = cli_args.is_present("builder-fallback-disable-checks"); - // Light client server config. - client_config.chain.enable_light_client_server = cli_args.is_present("light-client-server"); - Ok(client_config) } @@ -922,6 +919,9 @@ pub fn set_network_config( config.discv5_config.table_filter = |_| true; } + // Light client server config. + config.enable_light_client_server = cli_args.is_present("light-client-server"); + Ok(()) } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 0d70049250..b1ad50092c 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1587,7 +1587,7 @@ fn sync_eth1_chain_disable_deposit_contract_sync_flag() { fn light_client_server_default() { CommandLineTest::new() .run_with_zero_port() - .with_config(|config| assert_eq!(config.chain.enable_light_client_server, false)); + .with_config(|config| assert_eq!(config.network.enable_light_client_server, false)); } #[test] @@ -1595,5 +1595,5 @@ fn light_client_server_enabled() { CommandLineTest::new() .flag("light-client-server", None) .run_with_zero_port() - .with_config(|config| assert_eq!(config.chain.enable_light_client_server, true)); + .with_config(|config| assert_eq!(config.network.enable_light_client_server, true)); } From e9bf7f7cc1bb908a2e1aa4164a1966df591e8ab0 Mon Sep 17 00:00:00 2001 From: antondlr Date: Fri, 25 Nov 2022 07:57:10 +0000 Subject: [PATCH 31/37] remove commas from comma-separated kv pairs (#3737) ## Issue Addressed Logs are in comma separated kv list, but the values sometimes contain commas, which breaks parsing --- beacon_node/lighthouse_network/src/peer_manager/mod.rs | 2 +- .../src/peer_manager/network_behaviour.rs 
| 2 +- beacon_node/lighthouse_network/src/rpc/handler.rs | 2 +- .../network/src/beacon_processor/worker/gossip_methods.rs | 6 +++--- beacon_node/network/src/beacon_processor/worker/mod.rs | 2 +- .../network/src/beacon_processor/worker/sync_methods.rs | 2 +- beacon_node/network/src/sync/manager.rs | 2 +- beacon_node/network/src/sync/network_context.rs | 4 ++-- 8 files changed, 11 insertions(+), 11 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 8102fa82a0..a468239a9e 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -405,7 +405,7 @@ impl PeerManager { debug!(self.log, "Identified Peer"; "peer" => %peer_id, "protocol_version" => &info.protocol_version, "agent_version" => &info.agent_version, - "listening_ addresses" => ?info.listen_addrs, + "listening_addresses" => ?info.listen_addrs, "observed_address" => ?info.observed_addr, "protocols" => ?info.protocols ); diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index c84e368f16..175dfaf018 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -139,7 +139,7 @@ impl NetworkBehaviour for PeerManager { // TODO: directly emit the ban event? BanResult::BadScore => { // This is a faulty state - error!(self.log, "Connected to a banned peer, re-banning"; "peer_id" => %peer_id); + error!(self.log, "Connected to a banned peer. 
Re-banning"; "peer_id" => %peer_id); // Reban the peer self.goodbye_peer(peer_id, GoodbyeReason::Banned, ReportSource::PeerManager); return; diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 9ac062adc4..9d6229eb38 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -285,7 +285,7 @@ where } else { if !matches!(response, RPCCodedResponse::StreamTermination(..)) { // the stream is closed after sending the expected number of responses - trace!(self.log, "Inbound stream has expired, response not sent"; + trace!(self.log, "Inbound stream has expired. Response not sent"; "response" => %response, "id" => inbound_id); } return; diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index eaf5cd005c..4f1fd2cede 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -793,7 +793,7 @@ impl Worker { | Err(e @ BlockError::BlockIsAlreadyKnown) | Err(e @ BlockError::RepeatProposal { .. }) | Err(e @ BlockError::NotFinalizedDescendant { .. }) => { - debug!(self.log, "Could not verify block for gossip, ignoring the block"; + debug!(self.log, "Could not verify block for gossip. Ignoring the block"; "error" => %e); // Prevent recurring behaviour by penalizing the peer slightly. self.gossip_penalize_peer( @@ -805,7 +805,7 @@ impl Worker { return None; } Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { - debug!(self.log, "Could not verify block for gossip, ignoring the block"; + debug!(self.log, "Could not verify block for gossip. 
Ignoring the block"; "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; @@ -827,7 +827,7 @@ impl Worker { // TODO(merge): reconsider peer scoring for this event. | Err(e @ BlockError::ParentExecutionPayloadInvalid { .. }) | Err(e @ BlockError::GenesisBlock) => { - warn!(self.log, "Could not verify block for gossip, rejecting the block"; + warn!(self.log, "Could not verify block for gossip. Rejecting the block"; "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); self.gossip_penalize_peer( diff --git a/beacon_node/network/src/beacon_processor/worker/mod.rs b/beacon_node/network/src/beacon_processor/worker/mod.rs index f907c49b7d..1cbc64b632 100644 --- a/beacon_node/network/src/beacon_processor/worker/mod.rs +++ b/beacon_node/network/src/beacon_processor/worker/mod.rs @@ -38,7 +38,7 @@ impl Worker { /// Creates a log if there is an internal error. fn send_network_message(&self, message: NetworkMessage) { self.network_tx.send(message).unwrap_or_else(|e| { - debug!(self.log, "Could not send message to the network service, likely shutdown"; + debug!(self.log, "Could not send message to the network service. Likely shutdown"; "error" => %e) }); } diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 5d97894fe4..eebf751359 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -428,7 +428,7 @@ impl Worker { } else { // The block is in the future, but not too far. debug!( - self.log, "Block is slightly ahead of our slot clock, ignoring."; + self.log, "Block is slightly ahead of our slot clock. 
Ignoring."; "present_slot" => present_slot, "block_slot" => block_slot, "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index cdef904715..230c883a93 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -633,7 +633,7 @@ impl SyncManager { // Some logs. if dropped_single_blocks_requests > 0 || dropped_parent_chain_requests > 0 { - debug!(self.log, "Execution engine not online, dropping active requests."; + debug!(self.log, "Execution engine not online. Dropping active requests."; "dropped_single_blocks_requests" => dropped_single_blocks_requests, "dropped_parent_chain_requests" => dropped_parent_chain_requests, ); diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 45ade7034c..c81fed2443 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -242,7 +242,7 @@ impl SyncNetworkContext { source: ReportSource::SyncService, }) .unwrap_or_else(|_| { - warn!(self.log, "Could not report peer, channel failed"); + warn!(self.log, "Could not report peer: channel failed"); }); } @@ -257,7 +257,7 @@ impl SyncNetworkContext { msg, }) .unwrap_or_else(|e| { - warn!(self.log, "Could not report peer, channel failed"; "error"=> %e); + warn!(self.log, "Could not report peer: channel failed"; "error"=> %e); }); } From 969ff240cde7f533ac2c6e0deef54cae57d1a23f Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 25 Nov 2022 07:57:11 +0000 Subject: [PATCH 32/37] Add CLI flag to opt in to world-readable log files (#3747) ## Issue Addressed #3732 ## Proposed Changes Add a CLI flag to allow users to opt out of the restrictive permissions of the log files. ## Additional Info This is not recommended for most users. The log files can contain sensitive information such as validator indices, public keys and API tokens (see #2438). 
However some users using a multi-user setup may find this helpful if they understand the risks involved. --- lcli/src/main.rs | 1 + lighthouse/environment/src/lib.rs | 4 +++- lighthouse/src/main.rs | 12 ++++++++++++ lighthouse/tests/beacon_node.rs | 17 +++++++++++++++++ testing/simulator/src/eth1_sim.rs | 1 + testing/simulator/src/no_eth1_sim.rs | 1 + testing/simulator/src/sync_sim.rs | 1 + 7 files changed, 36 insertions(+), 1 deletion(-) diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 8b233d847b..9d548b0499 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -789,6 +789,7 @@ fn run( max_log_size: 0, max_log_number: 0, compression: false, + is_restricted: true, }) .map_err(|e| format!("should start logger: {:?}", e))? .build() diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index c5b58581d2..fad7edeb19 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -55,6 +55,7 @@ pub struct LoggerConfig { pub max_log_size: u64, pub max_log_number: usize, pub compression: bool, + pub is_restricted: bool, } impl Default for LoggerConfig { fn default() -> Self { @@ -68,6 +69,7 @@ impl Default for LoggerConfig { max_log_size: 200, max_log_number: 5, compression: false, + is_restricted: true, } } } @@ -257,7 +259,7 @@ impl EnvironmentBuilder { .rotate_size(config.max_log_size) .rotate_keep(config.max_log_number) .rotate_compress(config.compression) - .restrict_permissions(true) + .restrict_permissions(config.is_restricted) .build() .map_err(|e| format!("Unable to build file logger: {}", e))?; diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 9dc0902e06..da72204f96 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -129,6 +129,15 @@ fn main() { to store old logs.") .global(true), ) + .arg( + Arg::with_name("logfile-no-restricted-perms") + .long("logfile-no-restricted-perms") + .help( + "If present, log files will be generated as world-readable meaning they can be 
read by \ + any user on the machine. Note that logs can often contain sensitive information \ + about your validator and so this flag should be used with caution.") + .global(true), + ) .arg( Arg::with_name("log-format") .long("log-format") @@ -407,6 +416,8 @@ fn run( let logfile_compress = matches.is_present("logfile-compress"); + let logfile_restricted = !matches.is_present("logfile-no-restricted-perms"); + // Construct the path to the log file. let mut log_path: Option = clap_utils::parse_optional(matches, "logfile")?; if log_path.is_none() { @@ -446,6 +457,7 @@ fn run( max_log_size: logfile_max_size * 1_024 * 1_024, max_log_number: logfile_max_number, compression: logfile_compress, + is_restricted: logfile_restricted, }; let builder = environment_builder.initialize_logger(logger_config.clone())?; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index b1ad50092c..7b46fd6a91 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1548,6 +1548,23 @@ fn enabled_disable_log_timestamp_flag() { assert!(config.logger_config.disable_log_timestamp); }); } +#[test] +fn logfile_restricted_perms_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert!(config.logger_config.is_restricted); + }); +} +#[test] +fn logfile_no_restricted_perms_flag() { + CommandLineTest::new() + .flag("logfile-no-restricted-perms", None) + .run_with_zero_port() + .with_config(|config| { + assert!(config.logger_config.is_restricted == false); + }); +} #[test] fn sync_eth1_chain_default() { diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 3d59013f2a..8284bff609 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -67,6 +67,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { max_log_size: 0, max_log_number: 0, compression: false, + is_restricted: true, })? .multi_threaded_tokio_runtime()? 
.build()?; diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index 06f9e9a4f3..53c4447da2 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -52,6 +52,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { max_log_size: 0, max_log_number: 0, compression: false, + is_restricted: true, })? .multi_threaded_tokio_runtime()? .build()?; diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index 00e439e4c9..1c8b41f057 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -56,6 +56,7 @@ fn syncing_sim( max_log_size: 0, max_log_number: 0, compression: false, + is_restricted: true, })? .multi_threaded_tokio_runtime()? .build()?; From c881b803679760995d637c60c0afc86e0012cca4 Mon Sep 17 00:00:00 2001 From: Mac L Date: Mon, 28 Nov 2022 00:22:53 +0000 Subject: [PATCH 33/37] Add CLI flag for gui requirements (#3731) ## Issue Addressed #3723 ## Proposed Changes Adds a new CLI flag `--gui` which enables all the various flags required for the gui to function properly. Currently enables the `--http` and `--validator-monitor-auto` flags. --- beacon_node/src/cli.rs | 8 ++++++++ beacon_node/src/config.rs | 6 ++++++ lighthouse/tests/beacon_node.rs | 11 +++++++++++ 3 files changed, 25 insertions(+) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index b00d56513c..44a995176d 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -875,4 +875,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { [experimental]") .takes_value(false) ) + .arg( + Arg::with_name("gui") + .long("gui") + .hidden(true) + .help("Enable the graphical user interface and all its requirements. 
\ + This is equivalent to --http and --validator-monitor-auto.") + .takes_value(false) + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 472708ecb8..85f0224982 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -708,6 +708,12 @@ pub fn get_config( client_config.chain.builder_fallback_disable_checks = cli_args.is_present("builder-fallback-disable-checks"); + // Graphical user interface config. + if cli_args.is_present("gui") { + client_config.http_api.enabled = true; + client_config.validator_monitor_auto = true; + } + Ok(client_config) } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 7b46fd6a91..d39235cb13 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1614,3 +1614,14 @@ fn light_client_server_enabled() { .run_with_zero_port() .with_config(|config| assert_eq!(config.network.enable_light_client_server, true)); } + +#[test] +fn gui_flag() { + CommandLineTest::new() + .flag("gui", None) + .run_with_zero_port() + .with_config(|config| { + assert!(config.http_api.enabled); + assert!(config.validator_monitor_auto); + }); +} From 27790170760f33b2b4b5344dad10fb225a468bb1 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 28 Nov 2022 07:36:52 +0000 Subject: [PATCH 34/37] Gossipsub fast message id change (#3755) For improved consistency, this mixes in the topic into our fast message id for more consistent tracking of messages across topics. --- beacon_node/lighthouse_network/src/config.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index c8ef8809d4..0ae3d9a23b 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -288,9 +288,11 @@ impl From for NetworkLoad { /// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork. 
pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> GossipsubConfig { // The function used to generate a gossipsub message id - // We use the first 8 bytes of SHA256(data) for content addressing - let fast_gossip_message_id = - |message: &RawGossipsubMessage| FastMessageId::from(&Sha256::digest(&message.data)[..8]); + // We use the first 8 bytes of SHA256(topic, data) for content addressing + let fast_gossip_message_id = |message: &RawGossipsubMessage| { + let data = [message.topic.as_str().as_bytes(), &message.data].concat(); + FastMessageId::from(&Sha256::digest(data)[..8]) + }; fn prefix( prefix: [u8; 4], message: &GossipsubMessage, From 99ec9d9bafd21cee3197162455c41f4e388559ed Mon Sep 17 00:00:00 2001 From: kevinbogner Date: Mon, 28 Nov 2022 10:05:43 +0000 Subject: [PATCH 35/37] Add Run a Node guide (#3681) ## Issue Addressed Related to #3672 ## Proposed Changes - Added a guide to run a node. Mainly, copy and paste from 'Merge Migration' and 'Checkpoint Sync'. - Ranked it high in ToC: - Introduction - Installation - Run a Node - Become a Validator ... - Hid 'Merge Migration' in ToC. ## Additional Info - Should I add/rephrase/delete something? - Now there is some redundancy: - 'Run a node' and 'Checkpoint Sync' contain similar information. - Same for 'Run a node' and 'Become a Validator'. 
Co-authored-by: kevinbogner <114221396+kevinbogner@users.noreply.github.com> Co-authored-by: Michael Sproul --- book/src/SUMMARY.md | 7 +- book/src/intro.md | 1 + book/src/merge-migration.md | 1 + book/src/run_a_node.md | 171 ++++++++++++++++++++++++++++++++++++ 4 files changed, 177 insertions(+), 3 deletions(-) create mode 100644 book/src/run_a_node.md diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index d05677465b..a43fa10e64 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -1,9 +1,6 @@ # Summary * [Introduction](./intro.md) -* [Become a Validator](./mainnet-validator.md) - * [Become a Testnet Validator](./testnet-validator.md) -* [Merge Migration](./merge-migration.md) * [Installation](./installation.md) * [System Requirements](./system-requirements.md) * [Pre-Built Binaries](./installation-binaries.md) @@ -13,6 +10,9 @@ * [Cross-Compiling](./cross-compiling.md) * [Homebrew](./homebrew.md) * [Update Priorities](./installation-priorities.md) +* [Run a Node](./run_a_node.md) +* [Become a Validator](./mainnet-validator.md) + * [Become a Testnet Validator](./testnet-validator.md) * [Key Management](./key-management.md) * [Create a wallet](./wallet-create.md) * [Create a validator](./validator-create.md) @@ -46,6 +46,7 @@ * [Pre-Releases](./advanced-pre-releases.md) * [Release Candidates](./advanced-release-candidates.md) * [MEV and Lighthouse](./builders.md) + * [Merge Migration](./merge-migration.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/intro.md b/book/src/intro.md index fca075892b..ef16913d68 100644 --- a/book/src/intro.md +++ b/book/src/intro.md @@ -18,6 +18,7 @@ We implement the specification as defined in the You may read this book from start to finish, or jump to some of these topics: - Follow the [Installation Guide](./installation.md) to install Lighthouse. +- Run your very [own beacon node](./run_a_node.md). 
- Learn about [becoming a mainnet validator](./mainnet-validator.md). - Get hacking with the [Development Environment Guide](./setup.md). - Utilize the whole stack by starting a [local testnet](./setup.md#local-testnets). diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index c0ba048997..08f1b51e42 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -61,6 +61,7 @@ the relevant page for your execution engine for the required flags: - [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients) - [Nethermind: Running Nethermind Post Merge](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) - [Besu: Prepare For The Merge](https://besu.hyperledger.org/en/stable/HowTo/Upgrade/Prepare-for-The-Merge/) +- [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) Once you have configured your execution engine to open up the engine API (usually on port 8551) you should add the URL to your `lighthouse bn` flags with `--execution-endpoint `, as well as diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md new file mode 100644 index 0000000000..5ce42aa630 --- /dev/null +++ b/book/src/run_a_node.md @@ -0,0 +1,171 @@ +# Run a Node + +This document provides detail for users who want to run a Lighthouse beacon node. +You should be finished with one [Installation](./installation.md) method of your choice to continue with the following steps: + +1. Set up an [execution node](#step-1-set-up-an-execution-node); +1. Enable [checkpoint sync](#step-2-choose-a-checkpoint-sync-provider); +1. Run [Lighthouse](#step-3-run-lighthouse); +1. [Check logs](#step-4-check-logs); and +1. [Further readings](#step-5-further-readings). + +Checkpoint sync is *optional*; however, we recommend it since it is substantially faster +than syncing from genesis while still providing the same functionality. 
+ +## Step 1: Set up an execution node + +The Lighthouse beacon node *must* connect to an execution engine in order to validate the transactions +present in blocks. Two flags are used to configure this connection: + +- `--execution-endpoint`: the *URL* of the execution engine API. Often this will be + `http://localhost:8551`. +- `--execution-jwt`: the *path* to the file containing the JWT secret shared by Lighthouse and the + execution engine. This is a mandatory form of authentication that ensures that Lighthouse +has authority to control the execution engine. + +Each execution engine has its own flags for configuring the engine API and JWT. +Please consult the relevant page of your execution engine for the required flags: + +- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients) +- [Nethermind: Running Nethermind & CL](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) +- [Besu: Connect to Mainnet](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/) +- [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) + +The execution engine connection must be *exclusive*, i.e. you must have one execution node +per beacon node. The reason for this is that the beacon node _controls_ the execution node. + +## Step 2: Choose a checkpoint sync provider + +Lighthouse supports fast sync from a recent finalized checkpoint. +The checkpoint sync is done using a [public endpoint](#use-a-community-checkpoint-sync-endpoint) +provided by the Ethereum community. + +In [step 3](#step-3-run-lighthouse), when running Lighthouse, +we will enable checkpoint sync by providing the URL to the `--checkpoint-sync-url` flag. 
+ +### Use a community checkpoint sync endpoint + +The Ethereum community provides various [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) for you to choose from for your initial checkpoint state. Select one for your network and use it as the URL. + +For example, the URL for Sigma Prime's checkpoint sync server for mainnet is `https://mainnet.checkpoint.sigp.io`, +which we will use in [step 3](#step-3-run-lighthouse). + +## Step 3: Run Lighthouse + +To run Lighthouse, we use the three flags from the steps above: +- `--execution-endpoint`; +- `--execution-jwt`; and +- `--checkpoint-sync-url`. + +Additionally, we run Lighthouse with the `--network` flag, which selects a network: + +- `lighthouse` (no flag): Mainnet. +- `lighthouse --network mainnet`: Mainnet. +- `lighthouse --network goerli`: Goerli (testnet). + +Using the correct `--network` flag is very important; using the wrong flag can +result in penalties, slashings or lost deposits. As a rule of thumb, *always* +provide a `--network` flag instead of relying on the default. + +For the testnets we support [Goerli](https://goerli.net/) (`--network goerli`), +[Sepolia](https://sepolia.dev/) (`--network sepolia`), and [Gnosis chain](https://www.gnosis.io/) (`--network gnosis`). + +Minor modifications depend on if you want to run your node while [staking](#staking) or [non-staking](#non-staking). +In the following, we will provide examples of what a Lighthouse setup could look like. + +### Staking + +``` +lighthouse bn \ + --network mainnet \ + --execution-endpoint http://localhost:8551 \ + --execution-jwt /secrets/jwt.hex \ + --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \ + --http +``` + +A Lighthouse beacon node can be configured to expose an HTTP server by supplying the `--http` flag. +The default listen address is `127.0.0.1:5052`. +The HTTP API is required for the beacon node to accept connections from the *validator client*, which manages keys. 
+ +### Non-staking + +``` +lighthouse bn \ + --network mainnet \ + --execution-endpoint http://localhost:8551 \ + --execution-jwt /secrets/jwt.hex \ + --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \ + --disable-deposit-contract-sync +``` + +Since we are not staking, we can use the `--disable-deposit-contract-sync` flag. + +--- + +Once Lighthouse runs, we can monitor the logs to see if it is syncing correctly. + +## Step 4: Check logs +Several logs help you identify if Lighthouse is running correctly. + +### Logs - Checkpoint sync +Lighthouse will print a message to indicate that checkpoint sync is being used: + +``` +INFO Starting checkpoint sync remote_url: http://remote-bn:8000/, service: beacon +``` + +After a short time (usually less than a minute), it will log the details of the checkpoint +loaded from the remote beacon node: + +``` +INFO Loaded checkpoint block and state state_root: 0xe8252c68784a8d5cc7e5429b0e95747032dd1dcee0d1dc9bdaf6380bf90bc8a6, block_root: 0x5508a20147299b1a7fe9dbea1a8b3bf979f74c52e7242039bd77cbff62c0695a, slot: 2034720, service: beacon +``` + +Once the checkpoint is loaded Lighthouse will sync forwards to the head of the chain. + +If a validator client is connected to the node then it will be able to start completing its duties +as soon as forwards sync completes. + +> **Security Note**: You should cross-reference the `block_root` and `slot` of the loaded checkpoint +> against a trusted source like another [public endpoint](https://eth-clients.github.io/checkpoint-sync-endpoints/), +> a friend's node, or a block explorer. + +#### Backfilling Blocks + +Once forwards sync completes, Lighthouse will commence a "backfill sync" to download the blocks +from the checkpoint back to genesis. 
+
+The beacon node will log messages similar to the following each minute while it completes backfill
+sync:
+
+```
+INFO Downloading historical blocks est_time: 5 hrs 0 mins, speed: 111.96 slots/sec, distance: 2020451 slots (40 weeks 0 days), service: slot_notifier
+```
+
+Once backfill is complete, an `INFO Historical block download complete` log will be emitted.
+
+Check out the [FAQ](./checkpoint-sync.md#faq) for more information on checkpoint sync.
+
+### Logs - Syncing
+
+You should see that Lighthouse remains in sync and marks blocks
+as `verified` indicating that they have been processed successfully by the execution engine:
+
+```
+INFO Synced, slot: 3690668, block: 0x1244…cb92, epoch: 115333, finalized_epoch: 115331, finalized_root: 0x0764…2a3d, exec_hash: 0x929c…1ff6 (verified), peers: 78
+```
+
+
+## Step 5: Further readings
+
+Several other resources are the next logical step to explore after running your beacon node:
+
+- Learn how to [become a validator](./mainnet-validator.md);
+- Explore how to [manage your keys](./key-management.md);
+- Research on [validator management](./validator-management.md);
+- Dig into the [APIs](./api.md) that the beacon node and validator client provide;
+- Study even more about [checkpoint sync](./checkpoint-sync.md); or
+- Investigate what steps had to be taken in the past to execute a smooth [merge migration](./merge-migration.md).
+
+Finally, if you are struggling with anything, join our [Discord](https://discord.gg/cyAszAh). We are happy to help!
\ No newline at end of file

From a2969ba7de72ddbf86daa70e88582b6387f42431 Mon Sep 17 00:00:00 2001
From: Paul Hauner
Date: Tue, 29 Nov 2022 05:51:42 +0000
Subject: [PATCH 36/37] Improve debugging experience for builder proposals (#3725)

## Issue Addressed

NA

## Proposed Changes

This PR sets out to improve the logging/metrics experience when interacting with the builder. Namely, it:

- Adds/changes metrics (see "Metrics Changes" section).
- Adds new logs which show the duration of requests to the builder/local EL.
- Refactors existing logs for consistency and so that the `parent_hash` is included in all relevant logs (we can grep for this field when trying to trace the flow of block production).

Additionally, when I was implementing this PR I noticed that we skip some verification of the builder payload in the scenario where the builder returns `Ok` but the local EL returns with `Err`. Namely, we were skipping the bid signature and other values like parent hash and prev randao. In this PR I've changed it so we *always* check these values and reject the bid if they're incorrect. With these changes, we'll sometimes choose to skip a proposal rather than propose something invalid -- that's the only side-effect to the changes that I can see.

## Metrics Changes

- Changed: `execution_layer_request_times`:
    - `method = "get_blinded_payload_local"`: time taken to get a payload from a local EE.
    - `method = "get_blinded_payload_builder"`: time taken to get a blinded payload from a builder.
    - `method = "post_blinded_payload_builder"`: time taken to get a builder to reveal a payload they've previously supplied us.
- `execution_layer_get_payload_outcome`
    - `outcome = "success"`: we successfully produced a payload from a builder or local EE.
    - `outcome = "failure"`: we were unable to get a payload from a builder or local EE.
- New: `execution_layer_builder_reveal_payload_outcome`
    - `outcome = "success"`: a builder revealed a payload from a signed, blinded block.
    - `outcome = "failure"`: the builder did not reveal the payload.
- New: `execution_layer_get_payload_source`
    - `type = "builder"`: we used a payload from a builder to produce a block.
    - `type = "local"`: we used a payload from a local EE to produce a block.
- New: `execution_layer_get_payload_builder_rejections` has a `reason` field to describe why we rejected a payload from a builder.
- New: `execution_layer_payload_bids` tracks the bid (in gwei) from the builder or local EE (local EE not yet supported, waiting on EEs to expose the value). Can only record values that fit inside an i64 (roughly 9 million ETH). ## Additional Info NA --- beacon_node/execution_layer/src/lib.rs | 530 +++++++++++++++++---- beacon_node/execution_layer/src/metrics.rs | 36 +- 2 files changed, 462 insertions(+), 104 deletions(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index f222f28c33..2a2225cbdf 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -12,6 +12,7 @@ pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; use engines::{Engine, EngineError}; pub use engines::{EngineState, ForkChoiceState}; +use eth2::types::{builder_bid::SignedBuilderBid, ForkVersionedResponse}; use fork_choice::ForkchoiceUpdateParameters; use lru::LruCache; use payload_status::process_payload_status; @@ -21,11 +22,13 @@ use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use std::collections::HashMap; +use std::fmt; use std::future::Future; use std::io::Write; use std::path::PathBuf; use std::sync::Arc; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +use strum::AsRefStr; use task_executor::TaskExecutor; use tokio::{ sync::{Mutex, MutexGuard, RwLock}, @@ -34,7 +37,7 @@ use tokio::{ use tokio_stream::wrappers::WatchStream; use types::{ BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, ForkName, - ProposerPreparationData, PublicKeyBytes, SignedBeaconBlock, Slot, + ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256, }; mod engine_api; @@ -66,6 +69,14 @@ const DEFAULT_SUGGESTED_FEE_RECIPIENT: [u8; 20] = const CONFIG_POLL_INTERVAL: Duration = Duration::from_secs(60); 
+/// A payload alongside some information about where it came from. +enum ProvenancedPayload

{ + /// A good ol' fashioned farm-to-table payload from your local EE. + Local(P), + /// A payload from a builder (e.g. mev-boost). + Builder(P), +} + #[derive(Debug)] pub enum Error { NoEngine, @@ -73,6 +84,7 @@ pub enum Error { ApiError(ApiError), Builder(builder_client::Error), NoHeaderFromBuilder, + CannotProduceHeader, EngineError(Box), NotSynced, ShuttingDown, @@ -550,7 +562,7 @@ impl ExecutionLayer { ) -> Result { let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; - match Payload::block_type() { + let payload_result = match Payload::block_type() { BlockType::Blinded => { let _timer = metrics::start_timer_vec( &metrics::EXECUTION_LAYER_REQUEST_TIMES, @@ -580,6 +592,40 @@ impl ExecutionLayer { forkchoice_update_params, ) .await + .map(ProvenancedPayload::Local) + } + }; + + // Track some metrics and return the result. + match payload_result { + Ok(ProvenancedPayload::Local(payload)) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, + &[metrics::SUCCESS], + ); + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE, + &[metrics::LOCAL], + ); + Ok(payload) + } + Ok(ProvenancedPayload::Builder(payload)) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, + &[metrics::SUCCESS], + ); + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE, + &[metrics::BUILDER], + ); + Ok(payload) + } + Err(e) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, + &[metrics::FAILURE], + ); + Err(e) } } } @@ -594,7 +640,7 @@ impl ExecutionLayer { forkchoice_update_params: ForkchoiceUpdateParameters, builder_params: BuilderParams, spec: &ChainSpec, - ) -> Result { + ) -> Result, Error> { if let Some(builder) = self.builder() { let slot = builder_params.slot; let pubkey = builder_params.pubkey; @@ -608,130 +654,202 @@ impl ExecutionLayer { "pubkey" => ?pubkey, "parent_hash" => ?parent_hash, ); - let (relay_result, 
local_result) = tokio::join!( - builder.get_builder_header::(slot, parent_hash, &pubkey), - self.get_full_payload_caching( - parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, - forkchoice_update_params, - ) + + // Wait for the builder *and* local EL to produce a payload (or return an error). + let ((relay_result, relay_duration), (local_result, local_duration)) = tokio::join!( + timed_future(metrics::GET_BLINDED_PAYLOAD_BUILDER, async { + builder + .get_builder_header::(slot, parent_hash, &pubkey) + .await + }), + timed_future(metrics::GET_BLINDED_PAYLOAD_LOCAL, async { + self.get_full_payload_caching::( + parent_hash, + timestamp, + prev_randao, + suggested_fee_recipient, + forkchoice_update_params, + ) + .await + }) + ); + + info!( + self.log(), + "Requested blinded execution payload"; + "relay_fee_recipient" => match &relay_result { + Ok(Some(r)) => format!("{:?}", r.data.message.header.fee_recipient()), + Ok(None) => "empty response".to_string(), + Err(_) => "request failed".to_string(), + }, + "relay_response_ms" => relay_duration.as_millis(), + "local_fee_recipient" => match &local_result { + Ok(header) => format!("{:?}", header.fee_recipient()), + Err(_) => "request failed".to_string() + }, + "local_response_ms" => local_duration.as_millis(), + "parent_hash" => ?parent_hash, ); return match (relay_result, local_result) { (Err(e), Ok(local)) => { warn!( self.log(), - "Unable to retrieve a payload from a connected \ - builder, falling back to the local execution client: {e:?}" + "Builder error when requesting payload"; + "info" => "falling back to local execution client", + "relay_error" => ?e, + "local_block_hash" => ?local.block_hash(), + "parent_hash" => ?parent_hash, ); - Ok(local) + Ok(ProvenancedPayload::Local(local)) } (Ok(None), Ok(local)) => { info!( self.log(), - "No payload provided by connected builder. 
\ - Attempting to propose through local execution engine" + "Builder did not return a payload"; + "info" => "falling back to local execution client", + "local_block_hash" => ?local.block_hash(), + "parent_hash" => ?parent_hash, ); - Ok(local) + Ok(ProvenancedPayload::Local(local)) } (Ok(Some(relay)), Ok(local)) => { - let is_signature_valid = relay.data.verify_signature(spec); - let header = relay.data.message.header; + let header = &relay.data.message.header; info!( self.log(), - "Received a payload header from the connected builder"; - "block_hash" => ?header.block_hash(), + "Received local and builder payloads"; + "relay_block_hash" => ?header.block_hash(), + "local_block_hash" => ?local.block_hash(), + "parent_hash" => ?parent_hash, ); - let relay_value = relay.data.message.value; - let configured_value = self.inner.builder_profit_threshold; - if relay_value < configured_value { - info!( - self.log(), - "The value offered by the connected builder does not meet \ - the configured profit threshold. Using local payload."; - "configured_value" => ?configured_value, "relay_value" => ?relay_value - ); - Ok(local) - } else if header.parent_hash() != parent_hash { - warn!( - self.log(), - "Invalid parent hash from connected builder, \ - falling back to local execution engine." - ); - Ok(local) - } else if header.prev_randao() != prev_randao { - warn!( - self.log(), - "Invalid prev randao from connected builder, \ - falling back to local execution engine." - ); - Ok(local) - } else if header.timestamp() != local.timestamp() { - warn!( - self.log(), - "Invalid timestamp from connected builder, \ - falling back to local execution engine." - ); - Ok(local) - } else if header.block_number() != local.block_number() { - warn!( - self.log(), - "Invalid block number from connected builder, \ - falling back to local execution engine." 
- ); - Ok(local) - } else if !matches!(relay.version, Some(ForkName::Merge)) { - // Once fork information is added to the payload, we will need to - // check that the local and relay payloads match. At this point, if - // we are requesting a payload at all, we have to assume this is - // the Bellatrix fork. - warn!( - self.log(), - "Invalid fork from connected builder, falling \ - back to local execution engine." - ); - Ok(local) - } else if !is_signature_valid { - let pubkey_bytes = relay.data.message.pubkey; - warn!(self.log(), "Invalid signature for pubkey {pubkey_bytes} on \ - bid from connected builder, falling back to local execution engine."); - Ok(local) - } else { - if header.fee_recipient() != suggested_fee_recipient { + match verify_builder_bid( + &relay, + parent_hash, + prev_randao, + timestamp, + Some(local.block_number()), + self.inner.builder_profit_threshold, + spec, + ) { + Ok(()) => { + Ok(ProvenancedPayload::Builder(relay.data.message.header)) + } + Err(reason) if !reason.payload_invalid() => { info!( self.log(), - "Fee recipient from connected builder does \ - not match, using it anyways." + "Builder payload ignored"; + "info" => "using local payload", + "reason" => %reason, + "relay_block_hash" => ?header.block_hash(), + "parent_hash" => ?parent_hash, ); + Ok(ProvenancedPayload::Local(local)) + } + Err(reason) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS, + &[reason.as_ref().as_ref()], + ); + warn!( + self.log(), + "Builder returned invalid payload"; + "info" => "using local payload", + "reason" => %reason, + "relay_block_hash" => ?header.block_hash(), + "parent_hash" => ?parent_hash, + ); + Ok(ProvenancedPayload::Local(local)) } - Ok(header) } } - (relay_result, Err(local_error)) => { - warn!(self.log(), "Failure from local execution engine. Attempting to \ - propose through connected builder"; "error" => ?local_error); - relay_result - .map_err(Error::Builder)? 
- .ok_or(Error::NoHeaderFromBuilder) - .map(|d| d.data.message.header) + (Ok(Some(relay)), Err(local_error)) => { + let header = &relay.data.message.header; + + info!( + self.log(), + "Received builder payload with local error"; + "relay_block_hash" => ?header.block_hash(), + "local_error" => ?local_error, + "parent_hash" => ?parent_hash, + ); + + match verify_builder_bid( + &relay, + parent_hash, + prev_randao, + timestamp, + None, + self.inner.builder_profit_threshold, + spec, + ) { + Ok(()) => { + Ok(ProvenancedPayload::Builder(relay.data.message.header)) + } + // If the payload is valid then use it. The local EE failed + // to produce a payload so we have no alternative. + Err(e) if !e.payload_invalid() => { + Ok(ProvenancedPayload::Builder(relay.data.message.header)) + } + Err(reason) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS, + &[reason.as_ref().as_ref()], + ); + crit!( + self.log(), + "Builder returned invalid payload"; + "info" => "no local payload either - unable to propose block", + "reason" => %reason, + "relay_block_hash" => ?header.block_hash(), + "parent_hash" => ?parent_hash, + ); + Err(Error::CannotProduceHeader) + } + } + } + (Err(relay_error), Err(local_error)) => { + crit!( + self.log(), + "Unable to produce execution payload"; + "info" => "the local EL and builder both failed - unable to propose block", + "relay_error" => ?relay_error, + "local_error" => ?local_error, + "parent_hash" => ?parent_hash, + ); + + Err(Error::CannotProduceHeader) + } + (Ok(None), Err(local_error)) => { + crit!( + self.log(), + "Unable to produce execution payload"; + "info" => "the local EL failed and the builder returned nothing - \ + the block proposal will be missed", + "local_error" => ?local_error, + "parent_hash" => ?parent_hash, + ); + + Err(Error::CannotProduceHeader) } }; } - ChainHealth::Unhealthy(condition) => { - info!(self.log(), "Due to poor chain health the local execution engine will be used \ - for 
payload construction. To adjust chain health conditions \ - Use `builder-fallback` prefixed flags"; - "failed_condition" => ?condition) - } + ChainHealth::Unhealthy(condition) => info!( + self.log(), + "Chain is unhealthy, using local payload"; + "info" => "this helps protect the network. the --builder-fallback flags \ + can adjust the expected health conditions.", + "failed_condition" => ?condition + ), // Intentional no-op, so we never attempt builder API proposals pre-merge. ChainHealth::PreMerge => (), - ChainHealth::Optimistic => info!(self.log(), "The local execution engine is syncing \ - so the builder network cannot safely be used. Attempting \ - to build a block with the local execution engine"), + ChainHealth::Optimistic => info!( + self.log(), + "Chain is optimistic; can't build payload"; + "info" => "the local execution engine is syncing and the builder network \ + cannot safely be used - unable to propose block" + ), } } self.get_full_payload_caching( @@ -742,6 +860,7 @@ impl ExecutionLayer { forkchoice_update_params, ) .await + .map(ProvenancedPayload::Local) } /// Get a full payload without caching its result in the execution layer's payload cache. 
@@ -1404,18 +1523,223 @@ impl ExecutionLayer { "Sending block to builder"; "root" => ?block_root, ); + if let Some(builder) = self.builder() { - builder - .post_builder_blinded_blocks(block) - .await - .map_err(Error::Builder) - .map(|d| d.data) + let (payload_result, duration) = + timed_future(metrics::POST_BLINDED_PAYLOAD_BUILDER, async { + builder + .post_builder_blinded_blocks(block) + .await + .map_err(Error::Builder) + .map(|d| d.data) + }) + .await; + + match &payload_result { + Ok(payload) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME, + &[metrics::SUCCESS], + ); + info!( + self.log(), + "Builder successfully revealed payload"; + "relay_response_ms" => duration.as_millis(), + "block_root" => ?block_root, + "fee_recipient" => ?payload.fee_recipient, + "block_hash" => ?payload.block_hash, + "parent_hash" => ?payload.parent_hash + ) + } + Err(e) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME, + &[metrics::FAILURE], + ); + crit!( + self.log(), + "Builder failed to reveal payload"; + "info" => "this relay failure may cause a missed proposal", + "error" => ?e, + "relay_response_ms" => duration.as_millis(), + "block_root" => ?block_root, + "parent_hash" => ?block + .message() + .execution_payload() + .map(|payload| format!("{}", payload.parent_hash())) + .unwrap_or_else(|_| "unknown".to_string()) + ) + } + } + + payload_result } else { Err(Error::NoPayloadBuilder) } } } +#[derive(AsRefStr)] +#[strum(serialize_all = "snake_case")] +enum InvalidBuilderPayload { + LowValue { + profit_threshold: Uint256, + payload_value: Uint256, + }, + ParentHash { + payload: ExecutionBlockHash, + expected: ExecutionBlockHash, + }, + PrevRandao { + payload: Hash256, + expected: Hash256, + }, + Timestamp { + payload: u64, + expected: u64, + }, + BlockNumber { + payload: u64, + expected: Option, + }, + Fork { + payload: Option, + expected: ForkName, + }, + Signature { + signature: Signature, + 
pubkey: PublicKeyBytes, + }, +} + +impl InvalidBuilderPayload { + /// Returns `true` if a payload is objectively invalid and should never be included on chain. + fn payload_invalid(&self) -> bool { + match self { + // A low-value payload isn't invalid, it should just be avoided if possible. + InvalidBuilderPayload::LowValue { .. } => false, + InvalidBuilderPayload::ParentHash { .. } => true, + InvalidBuilderPayload::PrevRandao { .. } => true, + InvalidBuilderPayload::Timestamp { .. } => true, + InvalidBuilderPayload::BlockNumber { .. } => true, + InvalidBuilderPayload::Fork { .. } => true, + InvalidBuilderPayload::Signature { .. } => true, + } + } +} + +impl fmt::Display for InvalidBuilderPayload { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + InvalidBuilderPayload::LowValue { + profit_threshold, + payload_value, + } => write!( + f, + "payload value of {} does not meet user-configured profit-threshold of {}", + payload_value, profit_threshold + ), + InvalidBuilderPayload::ParentHash { payload, expected } => { + write!(f, "payload block hash was {} not {}", payload, expected) + } + InvalidBuilderPayload::PrevRandao { payload, expected } => { + write!(f, "payload prev randao was {} not {}", payload, expected) + } + InvalidBuilderPayload::Timestamp { payload, expected } => { + write!(f, "payload timestamp was {} not {}", payload, expected) + } + InvalidBuilderPayload::BlockNumber { payload, expected } => { + write!(f, "payload block number was {} not {:?}", payload, expected) + } + InvalidBuilderPayload::Fork { payload, expected } => { + write!(f, "payload fork was {:?} not {}", payload, expected) + } + InvalidBuilderPayload::Signature { signature, pubkey } => write!( + f, + "invalid payload signature {} for pubkey {}", + signature, pubkey + ), + } + } +} + +/// Perform some cursory, non-exhaustive validation of the bid returned from the builder. 
+fn verify_builder_bid>( + bid: &ForkVersionedResponse>, + parent_hash: ExecutionBlockHash, + prev_randao: Hash256, + timestamp: u64, + block_number: Option, + profit_threshold: Uint256, + spec: &ChainSpec, +) -> Result<(), Box> { + let is_signature_valid = bid.data.verify_signature(spec); + let header = &bid.data.message.header; + let payload_value = bid.data.message.value; + + // Avoid logging values that we can't represent with our Prometheus library. + let payload_value_gwei = bid.data.message.value / 1_000_000_000; + if payload_value_gwei <= Uint256::from(i64::max_value()) { + metrics::set_gauge_vec( + &metrics::EXECUTION_LAYER_PAYLOAD_BIDS, + &[metrics::BUILDER], + payload_value_gwei.low_u64() as i64, + ); + } + + if payload_value < profit_threshold { + Err(Box::new(InvalidBuilderPayload::LowValue { + profit_threshold, + payload_value, + })) + } else if header.parent_hash() != parent_hash { + Err(Box::new(InvalidBuilderPayload::ParentHash { + payload: header.parent_hash(), + expected: parent_hash, + })) + } else if header.prev_randao() != prev_randao { + Err(Box::new(InvalidBuilderPayload::PrevRandao { + payload: header.prev_randao(), + expected: prev_randao, + })) + } else if header.timestamp() != timestamp { + Err(Box::new(InvalidBuilderPayload::Timestamp { + payload: header.timestamp(), + expected: timestamp, + })) + } else if block_number.map_or(false, |n| n != header.block_number()) { + Err(Box::new(InvalidBuilderPayload::BlockNumber { + payload: header.block_number(), + expected: block_number, + })) + } else if !matches!(bid.version, Some(ForkName::Merge)) { + // Once fork information is added to the payload, we will need to + // check that the local and relay payloads match. At this point, if + // we are requesting a payload at all, we have to assume this is + // the Bellatrix fork. 
+ Err(Box::new(InvalidBuilderPayload::Fork { + payload: bid.version, + expected: ForkName::Merge, + })) + } else if !is_signature_valid { + Err(Box::new(InvalidBuilderPayload::Signature { + signature: bid.data.signature.clone(), + pubkey: bid.data.message.pubkey, + })) + } else { + Ok(()) + } +} + +/// A helper function to record the time it takes to execute a future. +async fn timed_future, T>(metric: &str, future: F) -> (T, Duration) { + let start = Instant::now(); + let result = future.await; + let duration = start.elapsed(); + metrics::observe_timer_vec(&metrics::EXECUTION_LAYER_REQUEST_TIMES, &[metric], duration); + (result, duration) +} + #[cfg(test)] mod test { use super::*; diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index 9b00193a4a..bb5a1088d1 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -4,10 +4,17 @@ pub const HIT: &str = "hit"; pub const MISS: &str = "miss"; pub const GET_PAYLOAD: &str = "get_payload"; pub const GET_BLINDED_PAYLOAD: &str = "get_blinded_payload"; +pub const GET_BLINDED_PAYLOAD_LOCAL: &str = "get_blinded_payload_local"; +pub const GET_BLINDED_PAYLOAD_BUILDER: &str = "get_blinded_payload_builder"; +pub const POST_BLINDED_PAYLOAD_BUILDER: &str = "post_blinded_payload_builder"; pub const NEW_PAYLOAD: &str = "new_payload"; pub const FORKCHOICE_UPDATED: &str = "forkchoice_updated"; pub const GET_TERMINAL_POW_BLOCK_HASH: &str = "get_terminal_pow_block_hash"; pub const IS_VALID_TERMINAL_POW_BLOCK_HASH: &str = "is_valid_terminal_pow_block_hash"; +pub const LOCAL: &str = "local"; +pub const BUILDER: &str = "builder"; +pub const SUCCESS: &str = "success"; +pub const FAILURE: &str = "failure"; lazy_static::lazy_static! { pub static ref EXECUTION_LAYER_PROPOSER_INSERTED: Result = try_create_int_counter( @@ -18,9 +25,11 @@ lazy_static::lazy_static! 
{ "execution_layer_proposer_data_updated", "Count of times new proposer data is supplied", ); - pub static ref EXECUTION_LAYER_REQUEST_TIMES: Result = try_create_histogram_vec( + pub static ref EXECUTION_LAYER_REQUEST_TIMES: Result = + try_create_histogram_vec_with_buckets( "execution_layer_request_times", "Duration of calls to ELs", + decimal_buckets(-2, 1), &["method"] ); pub static ref EXECUTION_LAYER_PAYLOAD_ATTRIBUTES_LOOKAHEAD: Result = try_create_histogram( @@ -41,4 +50,29 @@ lazy_static::lazy_static! { "Indicates the payload status returned for a particular method", &["method", "status"] ); + pub static ref EXECUTION_LAYER_GET_PAYLOAD_OUTCOME: Result = try_create_int_counter_vec( + "execution_layer_get_payload_outcome", + "The success/failure outcomes from calling get_payload", + &["outcome"] + ); + pub static ref EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME: Result = try_create_int_counter_vec( + "execution_layer_builder_reveal_payload_outcome", + "The success/failure outcomes from a builder un-blinding a payload", + &["outcome"] + ); + pub static ref EXECUTION_LAYER_GET_PAYLOAD_SOURCE: Result = try_create_int_counter_vec( + "execution_layer_get_payload_source", + "The source of each payload returned from get_payload", + &["source"] + ); + pub static ref EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS: Result = try_create_int_counter_vec( + "execution_layer_get_payload_builder_rejections", + "The reasons why a payload from a builder was rejected", + &["reason"] + ); + pub static ref EXECUTION_LAYER_PAYLOAD_BIDS: Result = try_create_int_gauge_vec( + "execution_layer_payload_bids", + "The gwei bid value of payloads received by local EEs or builders. 
Only shows values up to i64::max_value.", + &["source"] + ); } From 3534c85e306a57aff79990d5a0a5900f3b1819a1 Mon Sep 17 00:00:00 2001 From: GeemoCandama Date: Tue, 29 Nov 2022 08:19:27 +0000 Subject: [PATCH 37/37] Optimize finalized chain sync by skipping newPayload messages (#3738) ## Issue Addressed #3704 ## Proposed Changes Adds is_syncing_finalized: bool parameter for block verification functions. Sets the payload_verification_status to Optimistic if is_syncing_finalized is true. Uses SyncState in NetworkGlobals in BeaconProcessor to retrieve the syncing status. ## Additional Info I could implement FinalizedSignatureVerifiedBlock if you think it would be nicer. --- beacon_node/beacon_chain/src/beacon_chain.rs | 12 +++- .../beacon_chain/src/block_verification.rs | 22 ++++-- .../beacon_chain/src/execution_payload.rs | 44 ++++++++---- beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/test_utils.rs | 10 ++- .../beacon_chain/tests/block_verification.rs | 67 ++++++++++++++----- .../tests/payload_invalidation.rs | 16 +++-- beacon_node/beacon_chain/tests/store_tests.rs | 5 +- beacon_node/beacon_chain/tests/tests.rs | 5 +- beacon_node/http_api/src/publish_blocks.rs | 11 ++- .../src/types/sync_state.rs | 11 +++ .../network/src/beacon_processor/mod.rs | 22 +++++- .../beacon_processor/worker/gossip_methods.rs | 9 ++- .../beacon_processor/worker/sync_methods.rs | 24 +++++-- testing/ef_tests/src/cases/fork_choice.rs | 3 +- 15 files changed, 200 insertions(+), 62 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6f409fdadc..564e2582e6 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -18,7 +18,7 @@ use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::eth1_finalization_cache::{Eth1FinalizationCache, Eth1FinalizationData}; use 
crate::events::ServerSentEventHandler; -use crate::execution_payload::{get_execution_payload, PreparePayloadHandle}; +use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, PreparePayloadHandle}; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::head_tracker::HeadTracker; use crate::historical_blocks::HistoricalBlockError; @@ -2341,6 +2341,7 @@ impl BeaconChain { self: &Arc, chain_segment: Vec>>, count_unrealized: CountUnrealized, + notify_execution_layer: NotifyExecutionLayer, ) -> ChainSegmentResult { let mut imported_blocks = 0; @@ -2409,6 +2410,7 @@ impl BeaconChain { signature_verified_block.block_root(), signature_verified_block, count_unrealized, + notify_execution_layer, ) .await { @@ -2497,6 +2499,7 @@ impl BeaconChain { block_root: Hash256, unverified_block: B, count_unrealized: CountUnrealized, + notify_execution_layer: NotifyExecutionLayer, ) -> Result> { // Start the Prometheus timer. let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); @@ -2510,8 +2513,11 @@ impl BeaconChain { // A small closure to group the verification and import errors. 
let chain = self.clone(); let import_block = async move { - let execution_pending = - unverified_block.into_execution_pending_block(block_root, &chain)?; + let execution_pending = unverified_block.into_execution_pending_block( + block_root, + &chain, + notify_execution_layer, + )?; chain .import_execution_pending_block(execution_pending, count_unrealized) .await diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 104de57dbf..1fdc1518a2 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -45,7 +45,7 @@ use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, - AllowOptimisticImport, PayloadNotifier, + AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, }; use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; @@ -636,8 +636,9 @@ pub trait IntoExecutionPendingBlock: Sized { self, block_root: Hash256, chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockError> { - self.into_execution_pending_block_slashable(block_root, chain) + self.into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) .map(|execution_pending| { // Supply valid block to slasher. 
if let Some(slasher) = chain.slasher.as_ref() { @@ -653,6 +654,7 @@ pub trait IntoExecutionPendingBlock: Sized { self, block_root: Hash256, chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo>>; fn block(&self) -> &SignedBeaconBlock; @@ -899,10 +901,15 @@ impl IntoExecutionPendingBlock for GossipVerifiedBlock>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo>> { let execution_pending = SignatureVerifiedBlock::from_gossip_verified_block_check_slashable(self, chain)?; - execution_pending.into_execution_pending_block_slashable(block_root, chain) + execution_pending.into_execution_pending_block_slashable( + block_root, + chain, + notify_execution_layer, + ) } fn block(&self) -> &SignedBeaconBlock { @@ -1032,6 +1039,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc self, block_root: Hash256, chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo>> { let header = self.block.signed_block_header(); let (parent, block) = if let Some(parent) = self.parent { @@ -1047,6 +1055,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc parent, self.consensus_context, chain, + notify_execution_layer, ) .map_err(|e| BlockSlashInfo::SignatureValid(header, e)) } @@ -1063,13 +1072,14 @@ impl IntoExecutionPendingBlock for Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo>> { // Perform an early check to prevent wasting time on irrelevant blocks. let block_root = check_block_relevancy(&self, block_root, chain) .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; SignatureVerifiedBlock::check_slashable(self, block_root, chain)? 
- .into_execution_pending_block_slashable(block_root, chain) + .into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) } fn block(&self) -> &SignedBeaconBlock { @@ -1091,6 +1101,7 @@ impl ExecutionPendingBlock { parent: PreProcessingSnapshot, mut consensus_context: ConsensusContext, chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result> { if let Some(parent) = chain .canonical_head @@ -1237,7 +1248,8 @@ impl ExecutionPendingBlock { // Define a future that will verify the execution payload with an execution engine (but // don't execute it yet). - let payload_notifier = PayloadNotifier::new(chain.clone(), block.clone(), &state)?; + let payload_notifier = + PayloadNotifier::new(chain.clone(), block.clone(), &state, notify_execution_layer)?; let is_valid_merge_transition_block = is_merge_transition_block(&state, block.message().body()); let payload_verification_future = async move { diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 2221d1fc7c..c859aa54fc 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -35,6 +35,16 @@ pub enum AllowOptimisticImport { No, } +/// Signal whether the execution payloads of new blocks should be +/// immediately verified with the EL or imported optimistically without +/// any EL communication. +#[derive(Default, Clone, Copy)] +pub enum NotifyExecutionLayer { + #[default] + Yes, + No, +} + /// Used to await the result of executing payload with a remote EE. pub struct PayloadNotifier { pub chain: Arc>, @@ -47,21 +57,27 @@ impl PayloadNotifier { chain: Arc>, block: Arc>, state: &BeaconState, + notify_execution_layer: NotifyExecutionLayer, ) -> Result> { - let payload_verification_status = if is_execution_enabled(state, block.message().body()) { - // Perform the initial stages of payload verification. 
- // - // We will duplicate these checks again during `per_block_processing`, however these checks - // are cheap and doing them here ensures we protect the execution engine from junk. - partially_verify_execution_payload( - state, - block.message().execution_payload()?, - &chain.spec, - ) - .map_err(BlockError::PerBlockProcessingError)?; - None - } else { - Some(PayloadVerificationStatus::Irrelevant) + let payload_verification_status = match notify_execution_layer { + NotifyExecutionLayer::No => Some(PayloadVerificationStatus::Optimistic), + NotifyExecutionLayer::Yes => { + if is_execution_enabled(state, block.message().body()) { + // Perform the initial stages of payload verification. + // + // We will duplicate these checks again during `per_block_processing`, however these checks + // are cheap and doing them here ensures we protect the execution engine from junk. + partially_verify_execution_payload( + state, + block.message().execution_payload()?, + &chain.spec, + ) + .map_err(BlockError::PerBlockProcessingError)?; + None + } else { + Some(PayloadVerificationStatus::Irrelevant) + } + } }; Ok(Self { diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 5ead5311e5..3889fe4aa5 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -63,6 +63,7 @@ pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::ServerSentEventHandler; pub use execution_layer::EngineState; +pub use execution_payload::NotifyExecutionLayer; pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters}; pub use metrics::scrape_for_metrics; pub use parking_lot; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index a1c7acf173..a0f42ec214 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -2,7 +2,7 @@ pub use 
crate::persisted_beacon_chain::PersistedBeaconChain; pub use crate::{ beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, - BeaconChainError, ProduceBlockVerification, + BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification, }; use crate::{ builder::{BeaconChainBuilder, Witness}, @@ -1460,7 +1460,12 @@ where self.set_current_slot(slot); let block_hash: SignedBeaconBlockHash = self .chain - .process_block(block_root, Arc::new(block), CountUnrealized::True) + .process_block( + block_root, + Arc::new(block), + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await? .into(); self.chain.recompute_head_at_current_slot().await; @@ -1477,6 +1482,7 @@ where block.canonical_root(), Arc::new(block), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await? .into(); diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 998f22f770..38a55e2212 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -3,7 +3,7 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; -use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult}; +use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult, NotifyExecutionLayer}; use fork_choice::CountUnrealized; use lazy_static::lazy_static; use logging::test_logger; @@ -147,14 +147,18 @@ async fn chain_segment_full_segment() { // Sneak in a little check to ensure we can process empty chain segments. 
harness .chain - .process_chain_segment(vec![], CountUnrealized::True) + .process_chain_segment(vec![], CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error() .expect("should import empty chain segment"); harness .chain - .process_chain_segment(blocks.clone(), CountUnrealized::True) + .process_chain_segment( + blocks.clone(), + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await .into_block_error() .expect("should import chain segment"); @@ -183,7 +187,11 @@ async fn chain_segment_varying_chunk_size() { for chunk in blocks.chunks(*chunk_size) { harness .chain - .process_chain_segment(chunk.to_vec(), CountUnrealized::True) + .process_chain_segment( + chunk.to_vec(), + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await .into_block_error() .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); @@ -219,7 +227,7 @@ async fn chain_segment_non_linear_parent_roots() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::NonLinearParentRoots) @@ -239,7 +247,7 @@ async fn chain_segment_non_linear_parent_roots() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::NonLinearParentRoots) @@ -270,7 +278,7 @@ async fn chain_segment_non_linear_slots() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::NonLinearSlots) @@ -291,7 +299,7 @@ async fn chain_segment_non_linear_slots() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await 
.into_block_error(), Err(BlockError::NonLinearSlots) @@ -317,7 +325,7 @@ async fn assert_invalid_signature( matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -339,7 +347,11 @@ async fn assert_invalid_signature( // imported prior to this test. let _ = harness .chain - .process_chain_segment(ancestor_blocks, CountUnrealized::True) + .process_chain_segment( + ancestor_blocks, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await; harness.chain.recompute_head_at_current_slot().await; @@ -349,6 +361,7 @@ async fn assert_invalid_signature( snapshots[block_index].beacon_block.canonical_root(), snapshots[block_index].beacon_block.clone(), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await; assert!( @@ -400,7 +413,11 @@ async fn invalid_signature_gossip_block() { .collect(); harness .chain - .process_chain_segment(ancestor_blocks, CountUnrealized::True) + .process_chain_segment( + ancestor_blocks, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await .into_block_error() .expect("should import all blocks prior to the one being tested"); @@ -412,7 +429,8 @@ async fn invalid_signature_gossip_block() { .process_block( signed_block.canonical_root(), Arc::new(signed_block), - CountUnrealized::True + CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await, Err(BlockError::InvalidSignature) @@ -446,7 +464,7 @@ async fn invalid_signature_block_proposal() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -644,7 +662,7 @@ async fn invalid_signature_deposit() { !matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, 
NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -725,6 +743,7 @@ async fn block_gossip_verification() { gossip_verified.block_root, gossip_verified, CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .expect("should import valid gossip verified block"); @@ -996,6 +1015,7 @@ async fn verify_block_for_gossip_slashing_detection() { verified_block.block_root, verified_block, CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); @@ -1035,6 +1055,7 @@ async fn verify_block_for_gossip_doppelganger_detection() { verified_block.block_root, verified_block, CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); @@ -1180,7 +1201,8 @@ async fn add_base_block_to_altair_chain() { .process_block( base_block.canonical_root(), Arc::new(base_block.clone()), - CountUnrealized::True + CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .err() @@ -1195,7 +1217,11 @@ async fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .process_chain_segment(vec![Arc::new(base_block)], CountUnrealized::True) + .process_chain_segment( + vec![Arc::new(base_block)], + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await, ChainSegmentResult::Failed { imported_blocks: 0, @@ -1313,7 +1339,8 @@ async fn add_altair_block_to_base_chain() { .process_block( altair_block.canonical_root(), Arc::new(altair_block.clone()), - CountUnrealized::True + CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .err() @@ -1328,7 +1355,11 @@ async fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .process_chain_segment(vec![Arc::new(altair_block)], CountUnrealized::True) + .process_chain_segment( + vec![Arc::new(altair_block)], + CountUnrealized::True, + NotifyExecutionLayer::Yes + ) .await, ChainSegmentResult::Failed { imported_blocks: 0, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 
2336c3ba99..d77cc19678 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -7,8 +7,8 @@ use beacon_chain::otb_verification_service::{ use beacon_chain::{ canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainError, BlockError, ExecutionPayloadError, StateSkipConfig, WhenSlotSkipped, - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer, StateSkipConfig, + WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ @@ -693,6 +693,7 @@ async fn invalidates_all_descendants() { fork_block.canonical_root(), Arc::new(fork_block), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); @@ -789,6 +790,7 @@ async fn switches_heads() { fork_block.canonical_root(), Arc::new(fork_block), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); @@ -1035,7 +1037,7 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for import. 
assert!(matches!( - rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True).await, + rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True, NotifyExecutionLayer::Yes).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); @@ -1317,7 +1319,12 @@ async fn build_optimistic_chain( for block in blocks { rig.harness .chain - .process_block(block.canonical_root(), block, CountUnrealized::True) + .process_block( + block.canonical_root(), + block, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await .unwrap(); } @@ -1879,6 +1886,7 @@ async fn recover_from_invalid_head_by_importing_blocks() { fork_block.canonical_root(), fork_block.clone(), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index b1907bc96e..b2fc7a6402 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -7,8 +7,8 @@ use beacon_chain::test_utils::{ }; use beacon_chain::{ historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain, - BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, ServerSentEventHandler, - WhenSlotSkipped, + BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, NotifyExecutionLayer, + ServerSentEventHandler, WhenSlotSkipped, }; use fork_choice::CountUnrealized; use lazy_static::lazy_static; @@ -2148,6 +2148,7 @@ async fn weak_subjectivity_sync() { full_block.canonical_root(), Arc::new(full_block), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index a13946bf2b..d80db132ef 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -6,7 +6,7 @@ use 
beacon_chain::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, OP_POOL_DB_KEY, }, - BeaconChain, StateSkipConfig, WhenSlotSkipped, + BeaconChain, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, }; use fork_choice::CountUnrealized; use lazy_static::lazy_static; @@ -687,7 +687,8 @@ async fn run_skip_slot_test(skip_slots: u64) { .process_block( harness_a.chain.head_snapshot().beacon_block_root, harness_a.chain.head_snapshot().beacon_block.clone(), - CountUnrealized::True + CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(), diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 3c50fb95a2..08355c1d37 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -1,6 +1,8 @@ use crate::metrics; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; -use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized}; +use beacon_chain::{ + BeaconChain, BeaconChainTypes, BlockError, CountUnrealized, NotifyExecutionLayer, +}; use lighthouse_network::PubsubMessage; use network::NetworkMessage; use slog::{crit, error, info, warn, Logger}; @@ -35,7 +37,12 @@ pub async fn publish_block( let block_root = block_root.unwrap_or_else(|| block.canonical_root()); match chain - .process_block(block_root, block.clone(), CountUnrealized::True) + .process_block( + block_root, + block.clone(), + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await { Ok(root) => { diff --git a/beacon_node/lighthouse_network/src/types/sync_state.rs b/beacon_node/lighthouse_network/src/types/sync_state.rs index ce03f61ffe..5f09aec27a 100644 --- a/beacon_node/lighthouse_network/src/types/sync_state.rs +++ b/beacon_node/lighthouse_network/src/types/sync_state.rs @@ -74,6 +74,17 @@ impl SyncState { } } + pub fn is_syncing_finalized(&self) -> bool { + match self { + SyncState::SyncingFinalized { .. 
} => true, + SyncState::SyncingHead { .. } => false, + SyncState::SyncTransition => false, + SyncState::BackFillSyncing { .. } => false, + SyncState::Synced => false, + SyncState::Stalled => false, + } + } + /// Returns true if the node is synced. /// /// NOTE: We consider the node synced if it is fetching old historical blocks. diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index aa4286b9cd..9528cfd1df 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -41,7 +41,7 @@ use crate::sync::manager::BlockProcessType; use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; use beacon_chain::parking_lot::Mutex; -use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock}; +use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock, NotifyExecutionLayer}; use derivative::Derivative; use futures::stream::{Stream, StreamExt}; use futures::task::Poll; @@ -1587,8 +1587,24 @@ impl BeaconProcessor { /* * Verification for a chain segment (multiple blocks). */ - Work::ChainSegment { process_id, blocks } => task_spawner - .spawn_async(async move { worker.process_chain_segment(process_id, blocks).await }), + Work::ChainSegment { process_id, blocks } => { + let notify_execution_layer = if self + .network_globals + .sync_state + .read() + .is_syncing_finalized() + { + NotifyExecutionLayer::No + } else { + NotifyExecutionLayer::Yes + }; + + task_spawner.spawn_async(async move { + worker + .process_chain_segment(process_id, blocks, notify_execution_layer) + .await + }) + } /* * Processing of Status Messages. 
*/ diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 4f1fd2cede..947d9cfe27 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -7,7 +7,7 @@ use beacon_chain::{ sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::get_block_delay_ms, BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized, ForkChoiceError, - GossipVerifiedBlock, + GossipVerifiedBlock, NotifyExecutionLayer, }; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use slog::{crit, debug, error, info, trace, warn}; @@ -934,7 +934,12 @@ impl Worker { match self .chain - .process_block(block_root, verified_block, CountUnrealized::True) + .process_block( + block_root, + verified_block, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await { Ok(block_root) => { diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index eebf751359..1ec045e97e 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -10,6 +10,7 @@ use crate::sync::{BatchProcessResult, ChainId}; use beacon_chain::CountUnrealized; use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, + NotifyExecutionLayer, }; use lighthouse_network::PeerAction; use slog::{debug, error, info, warn}; @@ -85,7 +86,12 @@ impl Worker { let slot = block.slot(); let result = self .chain - .process_block(block_root, block, CountUnrealized::True) + .process_block( + block_root, + block, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await; metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); @@ -127,6 
+133,7 @@ impl Worker { &self, sync_type: ChainSegmentProcessId, downloaded_blocks: Vec>>, + notify_execution_layer: NotifyExecutionLayer, ) { let result = match sync_type { // this a request from the range sync @@ -136,7 +143,11 @@ impl Worker { let sent_blocks = downloaded_blocks.len(); match self - .process_blocks(downloaded_blocks.iter(), count_unrealized) + .process_blocks( + downloaded_blocks.iter(), + count_unrealized, + notify_execution_layer, + ) .await { (_, Ok(_)) => { @@ -215,7 +226,11 @@ impl Worker { // parent blocks are ordered from highest slot to lowest, so we need to process in // reverse match self - .process_blocks(downloaded_blocks.iter().rev(), CountUnrealized::True) + .process_blocks( + downloaded_blocks.iter().rev(), + CountUnrealized::True, + notify_execution_layer, + ) .await { (imported_blocks, Err(e)) => { @@ -246,11 +261,12 @@ impl Worker { &self, downloaded_blocks: impl Iterator>>, count_unrealized: CountUnrealized, + notify_execution_layer: NotifyExecutionLayer, ) -> (usize, Result<(), ChainSegmentFailed>) { let blocks: Vec> = downloaded_blocks.cloned().collect(); match self .chain - .process_chain_segment(blocks, count_unrealized) + .process_chain_segment(blocks, count_unrealized, notify_execution_layer) .await { ChainSegmentResult::Successful { imported_blocks } => { diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 8faf4db821..039efb3684 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -7,7 +7,7 @@ use beacon_chain::{ obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, }, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainTypes, CachedHead, CountUnrealized, + BeaconChainTypes, CachedHead, CountUnrealized, NotifyExecutionLayer, }; use execution_layer::{json_structures::JsonPayloadStatusV1Status, PayloadStatusV1}; use serde::Deserialize; @@ -388,6 +388,7 @@ impl Tester { block_root, 
block.clone(), CountUnrealized::False, + NotifyExecutionLayer::Yes, ))?; if result.is_ok() != valid { return Err(Error::DidntFail(format!(