From a53830fd60a119bf3f659b253360af8027128e83 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 26 Mar 2023 22:39:28 +0000 Subject: [PATCH 01/17] Release v4.0.1 (#4125) ## Issue Addressed NA ## Proposed Changes - Bump versions. - Bump openssl version to resolve various `cargo audit` notices. ## Additional Info - Requires further testing --- Cargo.lock | 16 ++++++++-------- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eb26414d09..b5ed6497c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -618,7 +618,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "4.0.1-rc.0" +version = "4.0.1" dependencies = [ "beacon_chain", "clap", @@ -786,7 +786,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "4.0.1-rc.0" +version = "4.0.1" dependencies = [ "beacon_node", "clap", @@ -3770,7 +3770,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "4.0.1-rc.0" +version = "4.0.1" dependencies = [ "account_utils", "beacon_chain", @@ -4374,7 +4374,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "4.0.1-rc.0" +version = "4.0.1" dependencies = [ "account_manager", "account_utils", @@ -5333,9 +5333,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.45" +version = "0.10.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" +checksum = "518915b97df115dd36109bfa429a48b8f737bd05508cf9588977b599648926d2" dependencies = [ "bitflags", "cfg-if", @@ -5374,9 +5374,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.80" +version = "0.9.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" +checksum = "666416d899cf077260dac8698d60a60b435a46d57e82acb1be3d0dad87284e5b" dependencies = [ "autocfg 1.1.0", "cc", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index fed3b96ca1..d8aa42a67e 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "4.0.1-rc.0" +version = "4.0.1" authors = ["Paul Hauner ", "Age Manning "] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index f4e19e7962..c1b6333a37 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v4.0.1-rc.0-", - fallback = "Lighthouse/v4.0.1-rc.0" + prefix = "Lighthouse/v4.0.1-", + fallback = "Lighthouse/v4.0.1" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 84b66c37d2..22c4ce305f 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "4.0.1-rc.0" +version = "4.0.1" authors = ["Paul Hauner "] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 143ca86c30..2b7727ec11 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "4.0.1-rc.0" +version = "4.0.1" authors = ["Sigma Prime "] edition = "2021" autotests = false From f4d13f914984549482c8cbaa3e1e192741ea33dd Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 28 Mar 2023 16:49:21 +0000 Subject: [PATCH 02/17] Update arbitrary (#4139) ## Proposed Changes To prevent breakages from `cargo update`, this updates the `arbitrary` crate to a new commit from my fork. Unfortunately we still need to use my fork (even though my `bound` change was merged) because of this issue: https://github.com/rust-lang/rust-clippy/issues/10185. In a couple of Rust versions it should be resolved upstream. 
--- Cargo.lock | 9 ++++----- Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b5ed6497c4..92502ca137 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -211,8 +211,8 @@ checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" [[package]] name = "arbitrary" -version = "1.2.2" -source = "git+https://github.com/michaelsproul/arbitrary?rev=a572fd8743012a4f1ada5ee5968b1b3619c427ba#a572fd8743012a4f1ada5ee5968b1b3619c427ba" +version = "1.3.0" +source = "git+https://github.com/michaelsproul/arbitrary?rev=f002b99989b561ddce62e4cf2887b0f8860ae991#f002b99989b561ddce62e4cf2887b0f8860ae991" dependencies = [ "derive_arbitrary", ] @@ -1681,10 +1681,9 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.2.2" -source = "git+https://github.com/michaelsproul/arbitrary?rev=a572fd8743012a4f1ada5ee5968b1b3619c427ba#a572fd8743012a4f1ada5ee5968b1b3619c427ba" +version = "1.3.0" +source = "git+https://github.com/michaelsproul/arbitrary?rev=f002b99989b561ddce62e4cf2887b0f8860ae991#f002b99989b561ddce62e4cf2887b0f8860ae991" dependencies = [ - "darling 0.14.3", "proc-macro2", "quote", "syn", diff --git a/Cargo.toml b/Cargo.toml index 46852645eb..ba07de0443 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -100,7 +100,7 @@ eth2_hashing = { path = "crypto/eth2_hashing" } tree_hash = { path = "consensus/tree_hash" } tree_hash_derive = { path = "consensus/tree_hash_derive" } eth2_serde_utils = { path = "consensus/serde_utils" } -arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="a572fd8743012a4f1ada5ee5968b1b3619c427ba" } +arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b99989b561ddce62e4cf2887b0f8860ae991" } [patch."https://github.com/ralexstokes/mev-rs"] mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" } From 65d0f63639b4bd41f3a72345a2ea569b6751b61f Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 28 
Mar 2023 22:07:03 +0000 Subject: [PATCH 03/17] Update Rust version in lcli Dockerfile (#4121) ## Issue Addressed The minimum supported Rust version has been set to 1.66 as of Lighthouse v4.0.0. This PR updates Rust to 1.66 in lcli Dockerfile. Co-authored-by: Jimmy Chen --- lcli/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lcli/Dockerfile b/lcli/Dockerfile index feda81d030..079e5c779b 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.65.0-bullseye AS builder +FROM rust:1.66.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse ARG PORTABLE From d3c20ffa9decf22a351ac0b59162f19c4660948c Mon Sep 17 00:00:00 2001 From: int88 Date: Tue, 28 Mar 2023 22:07:05 +0000 Subject: [PATCH 04/17] improve error message (#4141) ## Issue Addressed NA ## Proposed Changes Not use magic number directly in the error message. ## Additional Info NA --- beacon_node/http_api/src/attestation_performance.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index ca68d4d04c..3e7d8d5e31 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -77,8 +77,8 @@ pub fn get_attestation_performance( // query is within permitted bounds to prevent potential OOM errors. if (end_epoch - start_epoch).as_usize() > MAX_REQUEST_RANGE_EPOCHS { return Err(custom_bad_request(format!( - "end_epoch must not exceed start_epoch by more than 100 epochs. 
start: {}, end: {}", - query.start_epoch, query.end_epoch + "end_epoch must not exceed start_epoch by more than {} epochs. start: {}, end: {}", + MAX_REQUEST_RANGE_EPOCHS, query.start_epoch, query.end_epoch ))); } From 6bb28bc8064c78ca459291bf20a061ae7ce213a9 Mon Sep 17 00:00:00 2001 From: Christopher Chong Date: Wed, 29 Mar 2023 02:56:37 +0000 Subject: [PATCH 05/17] Add debug fork choice api (#4003) ## Issue Addressed Which issue # does this PR address? https://github.com/sigp/lighthouse/issues/3669 ## Proposed Changes Please list or describe the changes introduced by this PR. - A new API to fetch fork choice data, as specified [here](https://github.com/ethereum/beacon-APIs/pull/232) - A new integration test to test the new API ## Additional Info Please provide any additional information. For example, future considerations or information useful for reviewers. - `extra_data` field specified in the beacon-API spec is not implemented, please let me know if I should instead. Co-authored-by: Michael Sproul --- beacon_node/http_api/src/lib.rs | 56 +++++++++++++++++- beacon_node/http_api/tests/tests.rs | 57 ++++++++++++++++++- common/eth2/src/lib.rs | 12 ++++ common/eth2/src/types.rs | 20 +++++++ .../src/proto_array_fork_choice.rs | 16 +++++- 5 files changed, 158 insertions(+), 3 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index d3d99c5c9f..b36adc5c08 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -30,7 +30,8 @@ use beacon_chain::{ pub use block_id::BlockId; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ - self as api_types, EndpointVersion, SkipRandaoVerification, ValidatorId, ValidatorStatus, + self as api_types, EndpointVersion, ForkChoice, ForkChoiceNode, SkipRandaoVerification, + ValidatorId, ValidatorStatus, }; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; @@ -2148,6 +2149,58 @@ pub 
fn serve( }, ); + // GET debug/fork_choice + let get_debug_fork_choice = eth_v1 + .and(warp::path("debug")) + .and(warp::path("fork_choice")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + let beacon_fork_choice = chain.canonical_head.fork_choice_read_lock(); + + let proto_array = beacon_fork_choice.proto_array().core_proto_array(); + + let fork_choice_nodes = proto_array + .nodes + .iter() + .map(|node| { + let execution_status = if node.execution_status.is_execution_enabled() { + Some(node.execution_status.to_string()) + } else { + None + }; + + ForkChoiceNode { + slot: node.slot, + block_root: node.root, + parent_root: node + .parent + .and_then(|index| proto_array.nodes.get(index)) + .map(|parent| parent.root), + justified_epoch: node + .justified_checkpoint + .map(|checkpoint| checkpoint.epoch), + finalized_epoch: node + .finalized_checkpoint + .map(|checkpoint| checkpoint.epoch), + weight: node.weight, + validity: execution_status, + execution_block_hash: node + .execution_status + .block_hash() + .map(|block_hash| block_hash.into_root()), + } + }) + .collect::>(); + Ok(ForkChoice { + justified_checkpoint: proto_array.justified_checkpoint, + finalized_checkpoint: proto_array.finalized_checkpoint, + fork_choice_nodes, + }) + }) + }); + /* * node */ @@ -3676,6 +3729,7 @@ pub fn serve( .uor(get_config_deposit_contract) .uor(get_debug_beacon_states) .uor(get_debug_beacon_heads) + .uor(get_debug_fork_choice) .uor(get_node_identity) .uor(get_node_version) .uor(get_node_syncing) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 977c737fd0..1112cc5ae2 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -8,7 +8,7 @@ use environment::null_logger; use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, - types::{BlockId as CoreBlockId, StateId as CoreStateId, *}, + 
types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *}, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; use execution_layer::test_utils::TestingBuilder; @@ -1679,6 +1679,59 @@ impl ApiTester { self } + pub async fn test_get_debug_fork_choice(self) -> Self { + let result = self.client.get_debug_fork_choice().await.unwrap(); + + let beacon_fork_choice = self.chain.canonical_head.fork_choice_read_lock(); + + let expected_proto_array = beacon_fork_choice.proto_array().core_proto_array(); + + assert_eq!( + result.justified_checkpoint, + expected_proto_array.justified_checkpoint + ); + assert_eq!( + result.finalized_checkpoint, + expected_proto_array.finalized_checkpoint + ); + + let expected_fork_choice_nodes: Vec = expected_proto_array + .nodes + .iter() + .map(|node| { + let execution_status = if node.execution_status.is_execution_enabled() { + Some(node.execution_status.to_string()) + } else { + None + }; + ForkChoiceNode { + slot: node.slot, + block_root: node.root, + parent_root: node + .parent + .and_then(|index| expected_proto_array.nodes.get(index)) + .map(|parent| parent.root), + justified_epoch: node.justified_checkpoint.map(|checkpoint| checkpoint.epoch), + finalized_epoch: node.finalized_checkpoint.map(|checkpoint| checkpoint.epoch), + weight: node.weight, + validity: execution_status, + execution_block_hash: node + .execution_status + .block_hash() + .map(|block_hash| block_hash.into_root()), + } + }) + .collect(); + + assert_eq!(result.fork_choice_nodes, expected_fork_choice_nodes); + + // need to drop beacon_fork_choice here, else borrow checker will complain + // that self cannot be moved out since beacon_fork_choice borrowed self.chain + // and might still live after self is moved out + drop(beacon_fork_choice); + self + } + fn validator_count(&self) -> usize { self.chain.head_snapshot().beacon_state.validators().len() } @@ -4148,6 +4201,8 @@ async fn debug_get() { .test_get_debug_beacon_states() .await 
.test_get_debug_beacon_heads() + .await + .test_get_debug_fork_choice() .await; } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 4d74299fff..d00d4aa4b7 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1334,6 +1334,18 @@ impl BeaconNodeHttpClient { self.get(path).await } + /// `GET v1/debug/fork_choice` + pub async fn get_debug_fork_choice(&self) -> Result { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("debug") + .push("fork_choice"); + + self.get(path).await + } + /// `GET validator/duties/proposer/{epoch}` pub async fn get_validator_duties_proposer( &self, diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 175c7db786..0fddaaab20 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1197,6 +1197,26 @@ pub struct LivenessResponseData { pub is_live: bool, } +#[derive(Debug, Serialize, Deserialize)] +pub struct ForkChoice { + pub justified_checkpoint: Checkpoint, + pub finalized_checkpoint: Checkpoint, + pub fork_choice_nodes: Vec, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct ForkChoiceNode { + pub slot: Slot, + pub block_root: Hash256, + pub parent_root: Option, + pub justified_epoch: Option, + pub finalized_epoch: Option, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub weight: u64, + pub validity: Option, + pub execution_block_hash: Option, +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index eae54e7342..6db1ac132f 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -10,7 +10,10 @@ use crate::{ use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::collections::{BTreeSet, HashMap}; +use std::{ + collections::{BTreeSet, 
HashMap}, + fmt, +}; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, @@ -125,6 +128,17 @@ impl ExecutionStatus { } } +impl fmt::Display for ExecutionStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ExecutionStatus::Valid(_) => write!(f, "valid"), + ExecutionStatus::Invalid(_) => write!(f, "invalid"), + ExecutionStatus::Optimistic(_) => write!(f, "optimistic"), + ExecutionStatus::Irrelevant(_) => write!(f, "irrelevant"), + } + } +} + /// A block that is to be applied to the fork choice. /// /// A simplified version of `types::BeaconBlock`. From 788a4b718f689be4db611e403fb6a8c7426fe8db Mon Sep 17 00:00:00 2001 From: Maksim Shcherbo Date: Wed, 29 Mar 2023 02:56:39 +0000 Subject: [PATCH 06/17] Optimise `update_validators` by decrypting key cache only when necessary (#4126) ## Title Optimise `update_validators` by decrypting key cache only when necessary ## Issue Addressed Resolves [#3968: Slow performance of validator client PATCH API with hundreds of keys](https://github.com/sigp/lighthouse/issues/3968) ## Proposed Changes 1. Add a check to determine if there is at least one local definition before decrypting the key cache. 2. Assign an empty `KeyCache` when all definitions are of the `Web3Signer` type. 3. Perform cache-related operations (e.g., saving the modified key cache) only if there are local definitions. ## Additional Info This PR addresses the excessive CPU usage and slow performance experienced when using the `PATCH lighthouse/validators/{pubkey}` request with a large number of keys. The issue was caused by the key cache using cryptography to decipher and cipher the cache entities every time the request was made. This operation called `scrypt`, which was very slow and required a lot of memory when there were many concurrent requests. 
These changes have no impact on the overall functionality but can lead to significant performance improvements when working with remote signers. Importantly, the key cache is never used when there are only `Web3Signer` definitions, avoiding the expensive operation of decrypting the key cache in such cases. Co-authored-by: Maksim Shcherbo --- .../src/initialized_validators.rs | 27 ++++++++++++++++--- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 7fe2f5f8ec..468fc2b06b 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -989,7 +989,23 @@ impl InitializedValidators { let cache = KeyCache::open_or_create(&self.validators_dir).map_err(Error::UnableToOpenKeyCache)?; - let mut key_cache = self.decrypt_key_cache(cache, &mut key_stores).await?; + + // Check if there is at least one local definition. + let has_local_definitions = self.definitions.as_slice().iter().any(|def| { + matches!( + def.signing_definition, + SigningDefinition::LocalKeystore { .. } + ) + }); + + // Only decrypt cache when there is at least one local definition. + // Decrypting cache is a very expensive operation which is never used for web3signer. + let mut key_cache = if has_local_definitions { + self.decrypt_key_cache(cache, &mut key_stores).await? + } else { + // Assign an empty KeyCache if all definitions are of the Web3Signer type. 
+ KeyCache::new() + }; let mut disabled_uuids = HashSet::new(); for def in self.definitions.as_slice() { @@ -1115,13 +1131,16 @@ impl InitializedValidators { ); } } - for uuid in disabled_uuids { - key_cache.remove(&uuid); + + if has_local_definitions { + for uuid in disabled_uuids { + key_cache.remove(&uuid); + } } let validators_dir = self.validators_dir.clone(); let log = self.log.clone(); - if key_cache.is_modified() { + if has_local_definitions && key_cache.is_modified() { tokio::task::spawn_blocking(move || { match key_cache.save(validators_dir) { Err(e) => warn!( From 12205a88110e865e9e2aff9369bded1c8f951f9a Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 29 Mar 2023 23:55:55 +0000 Subject: [PATCH 07/17] Correct log for ENR (#4133) ## Issue Addressed https://github.com/sigp/lighthouse/issues/4080 Fixes a log when displaying the initial ENR. --- beacon_node/lighthouse_network/src/discovery/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index dda68aff95..2966644a89 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -207,7 +207,7 @@ impl Discovery { let local_node_id = local_enr.node_id(); info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), - "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp6() + "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6() ); let listen_socket = match config.listen_addrs() { crate::listen_addr::ListenAddress::V4(v4_addr) => v4_addr.udp_socket_addr(), From 036b797b2c1831352f937356576b3c78c65220ad Mon Sep 17 00:00:00 2001 From: Daniel Ramirez Chiquillo Date: Thu, 30 Mar 2023 06:08:37 +0000 Subject: [PATCH 08/17] Add `finalized` to HTTP API responses 
(#3753) ## Issue Addressed #3708 ## Proposed Changes - Add `is_finalized_block` method to `BeaconChain` in `beacon_node/beacon_chain/src/beacon_chain.rs`. - Add `is_finalized_state` method to `BeaconChain` in `beacon_node/beacon_chain/src/beacon_chain.rs`. - Add `fork_and_execution_optimistic_and_finalized` in `beacon_node/http_api/src/state_id.rs`. - Add `ExecutionOptimisticFinalizedForkVersionedResponse` type in `consensus/types/src/fork_versioned_response.rs`. - Add `execution_optimistic_finalized_fork_versioned_response`function in `beacon_node/http_api/src/version.rs`. - Add `ExecutionOptimisticFinalizedResponse` type in `common/eth2/src/types.rs`. - Add `add_execution_optimistic_finalized` method in `common/eth2/src/types.rs`. - Update API response methods to include finalized. - Remove `execution_optimistic_fork_versioned_response` Co-authored-by: Michael Sproul --- Cargo.lock | 769 ++++++++++-------- beacon_node/beacon_chain/src/beacon_chain.rs | 40 + beacon_node/http_api/src/attester_duties.rs | 6 +- beacon_node/http_api/src/block_id.rs | 73 +- beacon_node/http_api/src/lib.rs | 297 +++---- beacon_node/http_api/src/proposer_duties.rs | 4 +- .../http_api/src/standard_block_rewards.rs | 6 +- beacon_node/http_api/src/state_id.rs | 75 +- .../http_api/src/sync_committee_rewards.rs | 6 +- .../http_api/src/validator_inclusion.rs | 2 +- beacon_node/http_api/src/version.rs | 13 +- beacon_node/http_api/tests/tests.rs | 320 +++++++- beacon_node/store/src/hot_cold_store.rs | 2 +- common/eth2/src/lib.rs | 44 +- common/eth2/src/types.rs | 20 + .../types/src/fork_versioned_response.rs | 40 + 16 files changed, 1132 insertions(+), 585 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 92502ca137..c7cdf212bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -88,6 +88,16 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "aead" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5c192eb8f11fc081b0fe4259ba5af04217d4e0faddd02417310a927911abd7c8" +dependencies = [ + "crypto-common", + "generic-array", +] + [[package]] name = "aes" version = "0.6.0" @@ -113,17 +123,14 @@ dependencies = [ ] [[package]] -name = "aes-gcm" -version = "0.8.0" +name = "aes" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5278b5fabbb9bd46e24aa69b2fdea62c99088e0a950a9be40e3e0101298f88da" +checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241" dependencies = [ - "aead 0.3.2", - "aes 0.6.0", - "cipher 0.2.5", - "ctr 0.6.0", - "ghash 0.3.1", - "subtle", + "cfg-if", + "cipher 0.4.4", + "cpufeatures", ] [[package]] @@ -140,6 +147,20 @@ dependencies = [ "subtle", ] +[[package]] +name = "aes-gcm" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82e1366e0c69c9f927b1fa5ce2c7bf9eafc8f9268c0b9800729e8b267612447c" +dependencies = [ + "aead 0.5.1", + "aes 0.8.2", + "cipher 0.4.4", + "ctr 0.9.2", + "ghash 0.5.0", + "subtle", +] + [[package]] name = "aes-soft" version = "0.6.4" @@ -248,14 +269,14 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] name = "asn1-rs" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf6690c370453db30743b373a60ba498fc0d6d83b11f4abfd87a84a075db5dd4" +checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" dependencies = [ "asn1-rs-derive 0.4.0", "asn1-rs-impl", @@ -264,7 +285,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -330,29 +351,29 @@ dependencies = [ [[package]] name = "async-lock" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" +checksum = 
"fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" dependencies = [ "event-listener", - "futures-lite", ] [[package]] name = "async-stream" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" +checksum = "ad445822218ce64be7a341abfb0b1ea43b5c23aa83902542a4542e78309d8e5e" dependencies = [ "async-stream-impl", "futures-core", + "pin-project-lite 0.2.9", ] [[package]] name = "async-stream-impl" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" +checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" dependencies = [ "proc-macro2", "quote", @@ -361,9 +382,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.64" +version = "0.1.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2" +checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" dependencies = [ "proc-macro2", "quote", @@ -537,14 +558,14 @@ checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "base64ct" -version = "1.5.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "beacon-api-client" version = "0.1.0" -source = "git+https://github.com/ralexstokes/beacon-api-client#53690a711e33614d59d4d44fb09762b4699e2a4e" +source = "git+https://github.com/ralexstokes/beacon-api-client#30679e9e25d61731cde54e14cd8a3688a39d8e5b" dependencies = [ "ethereum-consensus", "http", @@ -729,9 +750,9 @@ dependencies = [ [[package]] name = "block-buffer" 
-version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] @@ -965,9 +986,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "js-sys", @@ -997,10 +1018,20 @@ dependencies = [ ] [[package]] -name = "clang-sys" -version = "1.4.0" +name = "cipher" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", +] + +[[package]] +name = "clang-sys" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a" dependencies = [ "glob", "libc", @@ -1073,7 +1104,7 @@ dependencies = [ "state_processing", "store", "task_executor", - "time 0.3.17", + "time 0.3.20", "timer", "tokio", "types", @@ -1134,9 +1165,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b" +checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" [[package]] name = "convert_case" @@ -1178,12 +1209,6 @@ dependencies = [ "libc", ] -[[package]] -name = "cpuid-bool" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" - [[package]] name = "crc" version = "3.0.1" @@ -1246,9 +1271,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" dependencies = [ "cfg-if", "crossbeam-utils", @@ -1256,9 +1281,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -1267,22 +1292,22 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.13" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ "autocfg 1.1.0", "cfg-if", "crossbeam-utils", - "memoffset 0.7.1", + "memoffset 0.8.0", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if", ] @@ -1312,6 +1337,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", + "rand_core 0.6.4", "typenum", ] @@ -1325,16 +1351,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "crypto-mac" -version 
= "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a" -dependencies = [ - "generic-array", - "subtle", -] - [[package]] name = "crypto-mac" version = "0.11.1" @@ -1347,9 +1363,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af91f40b7355f82b0a891f50e70399475945bb0b0da4f1700ce60761c9d3e359" +checksum = "0b015497079b9a9d69c02ad25de6c0a6edef051ea6360a327d0bd05802ef64ad" dependencies = [ "csv-core", "itoa", @@ -1366,15 +1382,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "ctr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" -dependencies = [ - "cipher 0.2.5", -] - [[package]] name = "ctr" version = "0.8.0" @@ -1384,6 +1391,15 @@ dependencies = [ "cipher 0.3.0", ] +[[package]] +name = "ctr" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" +dependencies = [ + "cipher 0.4.4", +] + [[package]] name = "ctrlc" version = "3.2.5" @@ -1409,9 +1425,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0-rc.0" +version = "4.0.0-rc.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da00a7a9a4eb92a0a0f8e75660926d48f0d0f3c537e455c457bcdaa1e16b1ac" +checksum = "8d4ba9852b42210c7538b75484f9daa0655e9a3ac04f693747bb0f02cf3cfe16" dependencies = [ "cfg-if", "fiat-crypto", @@ -1423,9 +1439,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.90" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90d59d9acd2a682b4e40605a242f6670eaa58c5957471cbf85e8aa6a0b97a5e8" +checksum = 
"9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" dependencies = [ "cc", "cxxbridge-flags", @@ -1435,9 +1451,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.90" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebfa40bda659dd5c864e65f4c9a2b0aff19bea56b017b9b77c73d3766a453a38" +checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" dependencies = [ "cc", "codespan-reporting", @@ -1450,15 +1466,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.90" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "457ce6757c5c70dc6ecdbda6925b958aae7f959bda7d8fb9bde889e34a09dc03" +checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" [[package]] name = "cxxbridge-macro" -version = "1.0.90" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebf883b7aacd7b2aeb2a7b338648ee19f57c140d4ee8e52c68979c6b2f7f2263" +checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" dependencies = [ "proc-macro2", "quote", @@ -1477,12 +1493,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0808e1bd8671fb44a113a14e13497557533369847788fa2ae912b6ebfce9fa8" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" dependencies = [ - "darling_core 0.14.3", - "darling_macro 0.14.3", + "darling_core 0.14.4", + "darling_macro 0.14.4", ] [[package]] @@ -1501,9 +1517,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "001d80444f28e193f30c2f293455da62dcf9a6b29918a4253152ae2b1de592cb" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", 
"ident_case", @@ -1526,11 +1542,11 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b36230598a2d5de7ec1c6f51f72d8a99a9208daff41de2084d06e3fd3ea56685" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ - "darling_core 0.14.3", + "darling_core 0.14.4", "quote", "syn", ] @@ -1656,11 +1672,11 @@ dependencies = [ [[package]] name = "der-parser" -version = "8.1.0" +version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d4bc9b0db0a0df9ae64634ac5bdefb7afcb534e182275ca0beadbe486701c1" +checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" dependencies = [ - "asn1-rs 0.5.1", + "asn1-rs 0.5.2", "displaydoc", "nom 7.1.3", "num-bigint", @@ -1704,7 +1720,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" dependencies = [ - "darling 0.14.3", + "darling 0.14.4", "proc-macro2", "quote", "syn", @@ -1748,7 +1764,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.3", + "block-buffer 0.10.4", "crypto-common", "subtle", ] @@ -1850,9 +1866,9 @@ dependencies = [ [[package]] name = "dtoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c00704156a7de8df8da0911424e30c2049957b0a714542a44e05fe693dd85313" +checksum = "65d09067bfacaa79114679b279d7f5885b53295b1e2cfb4e79c8e4bd3d633169" [[package]] name = "ecdsa" @@ -2384,7 +2400,7 @@ dependencies = [ "hex", "integer-sqrt", "multiaddr 0.14.0", - "multihash", + "multihash 0.16.3", "rand 0.8.5", "serde", "serde_json", @@ -2616,18 +2632,18 @@ checksum = 
"ec54ac60a7f2ee9a97cad9946f9bf629a3bc6a7ae59e68983dc9318f5a54b81a" [[package]] name = "fiat-crypto" -version = "0.1.17" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a214f5bb88731d436478f3ae1f8a277b62124089ba9fb67f4f93fb100ef73c90" +checksum = "93ace6ec7cc19c8ed33a32eaa9ea692d7faea05006b5356b9e2b668ec4bc3955" [[package]] name = "field-offset" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e1c54951450cbd39f3dbcf1005ac413b49487dabf18a720ad2383eccfeffb92" +checksum = "a3cf3a800ff6e860c863ca6d4b16fd999db8b752819c1606884047b73e468535" dependencies = [ - "memoffset 0.6.5", - "rustc_version 0.3.3", + "memoffset 0.8.0", + "rustc_version 0.4.0", ] [[package]] @@ -2749,9 +2765,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" +checksum = "531ac96c6ff5fd7c62263c5e3c67a603af4fcaee2e1a0ae5565ba3a11e69e549" dependencies = [ "futures-channel", "futures-core", @@ -2764,9 +2780,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" +checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" dependencies = [ "futures-core", "futures-sink", @@ -2774,15 +2790,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" +checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd" [[package]] name = "futures-executor" -version = 
"0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" +checksum = "1997dd9df74cdac935c76252744c1ed5794fac083242ea4fe77ef3ed60ba0f83" dependencies = [ "futures-core", "futures-task", @@ -2792,9 +2808,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" +checksum = "89d422fa3cbe3b40dca574ab087abb5bc98258ea57eea3fd6f1fa7162c778b91" [[package]] name = "futures-lite" @@ -2813,9 +2829,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" +checksum = "3eb14ed937631bd8b8b8977f2c198443447a8355b6e3ca599f38c975e5a963b6" dependencies = [ "proc-macro2", "quote", @@ -2835,15 +2851,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" +checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" [[package]] name = "futures-task" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" +checksum = "fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" [[package]] name = "futures-timer" @@ -2853,9 +2869,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" +checksum = "3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" dependencies = [ "futures-channel", "futures-core", @@ -2935,16 +2951,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "ghash" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" -dependencies = [ - "opaque-debug", - "polyval 0.4.5", -] - [[package]] name = "ghash" version = "0.4.4" @@ -2956,10 +2962,20 @@ dependencies = [ ] [[package]] -name = "gimli" -version = "0.27.1" +name = "ghash" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "221996f774192f0f718773def8201c4ae31f02616a54ccfc2d358bb0e5cefdec" +checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +dependencies = [ + "opaque-debug", + "polyval 0.6.0", +] + +[[package]] +name = "gimli" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" [[package]] name = "git-version" @@ -3002,9 +3018,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" dependencies = [ "bytes", "fnv", @@ -3165,16 +3181,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "hmac" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" -dependencies = [ - "crypto-mac 0.10.1", - "digest 0.9.0", -] - [[package]] name = "hmac" version = "0.11.0" @@ -3218,9 +3224,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.8" 
+version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", @@ -3331,9 +3337,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.24" +version = "0.14.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" +checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" dependencies = [ "bytes", "futures-channel", @@ -3559,6 +3565,15 @@ dependencies = [ "hashbrown 0.12.3", ] +[[package]] +name = "inout" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +dependencies = [ + "generic-array", +] + [[package]] name = "instant" version = "0.1.12" @@ -3610,9 +3625,9 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1abeb7a0dd0f8181267ff8adc397075586500b81b28a73e8a0208b00fc170fb3" +checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" dependencies = [ "libc", "windows-sys 0.45.0", @@ -3647,9 +3662,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "jemalloc-ctl" @@ -3829,15 +3844,15 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.139" +version = "0.2.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" [[package]] name = "libflate" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05605ab2bce11bcfc0e9c635ff29ef8b2ea83f29be257ee7d730cac3ee373093" +checksum = "97822bf791bd4d5b403713886a5fbe8bf49520fe78e323b0dc480ca1a03e50b0" dependencies = [ "adler32", "crc32fast", @@ -3846,9 +3861,9 @@ dependencies = [ [[package]] name = "libflate_lz77" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39a734c0493409afcd49deee13c006a04e3586b9761a03543c6272c9c51f2f5a" +checksum = "a52d3a8bfc85f250440e4424db7d857e241a3aebbbe301f3eb606ab15c39acbf" dependencies = [ "rle-decode-fast", ] @@ -3892,9 +3907,9 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.50.0" +version = "0.50.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e0a0d2f693675f49ded13c5d510c48b78069e23cbd9108d7ccd59f6dc568819" +checksum = "9c7b0104790be871edcf97db9bd2356604984e623a08d825c3f27852290266b8" dependencies = [ "bytes", "futures", @@ -3940,7 +3955,7 @@ dependencies = [ "libsecp256k1", "log", "multiaddr 0.14.0", - "multihash", + "multihash 0.16.3", "multistream-select 0.11.0", "p256", "parking_lot 0.12.1", @@ -3974,7 +3989,7 @@ dependencies = [ "libsecp256k1", "log", "multiaddr 0.16.0", - "multihash", + "multihash 0.16.3", "multistream-select 0.12.1", "once_cell", "p256", @@ -3993,6 +4008,34 @@ dependencies = [ "zeroize", ] +[[package]] +name = "libp2p-core" +version = "0.39.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b7f8b7d65c070a5a1b5f8f0510648189da08f787b8963f8e21219e0710733af" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "instant", + "libp2p-identity", + "log", + "multiaddr 0.17.0", + "multihash 0.17.0", + "multistream-select 
0.12.1", + "once_cell", + "parking_lot 0.12.1", + "pin-project", + "quick-protobuf", + "rand 0.8.5", + "rw-stream-sink", + "smallvec", + "thiserror", + "unsigned-varint 0.7.1", + "void", +] + [[package]] name = "libp2p-dns" version = "0.38.0" @@ -4058,6 +4101,25 @@ dependencies = [ "void", ] +[[package]] +name = "libp2p-identity" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff6c9cb71e2333d31f18e7556b9a5f1d0a2e013effc9325e36f436be65fe7bd2" +dependencies = [ + "bs58", + "ed25519-dalek", + "log", + "multiaddr 0.17.0", + "multihash 0.17.0", + "prost", + "prost-build", + "quick-protobuf", + "rand 0.8.5", + "thiserror", + "zeroize", +] + [[package]] name = "libp2p-mdns" version = "0.42.0" @@ -4221,13 +4283,14 @@ dependencies = [ [[package]] name = "libp2p-tls" -version = "0.1.0-alpha" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7905ce0d040576634e8a3229a7587cc8beab83f79db6023800f1792895defa8" +checksum = "ff08d13d0dc66e5e9ba6279c1de417b84fa0d0adc3b03e5732928c180ec02781" dependencies = [ "futures", "futures-rustls", - "libp2p-core 0.38.0", + "libp2p-core 0.39.1", + "libp2p-identity", "rcgen 0.10.0", "ring", "rustls 0.20.8", @@ -4253,7 +4316,7 @@ dependencies = [ "libp2p-core 0.38.0", "libp2p-noise", "log", - "multihash", + "multihash 0.16.3", "prost", "prost-build", "prost-codec", @@ -4685,9 +4748,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg 1.1.0", ] @@ -4835,7 +4898,7 @@ dependencies = [ "bs58", "byteorder", "data-encoding", - "multihash", + "multihash 0.16.3", "percent-encoding", "serde", "static_assertions", @@ -4853,7 +4916,25 @@ dependencies = [ "byteorder", 
"data-encoding", "multibase", - "multihash", + "multihash 0.16.3", + "percent-encoding", + "serde", + "static_assertions", + "unsigned-varint 0.7.1", + "url", +] + +[[package]] +name = "multiaddr" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b53e0cc5907a5c216ba6584bf74be8ab47d6d6289f72793b2dddbf15dc3bf8c" +dependencies = [ + "arrayref", + "byteorder", + "data-encoding", + "multibase", + "multihash 0.17.0", "percent-encoding", "serde", "static_assertions", @@ -4885,6 +4966,19 @@ dependencies = [ "unsigned-varint 0.7.1", ] +[[package]] +name = "multihash" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" +dependencies = [ + "core2", + "digest 0.10.6", + "multihash-derive", + "sha2 0.10.6", + "unsigned-varint 0.7.1", +] + [[package]] name = "multihash-derive" version = "0.8.1" @@ -5024,9 +5118,9 @@ dependencies = [ [[package]] name = "netlink-sys" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "260e21fbb6f3d253a14df90eb0000a6066780a15dd901a7519ce02d77a94985b" +checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411" dependencies = [ "bytes", "futures", @@ -5277,7 +5371,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" dependencies = [ - "asn1-rs 0.5.1", + "asn1-rs 0.5.2", ] [[package]] @@ -5555,9 +5649,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" +checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" [[package]] name = "pbkdf2" @@ -5607,16 +5701,6 @@ version = "2.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" -[[package]] -name = "pest" -version = "2.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028accff104c4e513bad663bbcd2ad7cfd5304144404c31ed0a77ac103d00660" -dependencies = [ - "thiserror", - "ucd-trie", -] - [[package]] name = "petgraph" version = "0.6.3" @@ -5733,16 +5817,18 @@ dependencies = [ [[package]] name = "polling" -version = "2.5.2" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6" +checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" dependencies = [ "autocfg 1.1.0", + "bitflags", "cfg-if", + "concurrent-queue", "libc", "log", - "wepoll-ffi", - "windows-sys 0.42.0", + "pin-project-lite 0.2.9", + "windows-sys 0.45.0", ] [[package]] @@ -5753,18 +5839,7 @@ checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" dependencies = [ "cpufeatures", "opaque-debug", - "universal-hash", -] - -[[package]] -name = "polyval" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" -dependencies = [ - "cpuid-bool", - "opaque-debug", - "universal-hash", + "universal-hash 0.4.1", ] [[package]] @@ -5776,7 +5851,19 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug", - "universal-hash", + "universal-hash 0.4.1", +] + +[[package]] +name = "polyval" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef234e08c11dfcb2e56f79fd70f6f2eb7f025c0ce2333e82f4f0518ecad30c6" +dependencies = [ + "cfg-if", + "cpufeatures", + "opaque-debug", + "universal-hash 0.5.0", ] [[package]] @@ -5787,9 +5874,9 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = 
"prettyplease" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97e3215779627f01ee256d2fad52f3d95e8e1c11e9fc6fd08f7cd455d5d5c78" +checksum = "4ebcd279d20a4a0a2404a33056388e950504d891c855c7975b9a8fef75f3bf04" dependencies = [ "proc-macro2", "syn", @@ -5864,9 +5951,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.51" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" +checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" dependencies = [ "unicode-ident", ] @@ -5923,9 +6010,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.11.6" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21dc42e00223fc37204bd4aa177e69420c604ca4a183209a8f9de30c6d934698" +checksum = "e48e50df39172a3e7eb17e14642445da64996989bc212b583015435d39a58537" dependencies = [ "bytes", "prost-derive", @@ -5933,9 +6020,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.6" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f8ad728fb08fe212df3c05169e940fbb6d9d16a877ddde14644a983ba2012e" +checksum = "2c828f93f5ca4826f97fedcbd3f9a536c16b12cff3dbbb4a007f932bbad95b12" dependencies = [ "bytes", "heck", @@ -5968,9 +6055,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.11.6" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bda8c0881ea9f722eb9629376db3d0b903b462477c1aafcb0566610ac28ac5d" +checksum = "4ea9b0f8cbe5e15a8a042d030bd96668db28ecb567ec37d691971ff5731d2b1b" dependencies = [ "anyhow", "itertools", @@ -5981,11 +6068,10 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.11.6" +version = "0.11.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e0526209433e96d83d750dd81a99118edbc55739e7e61a46764fd2ad537788" +checksum = "379119666929a1afd7a043aa6cf96fa67a6dce9af60c88095a4686dbce4c9c88" dependencies = [ - "bytes", "prost", ] @@ -6033,6 +6119,15 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +[[package]] +name = "quick-protobuf" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d6da84cc204722a989e01ba2f6e1e276e190f22263d0cb6ce8526fcdb0d2e1f" +dependencies = [ + "byteorder", +] + [[package]] name = "quickcheck" version = "0.9.2" @@ -6087,9 +6182,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.23" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] @@ -6209,9 +6304,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", @@ -6219,9 +6314,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.2" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -6237,7 +6332,7 @@ checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" dependencies = [ "pem", "ring", - "time 0.3.17", + 
"time 0.3.20", "x509-parser 0.13.2", "yasna", ] @@ -6250,7 +6345,7 @@ checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", "ring", - "time 0.3.17", + "time 0.3.20", "yasna", ] @@ -6498,22 +6593,13 @@ dependencies = [ "semver 0.9.0", ] -[[package]] -name = "rustc_version" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" -dependencies = [ - "semver 0.11.0", -] - [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.16", + "semver 1.0.17", ] [[package]] @@ -6575,9 +6661,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "rw-stream-sink" @@ -6592,9 +6678,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "safe_arith" @@ -6659,9 +6745,9 @@ dependencies = [ [[package]] name = "scheduled-thread-pool" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "977a7519bff143a44f842fd07e80ad1329295bd71686457f18e496736f4bf9bf" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ "parking_lot 0.12.1", ] @@ -6680,9 +6766,9 @@ checksum = 
"d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" [[package]] name = "scrypt" @@ -6789,23 +6875,14 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ - "semver-parser 0.7.0", + "semver-parser", ] [[package]] name = "semver" -version = "0.11.0" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" -dependencies = [ - "semver-parser 0.10.2", -] - -[[package]] -name = "semver" -version = "1.0.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "semver-parser" @@ -6813,15 +6890,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -[[package]] -name = "semver-parser" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" -dependencies = [ - "pest", -] - [[package]] name = "send_wrapper" version = "0.6.0" @@ -6838,9 +6906,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.152" +version = "1.0.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = 
"71f2b4817415c6d4210bfe1c7bfcf4801b2d904cb4d0e1a8fdb651013c9e86b8" dependencies = [ "serde_derive", ] @@ -6867,9 +6935,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "d071a94a3fac4aff69d023a7f411e33f40f3483f8c5190b1953822b6b76d7630" dependencies = [ "proc-macro2", "quote", @@ -6878,9 +6946,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" +checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" dependencies = [ "itoa", "ryu", @@ -6889,9 +6957,9 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a5ec9fa74a20ebbe5d9ac23dac1fc96ba0ecfe9f50f2843b52e537b10fbcb4e" +checksum = "395627de918015623b32e7669714206363a7fc00382bf477e72c1f7533e8eafc" dependencies = [ "proc-macro2", "quote", @@ -7068,7 +7136,7 @@ dependencies = [ "num-bigint", "num-traits", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -7091,9 +7159,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg 1.1.0", ] @@ -7194,7 +7262,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -7239,7 +7307,7 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -7290,14 +7358,14 @@ checksum = 
"5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ba5f4d4ff12bdb6a169ed51b7c48c0e0ac4b0b4b31012b2571e97d78d3201d" +checksum = "5ccba027ba85743e09d15c03296797cad56395089b832b48b5a5217880f57733" dependencies = [ "aes-gcm 0.9.4", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-rc.0", + "curve25519-dalek 4.0.0-rc.1", "rand_core 0.6.4", "ring", "rustc_version 0.4.0", @@ -7307,9 +7375,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", @@ -7548,9 +7616,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.107" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", @@ -7722,18 +7790,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" dependencies = [ "proc-macro2", "quote", @@ -7772,9 +7840,9 
@@ dependencies = [ [[package]] name = "time" -version = "0.3.17" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" +checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" dependencies = [ "itoa", "libc", @@ -7792,9 +7860,9 @@ checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "time-macros" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" dependencies = [ "time-core", ] @@ -7938,9 +8006,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" dependencies = [ "futures-core", "pin-project-lite 0.2.9", @@ -8368,12 +8436,6 @@ dependencies = [ "tree_hash_derive", ] -[[package]] -name = "ucd-trie" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" - [[package]] name = "uint" version = "0.9.5" @@ -8404,15 +8466,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" +checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -8445,6 +8507,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "universal-hash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d3160b73c9a19f7e2939a2fdad446c57c1bbbbf4d919d3213ff1267a580d8b5" +dependencies = [ + "crypto-common", + "subtle", +] + [[package]] name = "unsigned-varint" version = "0.6.0" @@ -8970,7 +9042,7 @@ dependencies = [ "sha2 0.10.6", "stun", "thiserror", - "time 0.3.17", + "time 0.3.20", "tokio", "turn", "url", @@ -9002,22 +9074,22 @@ dependencies = [ [[package]] name = "webrtc-dtls" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7021987ae0a2ed6c8cd33f68e98e49bb6e74ffe9543310267b48a1bbe3900e5f" +checksum = "942be5bd85f072c3128396f6e5a9bfb93ca8c1939ded735d177b7bcba9a13d05" dependencies = [ "aes 0.6.0", - "aes-gcm 0.8.0", + "aes-gcm 0.10.1", "async-trait", "bincode", "block-modes", "byteorder", "ccm", "curve25519-dalek 3.2.0", - "der-parser 8.1.0", + "der-parser 8.2.0", "elliptic-curve", "hkdf", - "hmac 0.10.1", + "hmac 0.12.1", "log", "oid-registry 0.6.1", "p256", @@ -9029,8 +9101,8 @@ dependencies = [ "rustls 0.19.1", "sec1", "serde", - "sha-1 0.9.8", - "sha2 0.9.9", + "sha1", + "sha2 0.10.6", "signature", "subtle", "thiserror", @@ -9156,15 +9228,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" -dependencies = [ - "cc", -] - [[package]] name = "which" version = "4.4.0" @@ -9251,12 +9314,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ 
"windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.1", - "windows_i686_gnu 0.42.1", - "windows_i686_msvc 0.42.1", - "windows_x86_64_gnu 0.42.1", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.1", + "windows_x86_64_msvc 0.42.2", ] [[package]] @@ -9270,24 +9333,24 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.1", - "windows_i686_gnu 0.42.1", - "windows_i686_msvc 0.42.1", - "windows_x86_64_gnu 0.42.1", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.1", + "windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_msvc" @@ -9297,9 +9360,9 @@ checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" [[package]] name = "windows_aarch64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_i686_gnu" @@ -9309,9 +9372,9 @@ checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" 
[[package]] name = "windows_i686_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_msvc" @@ -9321,9 +9384,9 @@ checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" [[package]] name = "windows_i686_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_x86_64_gnu" @@ -9333,15 +9396,15 @@ checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" [[package]] name = "windows_x86_64_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_msvc" @@ -9351,9 +9414,9 @@ checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" [[package]] name = "windows_x86_64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "winreg" @@ -9436,7 +9499,7 @@ 
dependencies = [ "ring", "rusticata-macros", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -9445,16 +9508,16 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" dependencies = [ - "asn1-rs 0.5.1", + "asn1-rs 0.5.2", "base64 0.13.1", "data-encoding", - "der-parser 8.1.0", + "der-parser 8.2.0", "lazy_static", "nom 7.1.3", "oid-registry 0.6.1", "rusticata-macros", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -9501,7 +9564,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aed2e7a52e3744ab4d0c05c20aa065258e84c49fd4226f5191b2ed29712710b4" dependencies = [ - "time 0.3.17", + "time 0.3.20", ] [[package]] diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9802935b22..11eda4dead 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -432,6 +432,46 @@ pub struct BeaconChain { type BeaconBlockAndState = (BeaconBlock, BeaconState); impl BeaconChain { + /// Checks if a block is finalized. + /// The finalization check is done with the block slot. The block root is used to verify that + /// the finalized slot is in the canonical chain. + pub fn is_finalized_block( + &self, + block_root: &Hash256, + block_slot: Slot, + ) -> Result { + let finalized_slot = self + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let is_canonical = self + .block_root_at_slot(block_slot, WhenSlotSkipped::None)? + .map_or(false, |canonical_root| block_root == &canonical_root); + Ok(block_slot <= finalized_slot && is_canonical) + } + + /// Checks if a state is finalized. + /// The finalization check is done with the slot. The state root is used to verify that + /// the finalized state is in the canonical chain. 
+ pub fn is_finalized_state( + &self, + state_root: &Hash256, + state_slot: Slot, + ) -> Result { + let finalized_slot = self + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let is_canonical = self + .state_root_at_slot(state_slot)? + .map_or(false, |canonical_root| state_root == &canonical_root); + Ok(state_slot <= finalized_slot && is_canonical) + } + /// Persists the head tracker and fork choice. /// /// We do it atomically even though no guarantees need to be made about blocks from diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 9febae5b19..5c3e420839 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -114,8 +114,10 @@ fn compute_historic_attester_duties( )?; (state, execution_optimistic) } else { - StateId::from_slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())) - .state(chain)? + let (state, execution_optimistic, _finalized) = + StateId::from_slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())) + .state(chain)?; + (state, execution_optimistic) }; // Sanity-check the state lookup. diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 5c785fe651..f1a42b8744 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -4,13 +4,15 @@ use eth2::types::BlockId as CoreBlockId; use std::fmt; use std::str::FromStr; use std::sync::Arc; -use types::{Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot}; +use types::{EthSpec, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot}; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given /// `BlockId`. 
#[derive(Debug)] pub struct BlockId(pub CoreBlockId); +type Finalized = bool; + impl BlockId { pub fn from_slot(slot: Slot) -> Self { Self(CoreBlockId::Slot(slot)) @@ -24,7 +26,7 @@ impl BlockId { pub fn root( &self, chain: &BeaconChain, - ) -> Result<(Hash256, ExecutionOptimistic), warp::Rejection> { + ) -> Result<(Hash256, ExecutionOptimistic, Finalized), warp::Rejection> { match &self.0 { CoreBlockId::Head => { let (cached_head, execution_status) = chain @@ -34,22 +36,23 @@ impl BlockId { Ok(( cached_head.head_block_root(), execution_status.is_optimistic_or_invalid(), + false, )) } - CoreBlockId::Genesis => Ok((chain.genesis_block_root, false)), + CoreBlockId::Genesis => Ok((chain.genesis_block_root, false, true)), CoreBlockId::Finalized => { let finalized_checkpoint = chain.canonical_head.cached_head().finalized_checkpoint(); let (_slot, execution_optimistic) = checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?; - Ok((finalized_checkpoint.root, execution_optimistic)) + Ok((finalized_checkpoint.root, execution_optimistic, true)) } CoreBlockId::Justified => { let justified_checkpoint = chain.canonical_head.cached_head().justified_checkpoint(); let (_slot, execution_optimistic) = checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?; - Ok((justified_checkpoint.root, execution_optimistic)) + Ok((justified_checkpoint.root, execution_optimistic, false)) } CoreBlockId::Slot(slot) => { let execution_optimistic = chain @@ -66,7 +69,14 @@ impl BlockId { )) }) })?; - Ok((root, execution_optimistic)) + let finalized = *slot + <= chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + Ok((root, execution_optimistic, finalized)) } CoreBlockId::Root(root) => { // This matches the behaviour of other consensus clients (e.g. Teku). 
@@ -88,7 +98,20 @@ impl BlockId { .is_optimistic_or_invalid_block(root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)?; - Ok((*root, execution_optimistic)) + let blinded_block = chain + .get_blinded_block(root) + .map_err(warp_utils::reject::beacon_chain_error)? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + )) + })?; + let block_slot = blinded_block.slot(); + let finalized = chain + .is_finalized_block(root, block_slot) + .map_err(warp_utils::reject::beacon_chain_error)?; + Ok((*root, execution_optimistic, finalized)) } else { Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -103,7 +126,14 @@ impl BlockId { pub fn blinded_block( &self, chain: &BeaconChain, - ) -> Result<(SignedBlindedBeaconBlock, ExecutionOptimistic), warp::Rejection> { + ) -> Result< + ( + SignedBlindedBeaconBlock, + ExecutionOptimistic, + Finalized, + ), + warp::Rejection, + > { match &self.0 { CoreBlockId::Head => { let (cached_head, execution_status) = chain @@ -113,10 +143,11 @@ impl BlockId { Ok(( cached_head.snapshot.beacon_block.clone_as_blinded(), execution_status.is_optimistic_or_invalid(), + false, )) } CoreBlockId::Slot(slot) => { - let (root, execution_optimistic) = self.root(chain)?; + let (root, execution_optimistic, finalized) = self.root(chain)?; chain .get_blinded_block(&root) .map_err(warp_utils::reject::beacon_chain_error) @@ -128,7 +159,7 @@ impl BlockId { slot ))); } - Ok((block, execution_optimistic)) + Ok((block, execution_optimistic, finalized)) } None => Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -137,7 +168,7 @@ impl BlockId { }) } _ => { - let (root, execution_optimistic) = self.root(chain)?; + let (root, execution_optimistic, finalized) = self.root(chain)?; let block = chain .get_blinded_block(&root) .map_err(warp_utils::reject::beacon_chain_error) @@ -149,7 +180,7 @@ impl BlockId { )) }) })?; 
- Ok((block, execution_optimistic)) + Ok((block, execution_optimistic, finalized)) } } } @@ -158,7 +189,14 @@ impl BlockId { pub async fn full_block( &self, chain: &BeaconChain, - ) -> Result<(Arc>, ExecutionOptimistic), warp::Rejection> { + ) -> Result< + ( + Arc>, + ExecutionOptimistic, + Finalized, + ), + warp::Rejection, + > { match &self.0 { CoreBlockId::Head => { let (cached_head, execution_status) = chain @@ -168,10 +206,11 @@ impl BlockId { Ok(( cached_head.snapshot.beacon_block.clone(), execution_status.is_optimistic_or_invalid(), + false, )) } CoreBlockId::Slot(slot) => { - let (root, execution_optimistic) = self.root(chain)?; + let (root, execution_optimistic, finalized) = self.root(chain)?; chain .get_block(&root) .await @@ -184,7 +223,7 @@ impl BlockId { slot ))); } - Ok((Arc::new(block), execution_optimistic)) + Ok((Arc::new(block), execution_optimistic, finalized)) } None => Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -193,14 +232,14 @@ impl BlockId { }) } _ => { - let (root, execution_optimistic) = self.root(chain)?; + let (root, execution_optimistic, finalized) = self.root(chain)?; chain .get_block(&root) .await .map_err(warp_utils::reject::beacon_chain_error) .and_then(|block_opt| { block_opt - .map(|block| (Arc::new(block), execution_optimistic)) + .map(|block| (Arc::new(block), execution_optimistic, finalized)) .ok_or_else(|| { warp_utils::reject::custom_not_found(format!( "beacon block with root {}", diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index b36adc5c08..aa52466e26 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -63,7 +63,7 @@ use types::{ SyncCommitteeMessage, SyncContributionData, }; use version::{ - add_consensus_version_header, execution_optimistic_fork_versioned_response, + add_consensus_version_header, execution_optimistic_finalized_fork_versioned_response, fork_versioned_response, inconsistent_fork_rejection, 
unsupported_version_rejection, V1, V2, }; use warp::http::StatusCode; @@ -522,12 +522,13 @@ pub fn serve( .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc>| { blocking_json_task(move || { - let (root, execution_optimistic) = state_id.root(&chain)?; - + let (root, execution_optimistic, finalized) = state_id.root(&chain)?; Ok(root) .map(api_types::RootData::from) .map(api_types::GenericResponse::from) - .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }); @@ -538,11 +539,12 @@ pub fn serve( .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc>| { blocking_json_task(move || { - let (fork, execution_optimistic) = - state_id.fork_and_execution_optimistic(&chain)?; - Ok(api_types::ExecutionOptimisticResponse { + let (fork, execution_optimistic, finalized) = + state_id.fork_and_execution_optimistic_and_finalized(&chain)?; + Ok(api_types::ExecutionOptimisticFinalizedResponse { data: fork, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }); @@ -554,23 +556,26 @@ pub fn serve( .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc>| { blocking_json_task(move || { - let (data, execution_optimistic) = state_id.map_state_and_execution_optimistic( - &chain, - |state, execution_optimistic| { - Ok(( - api_types::FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }, - execution_optimistic, - )) - }, - )?; + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + Ok(( + api_types::FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: 
state.finalized_checkpoint(), + }, + execution_optimistic, + finalized, + )) + }, + )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }); @@ -587,10 +592,10 @@ pub fn serve( query_res: Result| { blocking_json_task(move || { let query = query_res?; - let (data, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { Ok(( state .validators() @@ -618,13 +623,15 @@ pub fn serve( }) .collect::>(), execution_optimistic, + finalized, )) }, )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }, @@ -642,10 +649,10 @@ pub fn serve( query_res: Result| { blocking_json_task(move || { let query = query_res?; - let (data, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let epoch = state.current_epoch(); let far_future_epoch = chain.spec.far_future_epoch; @@ -695,13 +702,15 @@ pub fn serve( }) .collect::>(), execution_optimistic, + finalized, )) }, )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }, @@ -720,10 +729,10 @@ pub fn serve( .and_then( |state_id: StateId, chain: Arc>, validator_id: ValidatorId| { blocking_json_task(move || { - let (data, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let 
(data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let index_opt = match &validator_id { ValidatorId::PublicKey(pubkey) => { state.validators().iter().position(|v| v.pubkey == *pubkey) @@ -757,13 +766,15 @@ pub fn serve( )) })?, execution_optimistic, + finalized, )) }, )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }, @@ -778,10 +789,10 @@ pub fn serve( .and_then( |state_id: StateId, chain: Arc>, query: api_types::CommitteesQuery| { blocking_json_task(move || { - let (data, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let current_epoch = state.current_epoch(); let epoch = query.epoch.unwrap_or(current_epoch); @@ -937,12 +948,13 @@ pub fn serve( } } - Ok((response, execution_optimistic)) + Ok((response, execution_optimistic, finalized)) }, )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }, @@ -959,10 +971,10 @@ pub fn serve( chain: Arc>, query: api_types::SyncCommitteesQuery| { blocking_json_task(move || { - let (sync_committee, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (sync_committee, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let current_epoch = state.current_epoch(); let epoch = query.epoch.unwrap_or(current_epoch); Ok(( @@ -972,9 +984,10 
@@ pub fn serve( .map_err(|e| match e { BeaconStateError::SyncCommitteeNotKnown { .. } => { warp_utils::reject::custom_bad_request(format!( - "state at epoch {} has no sync committee for epoch {}", - current_epoch, epoch - )) + "state at epoch {} has no \ + sync committee for epoch {}", + current_epoch, epoch + )) } BeaconStateError::IncorrectStateVariant => { warp_utils::reject::custom_bad_request(format!( @@ -985,6 +998,7 @@ pub fn serve( e => warp_utils::reject::beacon_state_error(e), })?, execution_optimistic, + finalized, )) }, )?; @@ -1006,7 +1020,7 @@ pub fn serve( }; Ok(api_types::GenericResponse::from(response) - .add_execution_optimistic(execution_optimistic)) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) }) }, ); @@ -1020,23 +1034,23 @@ pub fn serve( .and_then( |state_id: StateId, chain: Arc>, query: api_types::RandaoQuery| { blocking_json_task(move || { - let (randao, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (randao, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let epoch = query.epoch.unwrap_or_else(|| state.current_epoch()); let randao = *state.get_randao_mix(epoch).map_err(|e| { warp_utils::reject::custom_bad_request(format!( "epoch out of range: {e:?}" )) })?; - Ok((randao, execution_optimistic)) + Ok((randao, execution_optimistic, finalized)) }, )?; Ok( api_types::GenericResponse::from(api_types::RandaoMix { randao }) - .add_execution_optimistic(execution_optimistic), + .add_execution_optimistic_finalized(execution_optimistic, finalized), ) }) }, @@ -1058,72 +1072,73 @@ pub fn serve( .and_then( |query: api_types::HeadersQuery, chain: Arc>| { blocking_json_task(move || { - let (root, block, execution_optimistic) = match (query.slot, query.parent_root) - { - // No query parameters, return the canonical head block. 
- (None, None) => { - let (cached_head, execution_status) = chain - .canonical_head - .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; - ( - cached_head.head_block_root(), - cached_head.snapshot.beacon_block.clone_as_blinded(), - execution_status.is_optimistic_or_invalid(), - ) - } - // Only the parent root parameter, do a forwards-iterator lookup. - (None, Some(parent_root)) => { - let (parent, execution_optimistic) = - BlockId::from_root(parent_root).blinded_block(&chain)?; - let (root, _slot) = chain - .forwards_iter_block_roots(parent.slot()) - .map_err(warp_utils::reject::beacon_chain_error)? - // Ignore any skip-slots immediately following the parent. - .find(|res| { - res.as_ref().map_or(false, |(root, _)| *root != parent_root) - }) - .transpose() - .map_err(warp_utils::reject::beacon_chain_error)? - .ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "child of block with root {}", - parent_root - )) - })?; - - BlockId::from_root(root) - .blinded_block(&chain) - // Ignore this `execution_optimistic` since the first value has - // more information about the original request. - .map(|(block, _execution_optimistic)| { - (root, block, execution_optimistic) - })? - } - // Slot is supplied, search by slot and optionally filter by - // parent root. - (Some(slot), parent_root_opt) => { - let (root, execution_optimistic) = - BlockId::from_slot(slot).root(&chain)?; - // Ignore the second `execution_optimistic`, the first one is the - // most relevant since it knows that we queried by slot. - let (block, _execution_optimistic) = - BlockId::from_root(root).blinded_block(&chain)?; - - // If the parent root was supplied, check that it matches the block - // obtained via a slot lookup. 
- if let Some(parent_root) = parent_root_opt { - if block.parent_root() != parent_root { - return Err(warp_utils::reject::custom_not_found(format!( - "no canonical block at slot {} with parent root {}", - slot, parent_root - ))); - } + let (root, block, execution_optimistic, finalized) = + match (query.slot, query.parent_root) { + // No query parameters, return the canonical head block. + (None, None) => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + ( + cached_head.head_block_root(), + cached_head.snapshot.beacon_block.clone_as_blinded(), + execution_status.is_optimistic_or_invalid(), + false, + ) } + // Only the parent root parameter, do a forwards-iterator lookup. + (None, Some(parent_root)) => { + let (parent, execution_optimistic, _parent_finalized) = + BlockId::from_root(parent_root).blinded_block(&chain)?; + let (root, _slot) = chain + .forwards_iter_block_roots(parent.slot()) + .map_err(warp_utils::reject::beacon_chain_error)? + // Ignore any skip-slots immediately following the parent. + .find(|res| { + res.as_ref().map_or(false, |(root, _)| *root != parent_root) + }) + .transpose() + .map_err(warp_utils::reject::beacon_chain_error)? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "child of block with root {}", + parent_root + )) + })?; - (root, block, execution_optimistic) - } - }; + BlockId::from_root(root) + .blinded_block(&chain) + // Ignore this `execution_optimistic` since the first value has + // more information about the original request. + .map(|(block, _execution_optimistic, finalized)| { + (root, block, execution_optimistic, finalized) + })? + } + // Slot is supplied, search by slot and optionally filter by + // parent root. 
+ (Some(slot), parent_root_opt) => { + let (root, execution_optimistic, finalized) = + BlockId::from_slot(slot).root(&chain)?; + // Ignore the second `execution_optimistic`, the first one is the + // most relevant since it knows that we queried by slot. + let (block, _execution_optimistic, _finalized) = + BlockId::from_root(root).blinded_block(&chain)?; + + // If the parent root was supplied, check that it matches the block + // obtained via a slot lookup. + if let Some(parent_root) = parent_root_opt { + if block.parent_root() != parent_root { + return Err(warp_utils::reject::custom_not_found(format!( + "no canonical block at slot {} with parent root {}", + slot, parent_root + ))); + } + } + + (root, block, execution_optimistic, finalized) + } + }; let data = api_types::BlockHeaderData { root, @@ -1135,7 +1150,7 @@ pub fn serve( }; Ok(api_types::GenericResponse::from(vec![data]) - .add_execution_optimistic(execution_optimistic)) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) }) }, ); @@ -1153,10 +1168,10 @@ pub fn serve( .and(chain_filter.clone()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - let (root, execution_optimistic) = block_id.root(&chain)?; + let (root, execution_optimistic, finalized) = block_id.root(&chain)?; // Ignore the second `execution_optimistic` since the first one has more // information about the original request. 
- let (block, _execution_optimistic) = + let (block, _execution_optimistic, _finalized) = BlockId::from_root(root).blinded_block(&chain)?; let canonical = chain @@ -1173,8 +1188,9 @@ pub fn serve( }, }; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), data, }) }) @@ -1263,7 +1279,8 @@ pub fn serve( chain: Arc>, accept_header: Option| { async move { - let (block, execution_optimistic) = block_id.full_block(&chain).await?; + let (block, execution_optimistic, finalized) = + block_id.full_block(&chain).await?; let fork_name = block .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; @@ -1279,10 +1296,11 @@ pub fn serve( e )) }), - _ => execution_optimistic_fork_versioned_response( + _ => execution_optimistic_finalized_fork_versioned_response( endpoint_version, fork_name, execution_optimistic, + finalized, block, ) .map(|res| warp::reply::json(&res).into_response()), @@ -1299,12 +1317,11 @@ pub fn serve( .and(warp::path::end()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - let (block, execution_optimistic) = block_id.blinded_block(&chain)?; - + let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; Ok(api_types::GenericResponse::from(api_types::RootData::from( block.canonical_root(), )) - .add_execution_optimistic(execution_optimistic)) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) }) }); @@ -1315,11 +1332,10 @@ pub fn serve( .and(warp::path::end()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - let (block, execution_optimistic) = block_id.blinded_block(&chain)?; - + let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; Ok( api_types::GenericResponse::from(block.message().body().attestations().clone()) - .add_execution_optimistic(execution_optimistic), + 
.add_execution_optimistic_finalized(execution_optimistic, finalized), ) }) }); @@ -1337,7 +1353,8 @@ pub fn serve( chain: Arc>, accept_header: Option| { blocking_response_task(move || { - let (block, execution_optimistic) = block_id.blinded_block(&chain)?; + let (block, execution_optimistic, finalized) = + block_id.blinded_block(&chain)?; let fork_name = block .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; @@ -1355,10 +1372,11 @@ pub fn serve( }), _ => { // Post as a V2 endpoint so we return the fork version. - execution_optimistic_fork_versioned_response( + execution_optimistic_finalized_fork_versioned_response( V2, fork_name, execution_optimistic, + finalized, block, ) .map(|res| warp::reply::json(&res).into_response()) @@ -1899,11 +1917,13 @@ pub fn serve( .and(warp::path::end()) .and_then(|chain: Arc>, block_id: BlockId| { blocking_json_task(move || { - let (rewards, execution_optimistic) = + let (rewards, execution_optimistic, finalized) = standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?; Ok(rewards) .map(api_types::GenericResponse::from) - .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }); @@ -1982,14 +2002,16 @@ pub fn serve( validators: Vec, log: Logger| { blocking_json_task(move || { - let (rewards, execution_optimistic) = + let (rewards, execution_optimistic, finalized) = sync_committee_rewards::compute_sync_committee_rewards( chain, block_id, validators, log, )?; Ok(rewards) .map(api_types::GenericResponse::from) - .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }, ); @@ -2072,7 +2094,7 @@ pub fn serve( // We can ignore the optimistic status for the "fork" since it's a // specification constant that doesn't change across competing heads of the // beacon chain. 
- let (state, _execution_optimistic) = state_id.state(&chain)?; + let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?; let fork_name = state .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; @@ -2090,16 +2112,17 @@ pub fn serve( )) }) } - _ => state_id.map_state_and_execution_optimistic( + _ => state_id.map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let fork_name = state .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; - let res = execution_optimistic_fork_versioned_response( + let res = execution_optimistic_finalized_fork_versioned_response( endpoint_version, fork_name, execution_optimistic, + finalized, &state, )?; Ok(add_consensus_version_header( @@ -3483,7 +3506,7 @@ pub fn serve( .and_then(|state_id: StateId, chain: Arc>| { blocking_response_task(move || { // This debug endpoint provides no indication of optimistic status. - let (state, _execution_optimistic) = state_id.state(&chain)?; + let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?; Response::builder() .status(200) .header("Content-Type", "application/ssz") diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 877d64e20f..7e946b89e7 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -209,7 +209,9 @@ fn compute_historic_proposer_duties( .map_err(warp_utils::reject::beacon_chain_error)?; (state, execution_optimistic) } else { - StateId::from_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)? + let (state, execution_optimistic, _finalized) = + StateId::from_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)?; + (state, execution_optimistic) }; // Ensure the state lookup was correct. 
diff --git a/beacon_node/http_api/src/standard_block_rewards.rs b/beacon_node/http_api/src/standard_block_rewards.rs index b3c90d08a4..de7e5eb7d3 100644 --- a/beacon_node/http_api/src/standard_block_rewards.rs +++ b/beacon_node/http_api/src/standard_block_rewards.rs @@ -10,8 +10,8 @@ use warp_utils::reject::beacon_chain_error; pub fn compute_beacon_block_rewards( chain: Arc>, block_id: BlockId, -) -> Result<(StandardBlockReward, ExecutionOptimistic), warp::Rejection> { - let (block, execution_optimistic) = block_id.blinded_block(&chain)?; +) -> Result<(StandardBlockReward, ExecutionOptimistic, bool), warp::Rejection> { + let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; let block_ref = block.message(); @@ -23,5 +23,5 @@ pub fn compute_beacon_block_rewards( .compute_beacon_block_reward(block_ref, block_root, &mut state) .map_err(beacon_chain_error)?; - Ok((rewards, execution_optimistic)) + Ok((rewards, execution_optimistic, finalized)) } diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 44354217bc..9e4aadef17 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -10,6 +10,9 @@ use types::{BeaconState, Checkpoint, EthSpec, Fork, Hash256, Slot}; #[derive(Debug)] pub struct StateId(pub CoreStateId); +// More clarity when returning if the state is finalized or not in the root function. 
+type Finalized = bool; + impl StateId { pub fn from_slot(slot: Slot) -> Self { Self(CoreStateId::Slot(slot)) @@ -19,8 +22,8 @@ impl StateId { pub fn root( &self, chain: &BeaconChain, - ) -> Result<(Hash256, ExecutionOptimistic), warp::Rejection> { - let (slot, execution_optimistic) = match &self.0 { + ) -> Result<(Hash256, ExecutionOptimistic, Finalized), warp::Rejection> { + let (slot, execution_optimistic, finalized) = match &self.0 { CoreStateId::Head => { let (cached_head, execution_status) = chain .canonical_head @@ -29,24 +32,36 @@ impl StateId { return Ok(( cached_head.head_state_root(), execution_status.is_optimistic_or_invalid(), + false, )); } - CoreStateId::Genesis => return Ok((chain.genesis_state_root, false)), + CoreStateId::Genesis => return Ok((chain.genesis_state_root, false, true)), CoreStateId::Finalized => { let finalized_checkpoint = chain.canonical_head.cached_head().finalized_checkpoint(); - checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)? + let (slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?; + (slot, execution_optimistic, true) } CoreStateId::Justified => { let justified_checkpoint = chain.canonical_head.cached_head().justified_checkpoint(); - checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)? 
+ let (slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?; + (slot, execution_optimistic, false) } CoreStateId::Slot(slot) => ( *slot, chain .is_optimistic_or_invalid_head() .map_err(warp_utils::reject::beacon_chain_error)?, + *slot + <= chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), ), CoreStateId::Root(root) => { if let Some(hot_summary) = chain @@ -61,7 +76,10 @@ impl StateId { .is_optimistic_or_invalid_block_no_fallback(&hot_summary.latest_block_root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)?; - return Ok((*root, execution_optimistic)); + let finalized = chain + .is_finalized_state(root, hot_summary.slot) + .map_err(warp_utils::reject::beacon_chain_error)?; + return Ok((*root, execution_optimistic, finalized)); } else if let Some(_cold_state_slot) = chain .store .load_cold_state_slot(root) @@ -77,7 +95,7 @@ impl StateId { .is_optimistic_or_invalid_block_no_fallback(&finalized_root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)?; - return Ok((*root, execution_optimistic)); + return Ok((*root, execution_optimistic, true)); } else { return Err(warp_utils::reject::custom_not_found(format!( "beacon state for state root {}", @@ -94,7 +112,7 @@ impl StateId { warp_utils::reject::custom_not_found(format!("beacon state at slot {}", slot)) })?; - Ok((root, execution_optimistic)) + Ok((root, execution_optimistic, finalized)) } /// Return the `fork` field of the state identified by `self`. 
@@ -103,9 +121,25 @@ impl StateId { &self, chain: &BeaconChain, ) -> Result<(Fork, bool), warp::Rejection> { - self.map_state_and_execution_optimistic(chain, |state, execution_optimistic| { - Ok((state.fork(), execution_optimistic)) - }) + self.map_state_and_execution_optimistic_and_finalized( + chain, + |state, execution_optimistic, _finalized| Ok((state.fork(), execution_optimistic)), + ) + } + + /// Return the `fork` field of the state identified by `self`. + /// Also returns the `execution_optimistic` value of the state. + /// Also returns the `finalized` value of the state. + pub fn fork_and_execution_optimistic_and_finalized( + &self, + chain: &BeaconChain, + ) -> Result<(Fork, bool, bool), warp::Rejection> { + self.map_state_and_execution_optimistic_and_finalized( + chain, + |state, execution_optimistic, finalized| { + Ok((state.fork(), execution_optimistic, finalized)) + }, + ) } /// Convenience function to compute `fork` when `execution_optimistic` isn't desired. @@ -121,8 +155,8 @@ impl StateId { pub fn state( &self, chain: &BeaconChain, - ) -> Result<(BeaconState, ExecutionOptimistic), warp::Rejection> { - let ((state_root, execution_optimistic), slot_opt) = match &self.0 { + ) -> Result<(BeaconState, ExecutionOptimistic, Finalized), warp::Rejection> { + let ((state_root, execution_optimistic, finalized), slot_opt) = match &self.0 { CoreStateId::Head => { let (cached_head, execution_status) = chain .canonical_head @@ -134,6 +168,7 @@ impl StateId { .beacon_state .clone_with_only_committee_caches(), execution_status.is_optimistic_or_invalid(), + false, )); } CoreStateId::Slot(slot) => (self.root(chain)?, Some(*slot)), @@ -152,24 +187,25 @@ impl StateId { }) })?; - Ok((state, execution_optimistic)) + Ok((state, execution_optimistic, finalized)) } /// Map a function across the `BeaconState` identified by `self`. /// - /// The optimistic status of the requested state is also provided to the `func` closure. 
+ /// The optimistic and finalization status of the requested state is also provided to the `func` + /// closure. /// /// This function will avoid instantiating/copying a new state when `self` points to the head /// of the chain. - pub fn map_state_and_execution_optimistic( + pub fn map_state_and_execution_optimistic_and_finalized( &self, chain: &BeaconChain, func: F, ) -> Result where - F: Fn(&BeaconState, bool) -> Result, + F: Fn(&BeaconState, bool, bool) -> Result, { - let (state, execution_optimistic) = match &self.0 { + let (state, execution_optimistic, finalized) = match &self.0 { CoreStateId::Head => { let (head, execution_status) = chain .canonical_head @@ -178,12 +214,13 @@ impl StateId { return func( &head.snapshot.beacon_state, execution_status.is_optimistic_or_invalid(), + false, ); } _ => self.state(chain)?, }; - func(&state, execution_optimistic) + func(&state, execution_optimistic, finalized) } } diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs index cefa98db41..68a06b1ce8 100644 --- a/beacon_node/http_api/src/sync_committee_rewards.rs +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -13,8 +13,8 @@ pub fn compute_sync_committee_rewards( block_id: BlockId, validators: Vec, log: Logger, -) -> Result<(Option>, ExecutionOptimistic), warp::Rejection> { - let (block, execution_optimistic) = block_id.blinded_block(&chain)?; +) -> Result<(Option>, ExecutionOptimistic, bool), warp::Rejection> { + let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; let mut state = get_state_before_applying_block(chain.clone(), &block)?; @@ -44,7 +44,7 @@ pub fn compute_sync_committee_rewards( ) }; - Ok((data, execution_optimistic)) + Ok((data, execution_optimistic, finalized)) } pub fn get_state_before_applying_block( diff --git a/beacon_node/http_api/src/validator_inclusion.rs b/beacon_node/http_api/src/validator_inclusion.rs index 917e85e649..f22ced1e69 100644 --- 
a/beacon_node/http_api/src/validator_inclusion.rs +++ b/beacon_node/http_api/src/validator_inclusion.rs @@ -18,7 +18,7 @@ fn end_of_epoch_state( let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch()); // The execution status is not returned, any functions which rely upon this method might return // optimistic information without explicitly declaring so. - let (state, _execution_status) = StateId::from_slot(target_slot).state(chain)?; + let (state, _execution_status, _finalized) = StateId::from_slot(target_slot).state(chain)?; Ok(state) } diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index e7fd8910b1..e01ff98220 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -1,9 +1,8 @@ +use crate::api_types::fork_versioned_response::ExecutionOptimisticFinalizedForkVersionedResponse; use crate::api_types::EndpointVersion; use eth2::CONSENSUS_VERSION_HEADER; use serde::Serialize; -use types::{ - ExecutionOptimisticForkVersionedResponse, ForkName, ForkVersionedResponse, InconsistentFork, -}; +use types::{ForkName, ForkVersionedResponse, InconsistentFork}; use warp::reply::{self, Reply, Response}; pub const V1: EndpointVersion = EndpointVersion(1); @@ -27,12 +26,13 @@ pub fn fork_versioned_response( }) } -pub fn execution_optimistic_fork_versioned_response( +pub fn execution_optimistic_finalized_fork_versioned_response( endpoint_version: EndpointVersion, fork_name: ForkName, execution_optimistic: bool, + finalized: bool, data: T, -) -> Result, warp::reject::Rejection> { +) -> Result, warp::reject::Rejection> { let fork_name = if endpoint_version == V1 { None } else if endpoint_version == V2 { @@ -40,9 +40,10 @@ pub fn execution_optimistic_fork_versioned_response( } else { return Err(unsupported_version_rejection(endpoint_version)); }; - Ok(ExecutionOptimisticForkVersionedResponse { + Ok(ExecutionOptimisticFinalizedForkVersionedResponse { version: fork_name, execution_optimistic: 
Some(execution_optimistic), + finalized: Some(finalized), data, }) } diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 1112cc5ae2..2f1d5fd587 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -462,6 +462,264 @@ impl ApiTester { self } + // finalization tests + pub async fn test_beacon_states_root_finalized(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_root = state_id.root(&self.chain); + let state = state_id.state(&self.chain); + + // if .root or .state fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. + if state_root.is_err() || state.is_err() { + continue; + } + + // now that we know the state is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_states_root(state_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (state_root, _, _) = state_root.unwrap(); + let (state, _, _) = state.unwrap(); + let state_slot = state.slot(); + let expected = self + .chain + .is_finalized_state(&state_root, state_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_fork_finalized(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_root = state_id.root(&self.chain); + let state = state_id.state(&self.chain); + + // if .root or .state fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. 
+ if state_root.is_err() || state.is_err() { + continue; + } + + // now that we know the state is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_states_fork(state_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (state_root, _, _) = state_root.unwrap(); + let (state, _, _) = state.unwrap(); + let state_slot = state.slot(); + let expected = self + .chain + .is_finalized_state(&state_root, state_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_finality_checkpoints_finalized(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_root = state_id.root(&self.chain); + let state = state_id.state(&self.chain); + + // if .root or .state fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. + if state_root.is_err() || state.is_err() { + continue; + } + + // now that we know the state is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_states_finality_checkpoints(state_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (state_root, _, _) = state_root.unwrap(); + let (state, _, _) = state.unwrap(); + let state_slot = state.slot(); + let expected = self + .chain + .is_finalized_state(&state_root, state_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_headers_block_id_finalized(self) -> Self { + for block_id in self.interesting_block_ids() { + let block_root = block_id.root(&self.chain); + let block = block_id.full_block(&self.chain).await; + + // if .root or .state fail, skip the test. 
those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. + if block_root.is_err() || block.is_err() { + continue; + } + + // now that we know the block is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_headers_block_id(block_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (block_root, _, _) = block_root.unwrap(); + let (block, _, _) = block.unwrap(); + let block_slot = block.slot(); + let expected = self + .chain + .is_finalized_block(&block_root, block_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_beacon_blocks_finalized(self) -> Self { + for block_id in self.interesting_block_ids() { + let block_root = block_id.root(&self.chain); + let block = block_id.full_block(&self.chain).await; + + // if .root or .full_block fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_block + // occurs after those calls, and that they were correct. 
+ if block_root.is_err() || block.is_err() { + continue; + } + + // now that we know the block is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_blocks::(block_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (block_root, _, _) = block_root.unwrap(); + let (block, _, _) = block.unwrap(); + let block_slot = block.slot(); + let expected = self + .chain + .is_finalized_block(&block_root, block_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_beacon_blinded_blocks_finalized(self) -> Self { + for block_id in self.interesting_block_ids() { + let block_root = block_id.root(&self.chain); + let block = block_id.full_block(&self.chain).await; + + // if .root or .full_block fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_block + // occurs after those calls, and that they were correct. + if block_root.is_err() || block.is_err() { + continue; + } + + // now that we know the block is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_blinded_blocks::(block_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (block_root, _, _) = block_root.unwrap(); + let (block, _, _) = block.unwrap(); + let block_slot = block.slot(); + let expected = self + .chain + .is_finalized_block(&block_root, block_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_debug_beacon_states_finalized(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_root = state_id.root(&self.chain); + let state = state_id.state(&self.chain); + + // if .root or .state fail, skip the test. 
those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. + if state_root.is_err() || state.is_err() { + continue; + } + + // now that we know the state is valid, we can unwrap() everything we need + let result = self + .client + .get_debug_beacon_states::(state_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (state_root, _, _) = state_root.unwrap(); + let (state, _, _) = state.unwrap(); + let state_slot = state.slot(); + let expected = self + .chain + .is_finalized_state(&state_root, state_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + pub async fn test_beacon_states_root(self) -> Self { for state_id in self.interesting_state_ids() { let result = self @@ -474,7 +732,7 @@ impl ApiTester { let expected = state_id .root(&self.chain) .ok() - .map(|(root, _execution_optimistic)| root); + .map(|(root, _execution_optimistic, _finalized)| root); assert_eq!(result, expected, "{:?}", state_id); } @@ -508,15 +766,13 @@ impl ApiTester { .unwrap() .map(|res| res.data); - let expected = - state_id - .state(&self.chain) - .ok() - .map(|(state, _execution_optimistic)| FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }); + let expected = state_id.state(&self.chain).ok().map( + |(state, _execution_optimistic, _finalized)| FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }, + ); assert_eq!(result, expected, "{:?}", state_id); } @@ -529,7 +785,9 @@ impl ApiTester { for validator_indices in self.interesting_validator_indices() { let state_opt = 
state_id.state(&self.chain).ok(); let validators: Vec = match state_opt.as_ref() { - Some((state, _execution_optimistic)) => state.validators().clone().into(), + Some((state, _execution_optimistic, _finalized)) => { + state.validators().clone().into() + } None => vec![], }; let validator_index_ids = validator_indices @@ -568,7 +826,7 @@ impl ApiTester { .unwrap() .map(|res| res.data); - let expected = state_opt.map(|(state, _execution_optimistic)| { + let expected = state_opt.map(|(state, _execution_optimistic, _finalized)| { let mut validators = Vec::with_capacity(validator_indices.len()); for i in validator_indices { @@ -598,7 +856,7 @@ impl ApiTester { let state_opt = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); let validators: Vec = match state_opt.as_ref() { Some(state) => state.validators().clone().into(), None => vec![], @@ -688,7 +946,7 @@ impl ApiTester { let state_opt = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); let validators = match state_opt.as_ref() { Some(state) => state.validators().clone().into(), None => vec![], @@ -743,7 +1001,7 @@ impl ApiTester { let mut state_opt = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); let epoch_opt = state_opt.as_ref().map(|state| state.current_epoch()); let results = self @@ -790,7 +1048,7 @@ impl ApiTester { let mut state_opt = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); let epoch_opt = state_opt.as_ref().map(|state| state.current_epoch()); let result = self @@ -900,7 +1158,7 @@ impl ApiTester { let block_root_opt = block_id .root(&self.chain) .ok() - .map(|(root, _execution_optimistic)| root); + .map(|(root, 
_execution_optimistic, _finalized)| root); if let CoreBlockId::Slot(slot) = block_id.0 { if block_root_opt.is_none() { @@ -914,7 +1172,7 @@ impl ApiTester { .full_block(&self.chain) .await .ok() - .map(|(block, _execution_optimistic)| block); + .map(|(block, _execution_optimistic, _finalized)| block); if block_opt.is_none() && result.is_none() { continue; @@ -960,7 +1218,7 @@ impl ApiTester { let expected = block_id .root(&self.chain) .ok() - .map(|(root, _execution_optimistic)| root); + .map(|(root, _execution_optimistic, _finalized)| root); if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); @@ -1007,7 +1265,7 @@ impl ApiTester { .full_block(&self.chain) .await .ok() - .map(|(block, _execution_optimistic)| block); + .map(|(block, _execution_optimistic, _finalized)| block); if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { @@ -1091,7 +1349,7 @@ impl ApiTester { let expected = block_id .blinded_block(&self.chain) .ok() - .map(|(block, _execution_optimistic)| block); + .map(|(block, _execution_optimistic, _finalized)| block); if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { @@ -1172,7 +1430,7 @@ impl ApiTester { .map(|res| res.data); let expected = block_id.full_block(&self.chain).await.ok().map( - |(block, _execution_optimistic)| { + |(block, _execution_optimistic, _finalized)| { block.message().body().attestations().clone().into() }, ); @@ -1593,7 +1851,7 @@ impl ApiTester { let mut expected = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); expected.as_mut().map(|state| state.drop_all_caches()); if let (Some(json), Some(expected)) = (&result_json, &expected) { @@ -3657,7 +3915,7 @@ impl ApiTester { let mut expected = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); 
expected.as_mut().map(|state| state.drop_all_caches()); assert_eq!(result, expected, "{:?}", state_id); @@ -4065,6 +4323,20 @@ async fn beacon_get() { .await .test_beacon_genesis() .await + .test_beacon_states_root_finalized() + .await + .test_beacon_states_fork_finalized() + .await + .test_beacon_states_finality_checkpoints_finalized() + .await + .test_beacon_headers_block_id_finalized() + .await + .test_beacon_blocks_finalized::() + .await + .test_beacon_blinded_blocks_finalized::() + .await + .test_debug_beacon_states_finalized() + .await .test_beacon_states_root() .await .test_beacon_states_fork() diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 3255006b55..02608f9a0b 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1741,7 +1741,7 @@ fn no_state_root_iter() -> Option Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -357,7 +358,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_fork( &self, state_id: StateId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -376,7 +377,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_finality_checkpoints( &self, state_id: StateId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -396,7 +397,8 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, ids: Option<&[ValidatorId]>, - ) -> Result>>, Error> { + ) -> Result>>, Error> + { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -426,7 +428,7 @@ impl BeaconNodeHttpClient { state_id: StateId, ids: Option<&[ValidatorId]>, statuses: Option<&[ValidatorStatus]>, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -466,7 +468,7 @@ impl BeaconNodeHttpClient { slot: Option, index: Option, epoch: 
Option, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -499,7 +501,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, epoch: Option, - ) -> Result, Error> { + ) -> Result, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -522,7 +524,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, epoch: Option, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -547,7 +549,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, validator_id: &ValidatorId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -568,7 +570,7 @@ impl BeaconNodeHttpClient { &self, slot: Option, parent_root: Option, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -595,7 +597,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_headers_block_id( &self, block_id: BlockId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -675,7 +677,10 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks( &self, block_id: BlockId, - ) -> Result>>, Error> { + ) -> Result< + Option>>, + Error, + > { let path = self.get_beacon_blocks_path(block_id)?; let response = match self.get_response(path, |b| b).await.optional()? { Some(res) => res, @@ -691,8 +696,10 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blinded_blocks( &self, block_id: BlockId, - ) -> Result>>, Error> - { + ) -> Result< + Option>>, + Error, + > { let path = self.get_beacon_blinded_blocks_path(block_id)?; let response = match self.get_response(path, |b| b).await.optional()? 
{ Some(res) => res, @@ -760,7 +767,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks_root( &self, block_id: BlockId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -779,7 +786,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks_attestations( &self, block_id: BlockId, - ) -> Result>>>, Error> { + ) -> Result>>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -1267,7 +1274,8 @@ impl BeaconNodeHttpClient { pub async fn get_debug_beacon_states( &self, state_id: StateId, - ) -> Result>>, Error> { + ) -> Result>>, Error> + { let path = self.get_debug_beacon_states_path(state_id)?; self.get_opt(path).await } @@ -1661,7 +1669,7 @@ impl BeaconNodeHttpClient { &self, epoch: Epoch, indices: &[u64], - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 0fddaaab20..97c7ff34e1 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -200,6 +200,14 @@ pub struct ExecutionOptimisticResponse { + pub execution_optimistic: Option, + pub finalized: Option, + pub data: T, +} + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(bound = "T: Serialize + serde::de::DeserializeOwned")] pub struct GenericResponse { @@ -222,6 +230,18 @@ impl GenericResponse { data: self.data, } } + + pub fn add_execution_optimistic_finalized( + self, + execution_optimistic: bool, + finalized: bool, + ) -> ExecutionOptimisticFinalizedResponse { + ExecutionOptimisticFinalizedResponse { + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + data: self.data, + } + } } #[derive(Debug, PartialEq, Clone, Serialize)] diff --git a/consensus/types/src/fork_versioned_response.rs b/consensus/types/src/fork_versioned_response.rs index 07ff40b27e..45df151eb4 100644 --- a/consensus/types/src/fork_versioned_response.rs +++ 
b/consensus/types/src/fork_versioned_response.rs @@ -5,6 +5,46 @@ use serde_json::value::Value; use std::sync::Arc; // Deserialize is only implemented for types that implement ForkVersionDeserialize +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct ExecutionOptimisticFinalizedForkVersionedResponse { + #[serde(skip_serializing_if = "Option::is_none")] + pub version: Option, + pub execution_optimistic: Option, + pub finalized: Option, + pub data: T, +} + +impl<'de, F> serde::Deserialize<'de> for ExecutionOptimisticFinalizedForkVersionedResponse +where + F: ForkVersionDeserialize, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + struct Helper { + version: Option, + execution_optimistic: Option, + finalized: Option, + data: serde_json::Value, + } + + let helper = Helper::deserialize(deserializer)?; + let data = match helper.version { + Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?, + None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?, + }; + + Ok(ExecutionOptimisticFinalizedForkVersionedResponse { + version: helper.version, + execution_optimistic: helper.execution_optimistic, + finalized: helper.finalized, + data, + }) + } +} + #[derive(Debug, PartialEq, Clone, Serialize)] pub struct ExecutionOptimisticForkVersionedResponse { #[serde(skip_serializing_if = "Option::is_none")] From d351cc8d8d138a93c6c0b28b1bc128325ce9c662 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 30 Mar 2023 06:08:38 +0000 Subject: [PATCH 09/17] Test failing CI tests due to port conflicts (#4134) ## Issue Addressed #4127. PR to test port conflicts in CI tests . ## Proposed Changes See issue for more details, potential solution could be adding a cache bound by time to the `unused_port` function. 
--- Cargo.lock | 5 +++++ common/unused_port/Cargo.toml | 3 +++ common/unused_port/src/lib.rs | 27 ++++++++++++++++++++++++++- 3 files changed, 34 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index c7cdf212bc..214f3baa35 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8546,6 +8546,11 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "unused_port" version = "0.1.0" +dependencies = [ + "lazy_static", + "lru_cache", + "parking_lot 0.12.1", +] [[package]] name = "url" diff --git a/common/unused_port/Cargo.toml b/common/unused_port/Cargo.toml index 06c1ca8f58..2dd041ff07 100644 --- a/common/unused_port/Cargo.toml +++ b/common/unused_port/Cargo.toml @@ -6,3 +6,6 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +lru_cache = { path = "../lru_cache" } +lazy_static = "1.4.0" +parking_lot = "0.12.0" diff --git a/common/unused_port/src/lib.rs b/common/unused_port/src/lib.rs index a5d0817211..386f08a739 100644 --- a/common/unused_port/src/lib.rs +++ b/common/unused_port/src/lib.rs @@ -1,4 +1,8 @@ -use std::net::{TcpListener, UdpSocket}; +use lazy_static::lazy_static; +use lru_cache::LRUTimeCache; +use parking_lot::Mutex; +use std::net::{SocketAddr, TcpListener, UdpSocket}; +use std::time::Duration; #[derive(Copy, Clone)] pub enum Transport { @@ -12,6 +16,13 @@ pub enum IpVersion { Ipv6, } +pub const CACHED_PORTS_TTL: Duration = Duration::from_secs(300); + +lazy_static! { + static ref FOUND_PORTS_CACHE: Mutex> = + Mutex::new(LRUTimeCache::new(CACHED_PORTS_TTL)); +} + /// A convenience wrapper over [`zero_port`]. 
pub fn unused_tcp4_port() -> Result { zero_port(Transport::Tcp, IpVersion::Ipv4) @@ -48,6 +59,20 @@ pub fn zero_port(transport: Transport, ipv: IpVersion) -> Result { IpVersion::Ipv6 => std::net::Ipv6Addr::LOCALHOST.into(), }; let socket_addr = std::net::SocketAddr::new(localhost, 0); + let mut unused_port: u16; + loop { + unused_port = find_unused_port(transport, socket_addr)?; + let mut cache_lock = FOUND_PORTS_CACHE.lock(); + if !cache_lock.contains(&unused_port) { + cache_lock.insert(unused_port); + break; + } + } + + Ok(unused_port) +} + +fn find_unused_port(transport: Transport, socket_addr: SocketAddr) -> Result { let local_addr = match transport { Transport::Tcp => { let listener = TcpListener::bind(socket_addr).map_err(|e| { From 56911231538da05248d3672bef5d772ab987fc8b Mon Sep 17 00:00:00 2001 From: int88 Date: Thu, 30 Mar 2023 10:14:07 +0000 Subject: [PATCH 10/17] update README of local_testnet (#4114) ## Issue Addressed NA ## Proposed Changes update the descriptions of README in `scripts/local_testnet`. ## Additional Info NA --- scripts/local_testnet/README.md | 2 +- scripts/local_testnet/start_local_testnet.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index c9fb387681..c4050ac934 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -20,7 +20,7 @@ Modify `vars.env` as desired. Start a local eth1 ganache server plus boot node along with `BN_COUNT` number of beacon nodes and `VC_COUNT` validator clients. -The `start_local_testnet.sh` script takes three options `-v VC_COUNT`, `-d DEBUG_LEVEL` and `-h` for help. +The `start_local_testnet.sh` script takes four options `-v VC_COUNT`, `-d DEBUG_LEVEL`, `-p` to enable builder proposals and `-h` for help. The options may be in any order or absent in which case they take the default value specified. 
- VC_COUNT: the number of validator clients to create, default: `BN_COUNT` - DEBUG_LEVEL: one of { error, warn, info, debug, trace }, default: `info` diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index dcc0a5382a..e3aba5c3ad 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -28,7 +28,7 @@ while getopts "v:d:ph" flag; do echo "Options:" echo " -v: VC_COUNT default: $VC_COUNT" echo " -d: DEBUG_LEVEL default: info" - echo " -p: enable private tx proposals" + echo " -p: enable builder proposals" echo " -h: this help" exit ;; From c5383e393acee152e92641ce4699d05913953e70 Mon Sep 17 00:00:00 2001 From: chonghe Date: Fri, 31 Mar 2023 05:00:50 +0000 Subject: [PATCH 11/17] Update database-migrations.md (#4149) ## Issue Addressed Update the database-migrations to include v4.0.1 for database version v16: ## Proposed Changes Update the table by adding a row ## Additional Info Please provide any additional information. For example, future considerations or information useful for reviewers. --- book/src/database-migrations.md | 1 + 1 file changed, 1 insertion(+) diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index ef7e95cc7a..d2b7b518d7 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -28,6 +28,7 @@ validator client or the slasher**. | v3.3.0 | Nov 2022 | v13 | yes | | v3.4.0 | Jan 2023 | v13 | yes | | v3.5.0 | Feb 2023 | v15 | yes before Capella | +| v4.0.1 | Mar 2023 | v16 | yes before Capella | > **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release > (e.g. v2.3.0). 
From 2de34510114da8c817c80c201dcf7a1610d61336 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 3 Apr 2023 03:02:55 +0000 Subject: [PATCH 12/17] Rate limiting backfill sync (#3936) ## Issue Addressed #3212 ## Proposed Changes - Introduce a new `rate_limiting_backfill_queue` - any new inbound backfill work events get immediately sent to this FIFO queue **without any processing** - Spawn a `backfill_scheduler` routine that pops a backfill event from the FIFO queue at specified intervals (currently halfway through a slot, or at 6s after slot start for 12s slots) and sends the event to `BeaconProcessor` via a `scheduled_backfill_work_tx` channel - This channel gets polled last in the `InboundEvents`, and work event received is wrapped in an `InboundEvent::ScheduledBackfillWork` enum variant, which gets processed immediately or queued by the `BeaconProcessor` (existing logic applies from here) Diagram comparing backfill processing with / without rate-limiting: https://github.com/sigp/lighthouse/issues/3212#issuecomment-1386249922 See this comment for @paulhauner's explanation and solution: https://github.com/sigp/lighthouse/issues/3212#issuecomment-1384674956 ## Additional Info I've compared this branch (with backfill processing rate limited to 1 and 3 batches per slot) against the latest stable version. The CPU usage during backfill sync is reduced by ~5% - 20%, more details on this page: https://hackmd.io/@jimmygchen/SJuVpJL3j The above testing is done on Goerli (as I don't currently have hardware for Mainnet), I'm guessing the differences are likely to be bigger on mainnet due to block size. ### TODOs - [x] Experiment with processing multiple batches per slot. (need to think about how to do this for different slot durations) - [x] Add option to disable rate-limiting, enabled by default. 
- [x] (No longer required now we're reusing the reprocessing queue) Complete the `backfill_scheduler` task when backfill sync is completed or not required --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 +- beacon_node/beacon_chain/src/chain_config.rs | 3 + .../network/src/beacon_processor/mod.rs | 84 ++++++-- .../network/src/beacon_processor/tests.rs | 65 +++++- .../work_reprocessing_queue.rs | 204 ++++++++++++++++++ beacon_node/src/cli.rs | 8 + beacon_node/src/config.rs | 4 + common/slot_clock/src/lib.rs | 17 +- lighthouse/tests/beacon_node.rs | 13 ++ testing/ef_tests/src/cases/fork_choice.rs | 2 +- 10 files changed, 380 insertions(+), 24 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 11eda4dead..d3c391e4ed 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2893,7 +2893,7 @@ impl BeaconChain { metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES); let block_delay = self .slot_clock - .seconds_from_current_slot_start(self.spec.seconds_per_slot) + .seconds_from_current_slot_start() .ok_or(Error::UnableToComputeTimeAtSlot)?; fork_choice @@ -3746,7 +3746,7 @@ impl BeaconChain { let slot_delay = self .slot_clock - .seconds_from_current_slot_start(self.spec.seconds_per_slot) + .seconds_from_current_slot_start() .or_else(|| { warn!( self.log, diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 1a53942562..c72c3d2cd4 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -68,6 +68,8 @@ pub struct ChainConfig { /// /// This is useful for block builders and testing. pub always_prepare_payload: bool, + /// Whether backfill sync processing should be rate-limited. 
+ pub enable_backfill_rate_limiting: bool, } impl Default for ChainConfig { @@ -94,6 +96,7 @@ impl Default for ChainConfig { optimistic_finalized_sync: true, shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE, always_prepare_payload: false, + enable_backfill_rate_limiting: true, } } } diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 61e3367e2f..9603205228 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -61,6 +61,7 @@ use std::time::Duration; use std::{cmp, collections::HashSet}; use task_executor::TaskExecutor; use tokio::sync::mpsc; +use tokio::sync::mpsc::error::TrySendError; use types::{ Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, @@ -77,7 +78,9 @@ mod tests; mod work_reprocessing_queue; mod worker; -use crate::beacon_processor::work_reprocessing_queue::QueuedGossipBlock; +use crate::beacon_processor::work_reprocessing_queue::{ + QueuedBackfillBatch, QueuedGossipBlock, ReprocessQueueMessage, +}; pub use worker::{ChainSegmentProcessId, GossipAggregatePackage, GossipAttestationPackage}; /// The maximum size of the channel for work events to the `BeaconProcessor`. 
@@ -218,6 +221,7 @@ pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_upd pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; pub const RPC_BLOCK: &str = "rpc_block"; pub const CHAIN_SEGMENT: &str = "chain_segment"; +pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill"; pub const STATUS_PROCESSING: &str = "status_processing"; pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; @@ -738,6 +742,9 @@ impl std::convert::From> for WorkEvent { seen_timestamp, }, }, + ReadyWork::BackfillSync(QueuedBackfillBatch { process_id, blocks }) => { + WorkEvent::chain_segment(process_id, blocks) + } } } } @@ -893,6 +900,10 @@ impl Work { Work::GossipLightClientFinalityUpdate { .. } => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE, Work::GossipLightClientOptimisticUpdate { .. } => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, Work::RpcBlock { .. } => RPC_BLOCK, + Work::ChainSegment { + process_id: ChainSegmentProcessId::BackSyncBatchId { .. }, + .. + } => CHAIN_SEGMENT_BACKFILL, Work::ChainSegment { .. } => CHAIN_SEGMENT, Work::Status { .. } => STATUS_PROCESSING, Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST, @@ -1054,23 +1065,23 @@ impl BeaconProcessor { FifoQueue::new(MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN); let mut lcbootstrap_queue = FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN); + + let chain = match self.beacon_chain.upgrade() { + Some(chain) => chain, + // No need to proceed any further if the beacon chain has been dropped, the client + // is shutting down. + None => return, + }; + // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // receive them back once they are ready (`ready_work_rx`). 
let (ready_work_tx, ready_work_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); - let work_reprocessing_tx = { - if let Some(chain) = self.beacon_chain.upgrade() { - spawn_reprocess_scheduler( - ready_work_tx, - &self.executor, - chain.slot_clock.clone(), - self.log.clone(), - ) - } else { - // No need to proceed any further if the beacon chain has been dropped, the client - // is shutting down. - return; - } - }; + let work_reprocessing_tx = spawn_reprocess_scheduler( + ready_work_tx, + &self.executor, + chain.slot_clock.clone(), + self.log.clone(), + ); let executor = self.executor.clone(); @@ -1083,12 +1094,55 @@ impl BeaconProcessor { reprocess_work_rx: ready_work_rx, }; + let enable_backfill_rate_limiting = chain.config.enable_backfill_rate_limiting; + loop { let work_event = match inbound_events.next().await { Some(InboundEvent::WorkerIdle) => { self.current_workers = self.current_workers.saturating_sub(1); None } + Some(InboundEvent::WorkEvent(event)) if enable_backfill_rate_limiting => { + match QueuedBackfillBatch::try_from(event) { + Ok(backfill_batch) => { + match work_reprocessing_tx + .try_send(ReprocessQueueMessage::BackfillSync(backfill_batch)) + { + Err(e) => { + warn!( + self.log, + "Unable to queue backfill work event. Will try to process now."; + "error" => %e + ); + match e { + TrySendError::Full(reprocess_queue_message) + | TrySendError::Closed(reprocess_queue_message) => { + match reprocess_queue_message { + ReprocessQueueMessage::BackfillSync( + backfill_batch, + ) => Some(backfill_batch.into()), + other => { + crit!( + self.log, + "Unexpected queue message type"; + "message_type" => other.as_ref() + ); + // This is an unhandled exception, drop the message. + continue; + } + } + } + } + } + Ok(..) => { + // backfill work sent to "reprocessing" queue. Process the next event. 
+ continue; + } + } + } + Err(event) => Some(event), + } + } Some(InboundEvent::WorkEvent(event)) | Some(InboundEvent::ReprocessingWork(event)) => Some(event), None => { diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index eb66e434c9..b7c102ae11 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -9,7 +9,7 @@ use crate::{service::NetworkMessage, sync::SyncMessage}; use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; -use beacon_chain::{BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; +use beacon_chain::{BeaconChain, ChainConfig, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, rpc::methods::{MetaData, MetaDataV2}, @@ -23,8 +23,8 @@ use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, EthSpec, MainnetEthSpec, ProposerSlashing, SignedBeaconBlock, - SignedVoluntaryExit, SubnetId, + Attestation, AttesterSlashing, Epoch, EthSpec, MainnetEthSpec, ProposerSlashing, + SignedBeaconBlock, SignedVoluntaryExit, SubnetId, }; type E = MainnetEthSpec; @@ -70,6 +70,10 @@ impl Drop for TestRig { impl TestRig { pub async fn new(chain_length: u64) -> Self { + Self::new_with_chain_config(chain_length, ChainConfig::default()).await + } + + pub async fn new_with_chain_config(chain_length: u64, chain_config: ChainConfig) -> Self { // This allows for testing voluntary exits without building out a massive chain. 
let mut spec = E::default_spec(); spec.shard_committee_period = 2; @@ -78,6 +82,7 @@ impl TestRig { .spec(spec) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() + .chain_config(chain_config) .build(); harness.advance_slot(); @@ -261,6 +266,14 @@ impl TestRig { self.beacon_processor_tx.try_send(event).unwrap(); } + pub fn enqueue_backfill_batch(&self) { + let event = WorkEvent::chain_segment( + ChainSegmentProcessId::BackSyncBatchId(Epoch::default()), + Vec::default(), + ); + self.beacon_processor_tx.try_send(event).unwrap(); + } + pub fn enqueue_unaggregated_attestation(&self) { let (attestation, subnet_id) = self.attestations.first().unwrap().clone(); self.beacon_processor_tx @@ -873,3 +886,49 @@ async fn test_rpc_block_reprocessing() { // cache handle was dropped. assert_eq!(next_block_root, rig.head_root()); } + +/// Ensure that backfill batches get rate-limited and processing is scheduled at specified intervals. +#[tokio::test] +async fn test_backfill_sync_processing() { + let mut rig = TestRig::new(SMALL_CHAIN).await; + // Note: to verify the exact event times in an integration test is not straight forward here + // (not straight forward to manipulate `TestingSlotClock` due to cloning of `SlotClock` in code) + // and makes the test very slow, hence timing calculation is unit tested separately in + // `work_reprocessing_queue`. + for _ in 0..1 { + rig.enqueue_backfill_batch(); + // ensure queued batch is not processed until later + rig.assert_no_events_for(Duration::from_millis(100)).await; + // A new batch should be processed within a slot. + rig.assert_event_journal_with_timeout( + &[CHAIN_SEGMENT_BACKFILL, WORKER_FREED, NOTHING_TO_DO], + rig.chain.slot_clock.slot_duration(), + ) + .await; + } +} + +/// Ensure that backfill batches get processed as fast as they can when rate-limiting is disabled. 
+#[tokio::test] +async fn test_backfill_sync_processing_rate_limiting_disabled() { + let chain_config = ChainConfig { + enable_backfill_rate_limiting: false, + ..Default::default() + }; + let mut rig = TestRig::new_with_chain_config(SMALL_CHAIN, chain_config).await; + + for _ in 0..3 { + rig.enqueue_backfill_batch(); + } + + // ensure all batches are processed + rig.assert_event_journal_with_timeout( + &[ + CHAIN_SEGMENT_BACKFILL, + CHAIN_SEGMENT_BACKFILL, + CHAIN_SEGMENT_BACKFILL, + ], + Duration::from_millis(100), + ) + .await; +} diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index 21fc2b6416..9f04d99725 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -11,21 +11,25 @@ //! Aggregated and unaggregated attestations that failed verification due to referencing an unknown //! block will be re-queued until their block is imported, or until they expire. 
use super::MAX_SCHEDULED_WORK_QUEUE_LEN; +use crate::beacon_processor::{ChainSegmentProcessId, Work, WorkEvent}; use crate::metrics; use crate::sync::manager::BlockProcessType; use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use fnv::FnvHashMap; use futures::task::Poll; use futures::{Stream, StreamExt}; +use itertools::Itertools; use lighthouse_network::{MessageId, PeerId}; use logging::TimeLatch; use slog::{crit, debug, error, trace, warn, Logger}; use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; +use std::future::Future; use std::pin::Pin; use std::sync::Arc; use std::task::Context; use std::time::Duration; +use strum::AsRefStr; use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::time::error::Error as TimeError; @@ -65,7 +69,21 @@ const MAXIMUM_QUEUED_ATTESTATIONS: usize = 16_384; /// How many light client updates we keep before new ones get dropped. const MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES: usize = 128; +// Process backfill batch 50%, 60%, 80% through each slot. +// +// Note: use caution to set these fractions in a way that won't cause panic-y +// arithmetic. +pub const BACKFILL_SCHEDULE_IN_SLOT: [(u32, u32); 3] = [ + // One half: 6s on mainnet, 2.5s on Gnosis. + (1, 2), + // Three fifths: 7.2s on mainnet, 3s on Gnosis. + (3, 5), + // Four fifths: 9.6s on mainnet, 4s on Gnosis. + (4, 5), +]; + /// Messages that the scheduler can receive. +#[derive(AsRefStr)] pub enum ReprocessQueueMessage { /// A block that has been received early and we should queue for later processing. EarlyBlock(QueuedGossipBlock), @@ -84,6 +102,8 @@ pub enum ReprocessQueueMessage { UnknownBlockAggregate(QueuedAggregate), /// A light client optimistic update that references a parent root that has not been seen as a parent. UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate), + /// A new backfill batch that needs to be scheduled for processing. 
+ BackfillSync(QueuedBackfillBatch), } /// Events sent by the scheduler once they are ready for re-processing. @@ -93,6 +113,7 @@ pub enum ReadyWork { Unaggregate(QueuedUnaggregate), Aggregate(QueuedAggregate), LightClientUpdate(QueuedLightClientUpdate), + BackfillSync(QueuedBackfillBatch), } /// An Attestation for which the corresponding block was not seen while processing, queued for @@ -144,6 +165,40 @@ pub struct QueuedRpcBlock { pub should_process: bool, } +/// A backfill batch work that has been queued for processing later. +#[derive(Clone)] +pub struct QueuedBackfillBatch { + pub process_id: ChainSegmentProcessId, + pub blocks: Vec>>, +} + +impl TryFrom> for QueuedBackfillBatch { + type Error = WorkEvent; + + fn try_from(event: WorkEvent) -> Result> { + match event { + WorkEvent { + work: + Work::ChainSegment { + process_id: process_id @ ChainSegmentProcessId::BackSyncBatchId(_), + blocks, + }, + .. + } => Ok(QueuedBackfillBatch { process_id, blocks }), + _ => Err(event), + } + } +} + +impl From> for WorkEvent { + fn from(queued_backfill_batch: QueuedBackfillBatch) -> WorkEvent { + WorkEvent::chain_segment( + queued_backfill_batch.process_id, + queued_backfill_batch.blocks, + ) + } +} + /// Unifies the different messages processed by the block delay queue. enum InboundEvent { /// A gossip block that was queued for later processing and is ready for import. @@ -155,6 +210,8 @@ enum InboundEvent { ReadyAttestation(QueuedAttestationId), /// A light client update that is ready for re-processing. ReadyLightClientUpdate(QueuedLightClientUpdateId), + /// A backfill batch that was queued is ready for processing. + ReadyBackfillSync(QueuedBackfillBatch), /// A `DelayQueue` returned an error. DelayQueueError(TimeError, &'static str), /// A message sent to the `ReprocessQueue` @@ -191,6 +248,8 @@ struct ReprocessQueue { queued_lc_updates: FnvHashMap, DelayKey)>, /// Light Client Updates per parent_root. 
awaiting_lc_updates_per_parent_root: HashMap>, + /// Queued backfill batches + queued_backfill_batches: Vec>, /* Aux */ /// Next attestation id, used for both aggregated and unaggregated attestations @@ -200,6 +259,8 @@ struct ReprocessQueue { rpc_block_debounce: TimeLatch, attestation_delay_debounce: TimeLatch, lc_update_delay_debounce: TimeLatch, + next_backfill_batch_event: Option>>, + slot_clock: Pin>, } pub type QueuedLightClientUpdateId = usize; @@ -287,6 +348,20 @@ impl Stream for ReprocessQueue { Poll::Ready(None) | Poll::Pending => (), } + if let Some(next_backfill_batch_event) = self.next_backfill_batch_event.as_mut() { + match next_backfill_batch_event.as_mut().poll(cx) { + Poll::Ready(_) => { + let maybe_batch = self.queued_backfill_batches.pop(); + self.recompute_next_backfill_batch_event(); + + if let Some(batch) = maybe_batch { + return Poll::Ready(Some(InboundEvent::ReadyBackfillSync(batch))); + } + } + Poll::Pending => (), + } + } + // Last empty the messages channel. match self.work_reprocessing_rx.poll_recv(cx) { Poll::Ready(Some(message)) => return Poll::Ready(Some(InboundEvent::Msg(message))), @@ -323,12 +398,15 @@ pub fn spawn_reprocess_scheduler( queued_unaggregates: FnvHashMap::default(), awaiting_attestations_per_root: HashMap::new(), awaiting_lc_updates_per_parent_root: HashMap::new(), + queued_backfill_batches: Vec::new(), next_attestation: 0, next_lc_update: 0, early_block_debounce: TimeLatch::default(), rpc_block_debounce: TimeLatch::default(), attestation_delay_debounce: TimeLatch::default(), lc_update_delay_debounce: TimeLatch::default(), + next_backfill_batch_event: None, + slot_clock: Box::pin(slot_clock.clone()), }; executor.spawn( @@ -679,6 +757,14 @@ impl ReprocessQueue { } } } + InboundEvent::Msg(BackfillSync(queued_backfill_batch)) => { + self.queued_backfill_batches + .insert(0, queued_backfill_batch); + // only recompute if there is no `next_backfill_batch_event` already scheduled + if self.next_backfill_batch_event.is_none() 
{ + self.recompute_next_backfill_batch_event(); + } + } // A block that was queued for later processing is now ready to be processed. InboundEvent::ReadyGossipBlock(ready_block) => { let block_root = ready_block.block.block_root; @@ -786,6 +872,33 @@ impl ReprocessQueue { } } } + InboundEvent::ReadyBackfillSync(queued_backfill_batch) => { + let millis_from_slot_start = slot_clock + .millis_from_current_slot_start() + .map_or("null".to_string(), |duration| { + duration.as_millis().to_string() + }); + + debug!( + log, + "Sending scheduled backfill work"; + "millis_from_slot_start" => millis_from_slot_start + ); + + if self + .ready_work_tx + .try_send(ReadyWork::BackfillSync(queued_backfill_batch.clone())) + .is_err() + { + error!( + log, + "Failed to send scheduled backfill work"; + "info" => "sending work back to queue" + ); + self.queued_backfill_batches + .insert(0, queued_backfill_batch); + } + } } metrics::set_gauge_vec( @@ -809,4 +922,95 @@ impl ReprocessQueue { self.lc_updates_delay_queue.len() as i64, ); } + + fn recompute_next_backfill_batch_event(&mut self) { + // only recompute the `next_backfill_batch_event` if there are backfill batches in the queue + if !self.queued_backfill_batches.is_empty() { + self.next_backfill_batch_event = Some(Box::pin(tokio::time::sleep( + ReprocessQueue::::duration_until_next_backfill_batch_event(&self.slot_clock), + ))); + } else { + self.next_backfill_batch_event = None + } + } + + /// Returns duration until the next scheduled processing time. The schedule ensure that backfill + /// processing is done in windows of time that aren't critical + fn duration_until_next_backfill_batch_event(slot_clock: &T::SlotClock) -> Duration { + let slot_duration = slot_clock.slot_duration(); + slot_clock + .millis_from_current_slot_start() + .and_then(|duration_from_slot_start| { + BACKFILL_SCHEDULE_IN_SLOT + .into_iter() + // Convert fractions to seconds from slot start. 
+ .map(|(multiplier, divisor)| (slot_duration / divisor) * multiplier) + .find_or_first(|&event_duration_from_slot_start| { + event_duration_from_slot_start > duration_from_slot_start + }) + .map(|next_event_time| { + if duration_from_slot_start >= next_event_time { + // event is in the next slot, add duration to next slot + let duration_to_next_slot = slot_duration - duration_from_slot_start; + duration_to_next_slot + next_event_time + } else { + next_event_time - duration_from_slot_start + } + }) + }) + // If we can't read the slot clock, just wait another slot. + .unwrap_or(slot_duration) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use beacon_chain::builder::Witness; + use beacon_chain::eth1_chain::CachingEth1Backend; + use slot_clock::TestingSlotClock; + use store::MemoryStore; + use types::MainnetEthSpec as E; + use types::Slot; + + type TestBeaconChainType = + Witness, E, MemoryStore, MemoryStore>; + + #[test] + fn backfill_processing_schedule_calculation() { + let slot_duration = Duration::from_secs(12); + let slot_clock = TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), slot_duration); + let current_slot_start = slot_clock.start_of(Slot::new(100)).unwrap(); + slot_clock.set_current_time(current_slot_start); + + let event_times = BACKFILL_SCHEDULE_IN_SLOT + .map(|(multiplier, divisor)| (slot_duration / divisor) * multiplier); + + for &event_duration_from_slot_start in event_times.iter() { + let duration_to_next_event = + ReprocessQueue::::duration_until_next_backfill_batch_event( + &slot_clock, + ); + + let current_time = slot_clock.millis_from_current_slot_start().unwrap(); + + assert_eq!( + duration_to_next_event, + event_duration_from_slot_start - current_time + ); + + slot_clock.set_current_time(current_slot_start + event_duration_from_slot_start) + } + + // check for next event beyond the current slot + let duration_to_next_slot = slot_clock.duration_to_next_slot().unwrap(); + let duration_to_next_event = + 
ReprocessQueue::::duration_until_next_backfill_batch_event( + &slot_clock, + ); + assert_eq!( + duration_to_next_event, + duration_to_next_slot + event_times[0] + ); + } } diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 1e18493474..71d7d68c45 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -261,6 +261,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .min_values(0) .hidden(true) ) + .arg( + Arg::with_name("disable-backfill-rate-limiting") + .long("disable-backfill-rate-limiting") + .help("Disable the backfill sync rate-limiting. This allow users to just sync the entire chain as fast \ + as possible, however it can result in resource contention which degrades staking performance. Stakers \ + should generally choose to avoid this flag since backfill sync is not required for staking.") + .takes_value(false), + ) /* REST API related arguments */ .arg( Arg::with_name("http") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c77fa49b12..8799bdeeec 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -759,6 +759,10 @@ pub fn get_config( client_config.always_prefer_builder_payload = true; } + // Backfill sync rate-limiting + client_config.chain.enable_backfill_rate_limiting = + !cli_args.is_present("disable-backfill-rate-limiting"); + Ok(client_config) } diff --git a/common/slot_clock/src/lib.rs b/common/slot_clock/src/lib.rs index 183f5c9313..1c8813ca2f 100644 --- a/common/slot_clock/src/lib.rs +++ b/common/slot_clock/src/lib.rs @@ -104,12 +104,23 @@ pub trait SlotClock: Send + Sync + Sized + Clone { self.slot_duration() * 2 / INTERVALS_PER_SLOT as u32 } - /// Returns the `Duration` since the start of the current `Slot`. Useful in determining whether to apply proposer boosts. - fn seconds_from_current_slot_start(&self, seconds_per_slot: u64) -> Option { + /// Returns the `Duration` since the start of the current `Slot` at seconds precision. 
Useful in determining whether to apply proposer boosts. + fn seconds_from_current_slot_start(&self) -> Option { self.now_duration() .and_then(|now| now.checked_sub(self.genesis_duration())) .map(|duration_into_slot| { - Duration::from_secs(duration_into_slot.as_secs() % seconds_per_slot) + Duration::from_secs(duration_into_slot.as_secs() % self.slot_duration().as_secs()) + }) + } + + /// Returns the `Duration` since the start of the current `Slot` at milliseconds precision. + fn millis_from_current_slot_start(&self) -> Option { + self.now_duration() + .and_then(|now| now.checked_sub(self.genesis_duration())) + .map(|duration_into_slot| { + Duration::from_millis( + (duration_into_slot.as_millis() % self.slot_duration().as_millis()) as u64, + ) }) } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 078bca95ef..a61d9cbf74 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1052,6 +1052,19 @@ fn disable_upnp_flag() { .with_config(|config| assert!(!config.network.upnp_enabled)); } #[test] +fn disable_backfill_rate_limiting_flag() { + CommandLineTest::new() + .flag("disable-backfill-rate-limiting", None) + .run_with_zero_port() + .with_config(|config| assert!(!config.chain.enable_backfill_rate_limiting)); +} +#[test] +fn default_backfill_rate_limiting_flag() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(config.chain.enable_backfill_rate_limiting)); +} +#[test] fn default_boot_nodes() { let mainnet = vec![ // Lighthouse Team (Sigma Prime) diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 7c3154a328..4f5d998301 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -425,7 +425,7 @@ impl Tester { .harness .chain .slot_clock - .seconds_from_current_slot_start(self.spec.seconds_per_slot) + .seconds_from_current_slot_start() .unwrap(); let result = self From 
e2c68c8893ac66c33ba42fcf8c2b40bd4574c5a0 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 3 Apr 2023 03:02:56 +0000 Subject: [PATCH 13/17] Add new validator API for voluntary exit (#4119) ## Issue Addressed Addresses #4117 ## Proposed Changes See https://github.com/ethereum/keymanager-APIs/pull/58 for proposed API specification. ## TODO - [x] ~~Add submission to BN~~ - removed, see discussion in [keymanager API](https://github.com/ethereum/keymanager-APIs/pull/58) - [x] ~~Add flag to allow voluntary exit via the API~~ - no longer needed now the VC doesn't submit exit directly - [x] ~~Additional verification / checks, e.g. if validator on same network as BN~~ - to be done on client side - [x] ~~Potentially wait for the message to propagate and return some exit information in the response~~ - not required - [x] Update http tests - [x] ~~Update lighthouse book~~ - not required if this endpoint makes it to the standard keymanager API Co-authored-by: Paul Hauner Co-authored-by: Jimmy Chen --- common/eth2/src/lighthouse_vc/http_client.rs | 24 +++++++ common/eth2/src/lighthouse_vc/types.rs | 5 ++ .../http_api/create_signed_voluntary_exit.rs | 69 +++++++++++++++++++ validator_client/src/http_api/mod.rs | 47 +++++++++++++ validator_client/src/http_api/tests.rs | 67 ++++++++++++++++-- validator_client/src/http_metrics/metrics.rs | 5 ++ validator_client/src/lib.rs | 5 +- validator_client/src/signing_method.rs | 3 + .../src/signing_method/web3signer.rs | 1 - validator_client/src/validator_store.rs | 39 ++++++++++- 10 files changed, 256 insertions(+), 9 deletions(-) create mode 100644 validator_client/src/http_api/create_signed_voluntary_exit.rs diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 88b5b68401..90c128751d 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -642,6 +642,30 @@ impl ValidatorClientHttpClient { let url = 
self.make_gas_limit_url(pubkey)?; self.delete_with_raw_response(url, &()).await } + + /// `POST /eth/v1/validator/{pubkey}/voluntary_exit` + pub async fn post_validator_voluntary_exit( + &self, + pubkey: &PublicKeyBytes, + epoch: Option, + ) -> Result { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("validator") + .push(&pubkey.to_string()) + .push("voluntary_exit"); + + if let Some(epoch) = epoch { + path.query_pairs_mut() + .append_pair("epoch", &epoch.to_string()); + } + + self.post(path, &()).await + } } /// Returns `Ok(response)` if the response is a `200 OK` response or a diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 92439337f6..fa5d4ae119 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -144,3 +144,8 @@ pub struct UpdateGasLimitRequest { #[serde(with = "eth2_serde_utils::quoted_u64")] pub gas_limit: u64, } + +#[derive(Deserialize)] +pub struct VoluntaryExitQuery { + pub epoch: Option, +} diff --git a/validator_client/src/http_api/create_signed_voluntary_exit.rs b/validator_client/src/http_api/create_signed_voluntary_exit.rs new file mode 100644 index 0000000000..b777d15806 --- /dev/null +++ b/validator_client/src/http_api/create_signed_voluntary_exit.rs @@ -0,0 +1,69 @@ +use crate::validator_store::ValidatorStore; +use bls::{PublicKey, PublicKeyBytes}; +use slog::{info, Logger}; +use slot_clock::SlotClock; +use std::sync::Arc; +use types::{Epoch, EthSpec, SignedVoluntaryExit, VoluntaryExit}; + +pub async fn create_signed_voluntary_exit( + pubkey: PublicKey, + maybe_epoch: Option, + validator_store: Arc>, + slot_clock: T, + log: Logger, +) -> Result { + let epoch = match maybe_epoch { + Some(epoch) => epoch, + None => get_current_epoch::(slot_clock).ok_or_else(|| { + warp_utils::reject::custom_server_error("Unable to determine current 
epoch".to_string()) + })?, + }; + + let pubkey_bytes = PublicKeyBytes::from(pubkey); + if !validator_store.has_validator(&pubkey_bytes) { + return Err(warp_utils::reject::custom_not_found(format!( + "{} is disabled or not managed by this validator client", + pubkey_bytes.as_hex_string() + ))); + } + + let validator_index = validator_store + .validator_index(&pubkey_bytes) + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "The validator index for {} is not known. The validator client \ + may still be initializing or the validator has not yet had a \ + deposit processed.", + pubkey_bytes.as_hex_string() + )) + })?; + + let voluntary_exit = VoluntaryExit { + epoch, + validator_index, + }; + + info!( + log, + "Signing voluntary exit"; + "validator" => pubkey_bytes.as_hex_string(), + "epoch" => epoch + ); + + let signed_voluntary_exit = validator_store + .sign_voluntary_exit(pubkey_bytes, voluntary_exit) + .await + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Failed to sign voluntary exit: {:?}", + e + )) + })?; + + Ok(signed_voluntary_exit) +} + +/// Calculates the current epoch from the genesis time and current time. 
+fn get_current_epoch(slot_clock: T) -> Option { + slot_clock.now().map(|s| s.epoch(E::slots_per_epoch())) +} diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index b87bb08381..15b3f9fe09 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -1,9 +1,11 @@ mod api_secret; +mod create_signed_voluntary_exit; mod create_validator; mod keystores; mod remotekeys; mod tests; +use crate::http_api::create_signed_voluntary_exit::create_signed_voluntary_exit; use crate::{determine_graffiti, GraffitiFile, ValidatorStore}; use account_utils::{ mnemonic_from_phrase, @@ -71,6 +73,7 @@ pub struct Context { pub spec: ChainSpec, pub config: Config, pub log: Logger, + pub slot_clock: T, pub _phantom: PhantomData, } @@ -189,6 +192,9 @@ pub fn serve( let inner_ctx = ctx.clone(); let log_filter = warp::any().map(move || inner_ctx.log.clone()); + let inner_slot_clock = ctx.slot_clock.clone(); + let slot_clock_filter = warp::any().map(move || inner_slot_clock.clone()); + let inner_spec = Arc::new(ctx.spec.clone()); let spec_filter = warp::any().map(move || inner_spec.clone()); @@ -904,6 +910,46 @@ pub fn serve( ) .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::NO_CONTENT)); + // POST /eth/v1/validator/{pubkey}/voluntary_exit + let post_validators_voluntary_exits = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::()) + .and(warp::path("voluntary_exit")) + .and(warp::query::()) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(slot_clock_filter) + .and(log_filter.clone()) + .and(signer.clone()) + .and(task_executor_filter.clone()) + .and_then( + |pubkey: PublicKey, + query: api_types::VoluntaryExitQuery, + validator_store: Arc>, + slot_clock: T, + log, + signer, + task_executor: TaskExecutor| { + blocking_signed_json_task(signer, move || { + if let Some(handle) = task_executor.handle() { + let signed_voluntary_exit = + 
handle.block_on(create_signed_voluntary_exit( + pubkey, + query.epoch, + validator_store, + slot_clock, + log, + ))?; + Ok(signed_voluntary_exit) + } else { + Err(warp_utils::reject::custom_server_error( + "Lighthouse shutting down".into(), + )) + } + }) + }, + ); + // GET /eth/v1/keystores let get_std_keystores = std_keystores .and(signer.clone()) @@ -1001,6 +1047,7 @@ pub fn serve( .or(post_validators_keystore) .or(post_validators_mnemonic) .or(post_validators_web3signer) + .or(post_validators_voluntary_exits) .or(post_fee_recipient) .or(post_gas_limit) .or(post_std_keystores) diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index d453d7038a..df0e480444 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -45,6 +45,7 @@ struct ApiTester { initialized_validators: Arc>, validator_store: Arc>, url: SensitiveUrl, + slot_clock: TestingSlotClock, _server_shutdown: oneshot::Sender<()>, _validator_dir: TempDir, _runtime_shutdown: exit_future::Signal, @@ -90,8 +91,12 @@ impl ApiTester { let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME); let slashing_protection = SlashingDatabase::open_or_create(&slashing_db_path).unwrap(); - let slot_clock = - TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1)); + let genesis_time: u64 = 0; + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(genesis_time), + Duration::from_secs(1), + ); let (runtime_shutdown, exit) = exit_future::signal(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); @@ -101,9 +106,9 @@ impl ApiTester { initialized_validators, slashing_protection, Hash256::repeat_byte(42), - spec, + spec.clone(), Some(Arc::new(DoppelgangerService::new(log.clone()))), - slot_clock, + slot_clock.clone(), &config, executor.clone(), log.clone(), @@ -129,7 +134,8 @@ impl ApiTester { listen_port: 0, allow_origin: None, }, - log, + log: log.clone(), + 
slot_clock: slot_clock.clone(), _phantom: PhantomData, }); let ctx = context.clone(); @@ -156,6 +162,7 @@ impl ApiTester { initialized_validators, validator_store, url, + slot_clock, _server_shutdown: shutdown_tx, _validator_dir: validator_dir, _runtime_shutdown: runtime_shutdown, @@ -494,6 +501,33 @@ impl ApiTester { self } + pub async fn test_sign_voluntary_exits(self, index: usize, maybe_epoch: Option) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + // manually setting validator index in `ValidatorStore` + self.initialized_validators + .write() + .set_index(&validator.voting_pubkey, 0); + + let expected_exit_epoch = maybe_epoch.unwrap_or_else(|| self.get_current_epoch()); + + let resp = self + .client + .post_validator_voluntary_exit(&validator.voting_pubkey, maybe_epoch) + .await; + + assert!(resp.is_ok()); + assert_eq!(resp.unwrap().message.epoch, expected_exit_epoch); + + self + } + + fn get_current_epoch(&self) -> Epoch { + self.slot_clock + .now() + .map(|s| s.epoch(E::slots_per_epoch())) + .unwrap() + } + pub async fn set_validator_enabled(self, index: usize, enabled: bool) -> Self { let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; @@ -778,6 +812,29 @@ fn hd_validator_creation() { }); } +#[test] +fn validator_exit() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on(async { + ApiTester::new(weak_runtime) + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .test_sign_voluntary_exits(0, None) + .await + .test_sign_voluntary_exits(0, Some(Epoch::new(256))) + .await; + }); +} + #[test] fn validator_enabling() { let runtime = build_runtime(); diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 
b4e400c3e7..8a52a4d35e 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -88,6 +88,11 @@ lazy_static::lazy_static! { "Total count of attempted SyncSelectionProof signings", &["status"] ); + pub static ref SIGNED_VOLUNTARY_EXITS_TOTAL: Result = try_create_int_counter_vec( + "vc_signed_voluntary_exits_total", + "Total count of VoluntaryExit signings", + &["status"] + ); pub static ref SIGNED_VALIDATOR_REGISTRATIONS_TOTAL: Result = try_create_int_counter_vec( "builder_validator_registrations_total", "Total count of ValidatorRegistrationData signings", diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 82cacccc60..556fdef26b 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -94,6 +94,7 @@ pub struct ProductionValidatorClient { doppelganger_service: Option>, preparation_service: PreparationService, validator_store: Arc>, + slot_clock: SystemTimeSlotClock, http_api_listen_addr: Option, config: Config, } @@ -461,7 +462,7 @@ impl ProductionValidatorClient { let sync_committee_service = SyncCommitteeService::new( duties_service.clone(), validator_store.clone(), - slot_clock, + slot_clock.clone(), beacon_nodes.clone(), context.service_context("sync_committee".into()), ); @@ -482,6 +483,7 @@ impl ProductionValidatorClient { preparation_service, validator_store, config, + slot_clock, http_api_listen_addr: None, }) } @@ -544,6 +546,7 @@ impl ProductionValidatorClient { graffiti_flag: self.config.graffiti, spec: self.context.eth2_config.spec.clone(), config: self.config.http_api.clone(), + slot_clock: self.slot_clock.clone(), log: log.clone(), _phantom: PhantomData, }); diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index ae9df08096..0de2f2f54f 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -47,6 +47,7 @@ pub enum SignableMessage<'a, T: EthSpec, Payload: 
AbstractExecPayload = FullP }, SignedContributionAndProof(&'a ContributionAndProof), ValidatorRegistration(&'a ValidatorRegistrationData), + VoluntaryExit(&'a VoluntaryExit), } impl<'a, T: EthSpec, Payload: AbstractExecPayload> SignableMessage<'a, T, Payload> { @@ -67,6 +68,7 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> SignableMessage<'a, T, Pay } => beacon_block_root.signing_root(domain), SignableMessage::SignedContributionAndProof(c) => c.signing_root(domain), SignableMessage::ValidatorRegistration(v) => v.signing_root(domain), + SignableMessage::VoluntaryExit(exit) => exit.signing_root(domain), } } } @@ -203,6 +205,7 @@ impl SigningMethod { SignableMessage::ValidatorRegistration(v) => { Web3SignerObject::ValidatorRegistration(v) } + SignableMessage::VoluntaryExit(e) => Web3SignerObject::VoluntaryExit(e), }; // Determine the Web3Signer message type. diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 17e780304e..e907126faf 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -62,7 +62,6 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: AbstractExecPayload> { RandaoReveal { epoch: Epoch, }, - #[allow(dead_code)] VoluntaryExit(&'a VoluntaryExit), SyncCommitteeMessage { beacon_block_root: Hash256, diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 36a0d05734..73843579a2 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -22,8 +22,9 @@ use types::{ AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot, - SignedValidatorRegistrationData, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, - 
SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, + SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncAggregatorSelectionData, + SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, + ValidatorRegistrationData, VoluntaryExit, }; use validator_dir::ValidatorDir; @@ -155,6 +156,14 @@ impl ValidatorStore { self.validators.clone() } + /// Indicates if the `voting_public_key` exists in self and is enabled. + pub fn has_validator(&self, voting_public_key: &PublicKeyBytes) -> bool { + self.validators + .read() + .validator(voting_public_key) + .is_some() + } + /// Insert a new validator to `self`, where the validator is represented by an EIP-2335 /// keystore on the filesystem. #[allow(clippy::too_many_arguments)] @@ -616,6 +625,32 @@ impl ValidatorStore { } } + pub async fn sign_voluntary_exit( + &self, + validator_pubkey: PublicKeyBytes, + voluntary_exit: VoluntaryExit, + ) -> Result { + let signing_epoch = voluntary_exit.epoch; + let signing_context = self.signing_context(Domain::VoluntaryExit, signing_epoch); + let signing_method = self.doppelganger_bypassed_signing_method(validator_pubkey)?; + + let signature = signing_method + .get_signature::>( + SignableMessage::VoluntaryExit(&voluntary_exit), + signing_context, + &self.spec, + &self.task_executor, + ) + .await?; + + metrics::inc_counter_vec(&metrics::SIGNED_VOLUNTARY_EXITS_TOTAL, &[metrics::SUCCESS]); + + Ok(SignedVoluntaryExit { + message: voluntary_exit, + signature, + }) + } + pub async fn sign_validator_registration_data( &self, validator_registration_data: ValidatorRegistrationData, From 311e69db65f0133fb3f9ce980d3f88202cea042d Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 3 Apr 2023 03:02:57 +0000 Subject: [PATCH 14/17] Ban peer race condition (#4140) It is possible that when we go to ban a peer, there is already an unbanned message in the queue. 
It could lead to the case that we ban and immediately unban a peer leaving us in a state where a should-be banned peer is unbanned. If this banned peer connects to us in this faulty state, we currently do not attempt to re-ban it. This PR does correct this also, so if we do see this error, it will now self-correct (although we shouldn't see the error in the first place). I have also incremented the severity of not supporting protocols as I see peers ultimately get banned in a few steps and it seems to make sense to just ban them outright, rather than have them linger. --- .../src/peer_manager/mod.rs | 23 +++++++++++++------ .../src/peer_manager/network_behaviour.rs | 4 +++- .../lighthouse_network/src/service/mod.rs | 2 +- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 3d5c862e8b..a461a12e53 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -290,11 +290,20 @@ impl PeerManager { // If a peer is being banned, this trumps any temporary ban the peer might be // under. We no longer track it in the temporary ban list. 
- self.temporary_banned_peers.raw_remove(peer_id); - - // Inform the Swarm to ban the peer - self.events - .push(PeerManagerEvent::Banned(*peer_id, banned_ips)); + if !self.temporary_banned_peers.raw_remove(peer_id) { + // If the peer is not already banned, inform the Swarm to ban the peer + self.events + .push(PeerManagerEvent::Banned(*peer_id, banned_ips)); + // If the peer was in the process of being un-banned, remove it (a rare race + // condition) + self.events.retain(|event| { + if let PeerManagerEvent::UnBanned(unbanned_peer_id, _) = event { + unbanned_peer_id != peer_id // Remove matching peer ids + } else { + true + } + }); + } } } } @@ -552,8 +561,8 @@ impl PeerManager { Protocol::BlocksByRoot => return, Protocol::Goodbye => return, Protocol::LightClientBootstrap => return, - Protocol::MetaData => PeerAction::LowToleranceError, - Protocol::Status => PeerAction::LowToleranceError, + Protocol::MetaData => PeerAction::Fatal, + Protocol::Status => PeerAction::Fatal, } } RPCError::StreamTimeout => match direction { diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index a29f243c9e..24de83a61d 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -156,8 +156,10 @@ impl PeerManager { BanResult::BadScore => { // This is a faulty state error!(self.log, "Connected to a banned peer. Re-banning"; "peer_id" => %peer_id); - // Reban the peer + // Disconnect the peer. self.goodbye_peer(&peer_id, GoodbyeReason::Banned, ReportSource::PeerManager); + // Re-ban the peer to prevent repeated errors. 
+ self.events.push(PeerManagerEvent::Banned(peer_id, vec![])); return; } BanResult::BannedIp(ip_addr) => { diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 5cdcdeaf85..dc9b44849f 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1119,7 +1119,7 @@ impl Network { debug!(self.log, "Peer does not support gossipsub"; "peer_id" => %peer_id); self.peer_manager_mut().report_peer( &peer_id, - PeerAction::LowToleranceError, + PeerAction::Fatal, ReportSource::Gossipsub, Some(GoodbyeReason::Unknown), "does_not_support_gossipsub", From 1e029ce5384e911390a513e2d1885532f34a8b2b Mon Sep 17 00:00:00 2001 From: int88 Date: Mon, 3 Apr 2023 03:02:58 +0000 Subject: [PATCH 15/17] remove dup log (#4155) ## Issue Addressed NA ## Proposed Changes remove duplicate log message. ## Additional Info NA --- beacon_node/client/src/builder.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 5fa2fddc3e..d4b785cb11 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -347,12 +347,6 @@ where while block.slot() % slots_per_epoch != 0 { block_slot = (block_slot / slots_per_epoch - 1) * slots_per_epoch; - debug!( - context.log(), - "Searching for aligned checkpoint block"; - "block_slot" => block_slot, - ); - debug!( context.log(), "Searching for aligned checkpoint block"; From 8630ddfec4305789135b64ce4d78bd3e5a1c12ff Mon Sep 17 00:00:00 2001 From: Mac L Date: Mon, 3 Apr 2023 05:35:11 +0000 Subject: [PATCH 16/17] Add `beacon.watch` (#3362) > This is currently a WIP and all features are subject to alteration or removal at any time. ## Overview The successor to #2873. Contains the backbone of `beacon.watch` including syncing code, the initial API, and several core database tables. 
See `watch/README.md` for more information, requirements and usage. --- Cargo.lock | 699 ++++++--- Cargo.toml | 2 + beacon_node/http_api/Cargo.toml | 8 +- beacon_node/http_api/src/lib.rs | 1 + .../{tests/common.rs => src/test_utils.rs} | 10 +- beacon_node/http_api/tests/fork_tests.rs | 2 +- .../http_api/tests/interactive_tests.rs | 2 +- beacon_node/http_api/tests/main.rs | 1 - beacon_node/http_api/tests/tests.rs | 6 +- common/eth2/src/lib.rs | 2 +- common/eth2/src/lighthouse.rs | 71 +- watch/.gitignore | 1 + watch/Cargo.toml | 45 + watch/README.md | 460 ++++++ watch/config.yaml.default | 49 + watch/diesel.toml | 5 + watch/migrations/.gitkeep | 0 .../down.sql | 6 + .../up.sql | 36 + .../down.sql | 1 + .../2022-01-01-000000_canonical_slots/up.sql | 6 + .../2022-01-01-000001_beacon_blocks/down.sql | 1 + .../2022-01-01-000001_beacon_blocks/up.sql | 7 + .../2022-01-01-000002_validators/down.sql | 1 + .../2022-01-01-000002_validators/up.sql | 7 + .../2022-01-01-000003_proposer_info/down.sql | 1 + .../2022-01-01-000003_proposer_info/up.sql | 5 + .../2022-01-01-000004_active_config/down.sql | 1 + .../2022-01-01-000004_active_config/up.sql | 5 + .../2022-01-01-000010_blockprint/down.sql | 1 + .../2022-01-01-000010_blockprint/up.sql | 4 + .../2022-01-01-000011_block_rewards/down.sql | 1 + .../2022-01-01-000011_block_rewards/up.sql | 6 + .../2022-01-01-000012_block_packing/down.sql | 1 + .../2022-01-01-000012_block_packing/up.sql | 6 + .../down.sql | 1 + .../up.sql | 8 + .../2022-01-01-000020_capella/down.sql | 2 + .../2022-01-01-000020_capella/up.sql | 3 + watch/postgres_docker_compose/compose.yml | 16 + watch/src/block_packing/database.rs | 140 ++ watch/src/block_packing/mod.rs | 38 + watch/src/block_packing/server.rs | 31 + watch/src/block_packing/updater.rs | 211 +++ watch/src/block_rewards/database.rs | 137 ++ watch/src/block_rewards/mod.rs | 38 + watch/src/block_rewards/server.rs | 31 + watch/src/block_rewards/updater.rs | 157 +++ watch/src/blockprint/config.rs | 40 + 
watch/src/blockprint/database.rs | 224 +++ watch/src/blockprint/mod.rs | 149 ++ watch/src/blockprint/server.rs | 31 + watch/src/blockprint/updater.rs | 172 +++ watch/src/cli.rs | 55 + watch/src/client.rs | 178 +++ watch/src/config.rs | 50 + watch/src/database/compat.rs | 49 + watch/src/database/config.rs | 74 + watch/src/database/error.rs | 55 + watch/src/database/mod.rs | 782 ++++++++++ watch/src/database/models.rs | 67 + watch/src/database/schema.rs | 102 ++ watch/src/database/utils.rs | 29 + watch/src/database/watch_types.rs | 119 ++ watch/src/lib.rs | 12 + watch/src/logger.rs | 24 + watch/src/main.rs | 41 + watch/src/server/config.rs | 28 + watch/src/server/error.rs | 50 + watch/src/server/handler.rs | 266 ++++ watch/src/server/mod.rs | 134 ++ watch/src/suboptimal_attestations/database.rs | 224 +++ watch/src/suboptimal_attestations/mod.rs | 56 + watch/src/suboptimal_attestations/server.rs | 299 ++++ watch/src/suboptimal_attestations/updater.rs | 236 ++++ watch/src/updater/config.rs | 65 + watch/src/updater/error.rs | 56 + watch/src/updater/handler.rs | 471 +++++++ watch/src/updater/mod.rs | 234 +++ watch/tests/tests.rs | 1254 +++++++++++++++++ 80 files changed, 7663 insertions(+), 236 deletions(-) rename beacon_node/http_api/{tests/common.rs => src/test_utils.rs} (96%) create mode 100644 watch/.gitignore create mode 100644 watch/Cargo.toml create mode 100644 watch/README.md create mode 100644 watch/config.yaml.default create mode 100644 watch/diesel.toml create mode 100644 watch/migrations/.gitkeep create mode 100644 watch/migrations/00000000000000_diesel_initial_setup/down.sql create mode 100644 watch/migrations/00000000000000_diesel_initial_setup/up.sql create mode 100644 watch/migrations/2022-01-01-000000_canonical_slots/down.sql create mode 100644 watch/migrations/2022-01-01-000000_canonical_slots/up.sql create mode 100644 watch/migrations/2022-01-01-000001_beacon_blocks/down.sql create mode 100644 watch/migrations/2022-01-01-000001_beacon_blocks/up.sql 
create mode 100644 watch/migrations/2022-01-01-000002_validators/down.sql create mode 100644 watch/migrations/2022-01-01-000002_validators/up.sql create mode 100644 watch/migrations/2022-01-01-000003_proposer_info/down.sql create mode 100644 watch/migrations/2022-01-01-000003_proposer_info/up.sql create mode 100644 watch/migrations/2022-01-01-000004_active_config/down.sql create mode 100644 watch/migrations/2022-01-01-000004_active_config/up.sql create mode 100644 watch/migrations/2022-01-01-000010_blockprint/down.sql create mode 100644 watch/migrations/2022-01-01-000010_blockprint/up.sql create mode 100644 watch/migrations/2022-01-01-000011_block_rewards/down.sql create mode 100644 watch/migrations/2022-01-01-000011_block_rewards/up.sql create mode 100644 watch/migrations/2022-01-01-000012_block_packing/down.sql create mode 100644 watch/migrations/2022-01-01-000012_block_packing/up.sql create mode 100644 watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql create mode 100644 watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql create mode 100644 watch/migrations/2022-01-01-000020_capella/down.sql create mode 100644 watch/migrations/2022-01-01-000020_capella/up.sql create mode 100644 watch/postgres_docker_compose/compose.yml create mode 100644 watch/src/block_packing/database.rs create mode 100644 watch/src/block_packing/mod.rs create mode 100644 watch/src/block_packing/server.rs create mode 100644 watch/src/block_packing/updater.rs create mode 100644 watch/src/block_rewards/database.rs create mode 100644 watch/src/block_rewards/mod.rs create mode 100644 watch/src/block_rewards/server.rs create mode 100644 watch/src/block_rewards/updater.rs create mode 100644 watch/src/blockprint/config.rs create mode 100644 watch/src/blockprint/database.rs create mode 100644 watch/src/blockprint/mod.rs create mode 100644 watch/src/blockprint/server.rs create mode 100644 watch/src/blockprint/updater.rs create mode 100644 watch/src/cli.rs create mode 
100644 watch/src/client.rs create mode 100644 watch/src/config.rs create mode 100644 watch/src/database/compat.rs create mode 100644 watch/src/database/config.rs create mode 100644 watch/src/database/error.rs create mode 100644 watch/src/database/mod.rs create mode 100644 watch/src/database/models.rs create mode 100644 watch/src/database/schema.rs create mode 100644 watch/src/database/utils.rs create mode 100644 watch/src/database/watch_types.rs create mode 100644 watch/src/lib.rs create mode 100644 watch/src/logger.rs create mode 100644 watch/src/main.rs create mode 100644 watch/src/server/config.rs create mode 100644 watch/src/server/error.rs create mode 100644 watch/src/server/handler.rs create mode 100644 watch/src/server/mod.rs create mode 100644 watch/src/suboptimal_attestations/database.rs create mode 100644 watch/src/suboptimal_attestations/mod.rs create mode 100644 watch/src/suboptimal_attestations/server.rs create mode 100644 watch/src/suboptimal_attestations/updater.rs create mode 100644 watch/src/updater/config.rs create mode 100644 watch/src/updater/error.rs create mode 100644 watch/src/updater/handler.rs create mode 100644 watch/src/updater/mod.rs create mode 100644 watch/tests/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 214f3baa35..7a67b77bf9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -90,9 +90,9 @@ dependencies = [ [[package]] name = "aead" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c192eb8f11fc081b0fe4259ba5af04217d4e0faddd02417310a927911abd7c8" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ "crypto-common", "generic-array", @@ -153,7 +153,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82e1366e0c69c9f927b1fa5ce2c7bf9eafc8f9268c0b9800729e8b267612447c" dependencies = [ - "aead 0.5.1", + "aead 0.5.2", "aes 0.8.2", "cipher 0.4.4", "ctr 0.9.2", @@ -226,9 +226,9 @@ 
dependencies = [ [[package]] name = "anyhow" -version = "1.0.69" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" +checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" [[package]] name = "arbitrary" @@ -246,9 +246,9 @@ checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] name = "arrayref" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" @@ -296,7 +296,7 @@ checksum = "db8b7511298d5b7784b40b092d9e9dcd3a627a5707e4b5e507931ab0d44eeebf" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "synstructure", ] @@ -308,7 +308,7 @@ checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "synstructure", ] @@ -320,7 +320,7 @@ checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -331,22 +331,22 @@ checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" [[package]] name = "async-io" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ "async-lock", "autocfg 1.1.0", + "cfg-if", "concurrent-queue", "futures-lite", - "libc", "log", "parking", "polling", + "rustix", "slab", - "socket2", + "socket2 0.4.9", "waker-fn", - "windows-sys 0.42.0", ] [[package]] @@ -377,18 +377,18 @@ checksum = 
"e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "async-trait" -version = "0.1.66" +version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -452,7 +452,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -805,6 +805,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "bollard-stubs" +version = "1.41.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2f2e73fffe9455141e170fb9c1feb0ac521ec7e7dcd47a7cab72a658490fb8" +dependencies = [ + "chrono", + "serde", + "serde_with", +] + [[package]] name = "boot_node" version = "4.0.1" @@ -994,6 +1005,7 @@ dependencies = [ "js-sys", "num-integer", "num-traits", + "serde", "time 0.1.45", "wasm-bindgen", "winapi", @@ -1029,9 +1041,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", @@ -1112,9 +1124,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.49" +version = "0.1.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db34956e100b30725f2eb215f90d4871051239535632f84fea3bc92722c66b7c" +checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" dependencies = [ "cc", ] @@ -1141,7 +1153,7 @@ name = "compare_fields_derive" version = "0.2.0" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1187,9 +1199,9 
@@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "core2" @@ -1202,9 +1214,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "280a9f2d8b3a38871a3c8a46fb80db65e5e5ed97da80c4d08bf27fb63e35e181" dependencies = [ "libc", ] @@ -1425,9 +1437,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0-rc.1" +version = "4.0.0-rc.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d4ba9852b42210c7538b75484f9daa0655e9a3ac04f693747bb0f02cf3cfe16" +checksum = "03d928d978dbec61a1167414f5ec534f24bea0d7a0d24dd9b6233d3d8223e585" dependencies = [ "cfg-if", "fiat-crypto", @@ -1439,9 +1451,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" +checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" dependencies = [ "cc", "cxxbridge-flags", @@ -1451,9 +1463,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" +checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" dependencies = [ "cc", "codespan-reporting", @@ -1461,24 +1473,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn", + "syn 2.0.13", ] [[package]] name = "cxxbridge-flags" -version = 
"1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" +checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" [[package]] name = "cxxbridge-macro" -version = "1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" +checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -1512,7 +1524,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn", + "syn 1.0.109", ] [[package]] @@ -1526,7 +1538,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn", + "syn 1.0.109", ] [[package]] @@ -1537,7 +1549,7 @@ checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core 0.13.4", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1548,7 +1560,7 @@ checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ "darling_core 0.14.4", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1594,7 +1606,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5bbed42daaa95e780b60a50546aa345b8413a1e46f9a40a12907d3598f038db" dependencies = [ "data-encoding", - "syn", + "syn 1.0.109", ] [[package]] @@ -1692,7 +1704,7 @@ checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1702,7 +1714,7 @@ source = "git+https://github.com/michaelsproul/arbitrary?rev=f002b99989b561ddce6 dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1723,7 +1735,7 @@ dependencies = [ "darling 0.14.4", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1733,7 +1745,7 @@ 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68" dependencies = [ "derive_builder_core", - "syn", + "syn 1.0.109", ] [[package]] @@ -1746,7 +1758,44 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "diesel" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4391a22b19c916e50bec4d6140f29bdda3e3bb187223fe6e3ea0b6e4d1021c04" +dependencies = [ + "bitflags", + "byteorder", + "diesel_derives", + "itoa", + "pq-sys", + "r2d2", +] + +[[package]] +name = "diesel_derives" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ad74fdcf086be3d4fdd142f67937678fe60ed431c3b2f08599e7687269410c4" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "diesel_migrations" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9ae22beef5e9d6fab9225ddb073c1c6c1a7a6ded5019d5da11d1e5c5adc34e2" +dependencies = [ + "diesel", + "migrations_internals", + "migrations_macros", ] [[package]] @@ -1843,7 +1892,7 @@ dependencies = [ "rand 0.8.5", "rlp", "smallvec", - "socket2", + "socket2 0.4.9", "tokio", "tokio-stream", "tokio-util 0.6.10", @@ -1861,7 +1910,7 @@ checksum = "3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2021,7 +2070,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2071,13 +2120,13 @@ dependencies = [ [[package]] name = "errno" -version = "0.2.8" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +checksum = "50d6a0976c999d473fe89ad888d5a284e55366d9dc9038b1ba2aa15128c4afa0" 
dependencies = [ "errno-dragonfly", "libc", - "winapi", + "windows-sys 0.45.0", ] [[package]] @@ -2283,7 +2332,7 @@ dependencies = [ "eth2_ssz", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2632,9 +2681,9 @@ checksum = "ec54ac60a7f2ee9a97cad9946f9bf629a3bc6a7ae59e68983dc9318f5a54b81a" [[package]] name = "fiat-crypto" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ace6ec7cc19c8ed33a32eaa9ea692d7faea05006b5356b9e2b668ec4bc3955" +checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" [[package]] name = "field-offset" @@ -2765,9 +2814,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "531ac96c6ff5fd7c62263c5e3c67a603af4fcaee2e1a0ae5565ba3a11e69e549" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -2780,9 +2829,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -2790,15 +2839,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1997dd9df74cdac935c76252744c1ed5794fac083242ea4fe77ef3ed60ba0f83" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -2808,9 +2857,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d422fa3cbe3b40dca574ab087abb5bc98258ea57eea3fd6f1fa7162c778b91" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-lite" @@ -2829,13 +2878,13 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eb14ed937631bd8b8b8977f2c198443447a8355b6e3ca599f38c975e5a963b6" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -2851,15 +2900,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-timer" @@ -2869,9 +2918,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" +checksum = 
"26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", @@ -2896,9 +2945,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -2996,7 +3045,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -3150,6 +3199,12 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" + [[package]] name = "hex" version = "0.4.3" @@ -3352,7 +3407,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite 0.2.9", - "socket2", + "socket2 0.4.9", "tokio", "tower-service", "tracing", @@ -3387,16 +3442,16 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.53" +version = "0.1.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +checksum = "0c17cc76786e99f8d2f055c11159e7f0091c42474dcc3189fbab96072e873e6d" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi", + "windows 0.46.0", ] [[package]] @@ -3469,9 +3524,9 @@ dependencies = [ [[package]] name = "if-watch" -version = "3.0.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba7abdbb86e485125dad06c2691e1e393bf3b08c7b743b43aa162a00fd39062e" +checksum = "a9465340214b296cd17a0009acdb890d6160010b8adf8f78a00d0d7ab270f79f" dependencies = [ "async-io", "core-foundation", @@ -3483,7 +3538,7 @@ dependencies = [ 
"rtnetlink", "system-configuration", "tokio", - "windows", + "windows 0.34.0", ] [[package]] @@ -3552,14 +3607,14 @@ checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg 1.1.0", "hashbrown 0.12.3", @@ -3625,10 +3680,11 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.6" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" +checksum = "09270fd4fa1111bc614ed2246c7ef56239a3063d5be0d1ec3b589c505d400aeb" dependencies = [ + "hermit-abi 0.3.1", "libc", "windows-sys 0.45.0", ] @@ -3639,7 +3695,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" dependencies = [ - "socket2", + "socket2 0.4.9", "widestring 0.5.1", "winapi", "winreg", @@ -3647,9 +3703,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" +checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" [[package]] name = "itertools" @@ -3723,11 +3779,11 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "8.2.0" +version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f4f04699947111ec1733e71778d763555737579e44b85844cae8e1940a1828" +checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ 
- "base64 0.13.1", + "base64 0.21.0", "pem", "ring", "serde", @@ -4021,7 +4077,7 @@ dependencies = [ "instant", "libp2p-identity", "log", - "multiaddr 0.17.0", + "multiaddr 0.17.1", "multihash 0.17.0", "multistream-select 0.12.1", "once_cell", @@ -4103,17 +4159,16 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6c9cb71e2333d31f18e7556b9a5f1d0a2e013effc9325e36f436be65fe7bd2" +checksum = "8a8ea433ae0cea7e3315354305237b9897afe45278b2118a7a57ca744e70fd27" dependencies = [ "bs58", "ed25519-dalek", "log", - "multiaddr 0.17.0", + "multiaddr 0.17.1", "multihash 0.17.0", "prost", - "prost-build", "quick-protobuf", "rand 0.8.5", "thiserror", @@ -4134,7 +4189,7 @@ dependencies = [ "log", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.4.9", "tokio", "trust-dns-proto", "void", @@ -4262,7 +4317,7 @@ checksum = "9d527d5827582abd44a6d80c07ff8b50b4ee238a8979e05998474179e79dc400" dependencies = [ "heck", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -4277,7 +4332,7 @@ dependencies = [ "libc", "libp2p-core 0.38.0", "log", - "socket2", + "socket2 0.4.9", "tokio", ] @@ -4560,9 +4615,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.1.4" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" +checksum = "d59d8c75012853d2e872fb56bc8a2e53718e2cafe1a4c823143141c6d90c322f" [[package]] name = "lmdb-rkv" @@ -4787,7 +4842,7 @@ dependencies = [ "proc-macro2", "quote", "smallvec", - "syn", + "syn 1.0.109", ] [[package]] @@ -4807,6 +4862,27 @@ dependencies = [ "tracing", ] +[[package]] +name = "migrations_internals" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c493c09323068c01e54c685f7da41a9ccf9219735c3766fbfd6099806ea08fbc" +dependencies = [ + "serde", + "toml", +] + +[[package]] +name = "migrations_macros" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a8ff27a350511de30cdabb77147501c36ef02e0451d957abea2f30caffb2b58" +dependencies = [ + "migrations_internals", + "proc-macro2", + "quote", +] + [[package]] name = "milagro_bls" version = "1.4.2" @@ -4821,9 +4897,9 @@ dependencies = [ [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" @@ -4926,13 +5002,14 @@ dependencies = [ [[package]] name = "multiaddr" -version = "0.17.0" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b53e0cc5907a5c216ba6584bf74be8ab47d6d6289f72793b2dddbf15dc3bf8c" +checksum = "2b36f567c7099511fa8612bbbb52dda2419ce0bdbacf31714e3a5ffdb766d3bd" dependencies = [ "arrayref", "byteorder", "data-encoding", + "log", "multibase", "multihash 0.17.0", "percent-encoding", @@ -4989,7 +5066,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "synstructure", ] @@ -5421,14 +5498,14 @@ dependencies = [ "bytes", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "openssl" -version = "0.10.48" +version = "0.10.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "518915b97df115dd36109bfa429a48b8f737bd05508cf9588977b599648926d2" +checksum = "4d2f106ab837a24e03672c59b1239669a0596406ff657c3c0835b6b7f0f35a33" dependencies = [ "bitflags", "cfg-if", @@ -5441,13 +5518,13 @@ dependencies = [ [[package]] name = "openssl-macros" -version = "0.1.0" +version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -5458,20 +5535,19 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.25.1+1.1.1t" +version = "111.25.2+1.1.1t" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ef9a9cc6ea7d9d5e7c4a913dc4b48d0e359eddf01af1dfec96ba7064b4aba10" +checksum = "320708a054ad9b3bf314688b5db87cf4d6683d64cfc835e2337924ae62bf4431" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.83" +version = "0.9.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "666416d899cf077260dac8698d60a60b435a46d57e82acb1be3d0dad87284e5b" +checksum = "3a20eace9dc2d82904039cb76dcf50fb1a0bba071cfd1629720b5d6f1ddba0fa" dependencies = [ - "autocfg 1.1.0", "cc", "libc", "openssl-src", @@ -5578,7 +5654,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5590,7 +5666,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5629,7 +5705,7 @@ dependencies = [ "cfg-if", "instant", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", "winapi", ] @@ -5642,7 +5718,7 @@ checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", "windows-sys 0.45.0", ] @@ -5721,6 +5797,24 @@ dependencies = [ "rustc_version 0.4.0", ] +[[package]] +name = "phf" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "928c6535de93548188ef63bb7c4036bd415cd8f36ad25af44b9789b2ee72a48c" +dependencies = [ + "phf_shared", +] + 
+[[package]] +name = "phf_shared" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1fb5f6f826b772a8d4c0394209441e7d37cbbb967ae9c7e0e8134365c9ee676" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project" version = "1.0.12" @@ -5738,7 +5832,7 @@ checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5866,6 +5960,35 @@ dependencies = [ "universal-hash 0.5.0", ] +[[package]] +name = "postgres-protocol" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b7fa9f396f51dffd61546fd8573ee20592287996568e6175ceb0f8699ad75d" +dependencies = [ + "base64 0.21.0", + "byteorder", + "bytes", + "fallible-iterator", + "hmac 0.12.1", + "md-5", + "memchr", + "rand 0.8.5", + "sha2 0.10.6", + "stringprep", +] + +[[package]] +name = "postgres-types" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f028f05971fe20f512bcc679e2c10227e57809a3af86a7606304435bc8896cd6" +dependencies = [ + "bytes", + "fallible-iterator", + "postgres-protocol", +] + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -5873,13 +5996,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] -name = "prettyplease" -version = "0.1.24" +name = "pq-sys" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ebcd279d20a4a0a2404a33056388e950504d891c855c7975b9a8fef75f3bf04" +checksum = "3b845d6d8ec554f972a2c5298aad68953fd64e7441e846075450b44656a016d1" +dependencies = [ + "vcpkg", +] + +[[package]] +name = "prettyplease" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ 
"proc-macro2", - "syn", + "syn 1.0.109", ] [[package]] @@ -5928,7 +6060,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check", ] @@ -5951,9 +6083,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.52" +version = "1.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" +checksum = "1d0dd4be24fcdcfeaa12a432d588dc59bbad6cad3510c67e74a2b6b2fc950564" dependencies = [ "unicode-ident", ] @@ -6005,7 +6137,7 @@ checksum = "66a455fbcb954c1a7decf3c586e860fd7889cddf4b8e164be736dbac95a953cd" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6035,7 +6167,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn", + "syn 1.0.109", "tempfile", "which", ] @@ -6063,7 +6195,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6148,7 +6280,7 @@ checksum = "608c156fd8e97febc07dc9c2e2c80bf74cfc6ef26893eae3daf8bc2bc94a4b7f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6164,9 +6296,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4ced82a24bb281af338b9e8f94429b6eca01b4e66d899f40031f074e74c9" +checksum = "67c10f662eee9c94ddd7135043e544f3c82fa839a1e7b865911331961b53186c" dependencies = [ "bytes", "rand 0.8.5", @@ -6358,6 +6490,15 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.4.3" @@ -6365,15 +6506,15 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ "getrandom 0.2.8", - "redox_syscall", + "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.7.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" +checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" dependencies = [ "aho-corasick", "memchr", @@ -6391,15 +6532,15 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "reqwest" -version = "0.11.14" +version = "0.11.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" +checksum = "27b71749df584b7f4cac2c426c127a7c785a5106cc98f7a8feb044115f0fa254" dependencies = [ "base64 0.21.0", "bytes", @@ -6499,7 +6640,7 @@ checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6568,9 +6709,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d4a36c42d1873f9a77c53bde094f9664d9891bc604a45b4798fd2c389ed12e5b" [[package]] name = "rustc-hash" @@ -6613,9 +6754,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.9" +version = "0.37.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" +checksum = "d097081ed288dfe45699b72f5b5d648e5f15d64d900c7080273baa20c16a6849" dependencies = [ "bitflags", "errno", @@ -6712,9 +6853,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.3.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608" +checksum = "0cfdffd972d76b22f3d7f81c8be34b2296afd3a25e0a547bd9abe340a4dbbe97" dependencies = [ "cfg-if", "derive_more", @@ -6724,14 +6865,14 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.3.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "303959cf613a6f6efd19ed4b4ad5bf79966a13352716299ad532cfb115f4205c" +checksum = "61fa974aea2d63dd18a4ec3a49d59af9f34178c73a4f56d2f18205628d00681e" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6906,9 +7047,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.155" +version = "1.0.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71f2b4817415c6d4210bfe1c7bfcf4801b2d904cb4d0e1a8fdb651013c9e86b8" +checksum = "3c04e8343c3daeec41f58990b9d77068df31209f2af111e059e9fe9646693065" dependencies = [ "serde_derive", ] @@ -6935,20 +7076,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.155" +version = "1.0.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d071a94a3fac4aff69d023a7f411e33f40f3483f8c5190b1953822b6b76d7630" +checksum = "4c614d17805b093df4b147b51339e7e44bf05ef59fba1e45d83500bcfb4d8585" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] name = "serde_json" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" +checksum 
= "d721eca97ac802aa7777b701877c8004d950fc142651367300d21c1cc0194744" dependencies = [ "itoa", "ryu", @@ -6957,13 +7098,13 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395627de918015623b32e7669714206363a7fc00382bf477e72c1f7533e8eafc" +checksum = "bcec881020c684085e55a25f7fd888954d56609ef363479dc5a1305eb0d40cab" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -6997,7 +7138,7 @@ dependencies = [ "darling 0.13.4", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -7157,6 +7298,12 @@ dependencies = [ "types", ] +[[package]] +name = "siphasher" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" + [[package]] name = "slab" version = "0.4.8" @@ -7358,14 +7505,14 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ccba027ba85743e09d15c03296797cad56395089b832b48b5a5217880f57733" +checksum = "774d05a3edae07ce6d68ea6984f3c05e9bba8927e3dd591e3b479e5b03213d0d" dependencies = [ "aes-gcm 0.9.4", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-rc.1", + "curve25519-dalek 4.0.0-rc.2", "rand_core 0.6.4", "ring", "rustc_version 0.4.0", @@ -7383,6 +7530,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "socket2" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc8d618c6641ae355025c449427f9e96b98abf99a772be3cef6708d15c77147a" +dependencies = [ + "libc", + "windows-sys 0.45.0", +] + [[package]] name = "soketto" version = "0.7.1" @@ -7436,7 +7593,7 @@ source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28e dependencies = [ "proc-macro2", "quote", - 
"syn", + "syn 1.0.109", ] [[package]] @@ -7509,6 +7666,16 @@ dependencies = [ "types", ] +[[package]] +name = "stringprep" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "strsim" version = "0.8.0" @@ -7540,7 +7707,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn", + "syn 1.0.109", ] [[package]] @@ -7588,7 +7755,7 @@ dependencies = [ "proc-macro2", "quote", "smallvec", - "syn", + "syn 1.0.109", ] [[package]] @@ -7602,7 +7769,7 @@ dependencies = [ "proc-macro2", "quote", "smallvec", - "syn", + "syn 1.0.109", ] [[package]] @@ -7625,6 +7792,17 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn" +version = "2.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c9da457c5285ac1f936ebd076af6dac17a61cfe7826f2076b4d015cf47bc8ec" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] name = "sync_wrapper" version = "0.1.2" @@ -7639,7 +7817,7 @@ checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "unicode-xid", ] @@ -7732,15 +7910,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" +checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" dependencies = [ "cfg-if", "fastrand", - "redox_syscall", + "redox_syscall 0.3.5", "rustix", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -7776,7 +7954,24 @@ name = "test_random_derive" version = "0.2.0" dependencies = [ "quote", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "testcontainers" +version = "0.14.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e2b1567ca8a2b819ea7b28c92be35d9f76fb9edb214321dcc86eb96023d1f87" +dependencies = [ + "bollard-stubs", + "futures", + "hex", + "hmac 0.12.1", + "log", + "rand 0.8.5", + "serde", + "serde_json", + "sha2 0.10.6", ] [[package]] @@ -7790,22 +7985,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -7933,20 +8128,19 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.26.0" +version = "1.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" +checksum = "d0de47a4eecbe11f498978a9b29d792f0d2692d1dd003650c24c76510e3bc001" dependencies = [ "autocfg 1.1.0", "bytes", "libc", - "memchr", "mio", "num_cpus", "parking_lot 0.12.1", "pin-project-lite 0.2.9", "signal-hook-registry", - "socket2", + "socket2 0.4.9", "tokio-macros", "windows-sys 0.45.0", ] @@ -7963,13 +8157,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce" 
dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -7982,6 +8176,30 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-postgres" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e89f6234aa8fd43779746012fcf53603cdb91fdd8399aa0de868c2d56b6dde1" +dependencies = [ + "async-trait", + "byteorder", + "bytes", + "fallible-iterator", + "futures-channel", + "futures-util", + "log", + "parking_lot 0.12.1", + "percent-encoding", + "phf", + "pin-project-lite 0.2.9", + "postgres-protocol", + "postgres-types", + "socket2 0.5.1", + "tokio", + "tokio-util 0.7.7", +] + [[package]] name = "tokio-rustls" version = "0.22.0" @@ -8154,7 +8372,7 @@ checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -8222,7 +8440,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebeb235c5847e2f82cfe0f07eb971d1e5f6804b18dac2ae16349cc604380f82f" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -8246,7 +8464,7 @@ version = "0.4.0" dependencies = [ "darling 0.13.4", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -8277,7 +8495,7 @@ dependencies = [ "lazy_static", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.4.9", "thiserror", "tinyvec", "tokio", @@ -8466,9 +8684,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.11" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" @@ -8707,12 +8925,11 @@ checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "walkdir" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", - "winapi", "winapi-util", ] @@ -8813,7 +9030,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-shared", ] @@ -8847,7 +9064,7 @@ checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8910,6 +9127,39 @@ dependencies = [ "web-sys", ] +[[package]] +name = "watch" +version = "0.1.0" +dependencies = [ + "axum", + "beacon_chain", + "beacon_node", + "bls", + "byteorder", + "clap", + "diesel", + "diesel_migrations", + "env_logger 0.9.3", + "eth2", + "hex", + "http_api", + "hyper", + "log", + "network", + "r2d2", + "rand 0.7.3", + "reqwest", + "serde", + "serde_json", + "serde_yaml", + "testcontainers", + "tokio", + "tokio-postgres", + "types", + "unused_port", + "url", +] + [[package]] name = "web-sys" version = "0.3.61" @@ -9114,7 +9364,7 @@ dependencies = [ "tokio", "webpki 0.21.4", "webrtc-util", - "x25519-dalek 2.0.0-pre.1", + "x25519-dalek 2.0.0-rc.2", "x509-parser 0.13.2", ] @@ -9149,7 +9399,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f08dfd7a6e3987e255c4dbe710dde5d94d0f0574f8a21afa95d171376c143106" dependencies = [ "log", - "socket2", + "socket2 0.4.9", "thiserror", "tokio", "webrtc-util", @@ -9300,6 +9550,15 @@ dependencies = [ "windows_x86_64_msvc 0.34.0", ] +[[package]] +name = "windows" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdacb41e6a96a052c6cb63a144f24900236121c6f63f4f8219fef5977ecb0c25" +dependencies = [ + "windows-targets", +] + [[package]] name = "windows-acl" version = "0.3.0" @@ -9479,12 +9738,13 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "2.0.0-pre.1" +version = "2.0.0-rc.2" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5da623d8af10a62342bcbbb230e33e58a63255a58012f8653c578e54bab48df" +checksum = "fabd6e16dd08033932fc3265ad4510cc2eab24656058a6dcb107ffe274abcc95" dependencies = [ - "curve25519-dalek 3.2.0", + "curve25519-dalek 4.0.0-rc.2", "rand_core 0.6.4", + "serde", "zeroize", ] @@ -9574,23 +9834,22 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.3.3" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn", - "synstructure", + "syn 2.0.13", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index ba07de0443..0290f2ded9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,6 +87,8 @@ members = [ "validator_client", "validator_client/slashing_protection", + + "watch", ] resolver = "2" diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 0aa626be0c..e251b04856 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -38,15 +38,15 @@ system_health = { path = "../../common/system_health" } directory = { path = "../../common/directory" } eth2_serde_utils = "0.1.1" operation_pool = { path = "../operation_pool" } +sensitive_url = { path = "../../common/sensitive_url" } +unused_port = {path = "../../common/unused_port"} +logging = { path = "../../common/logging" } +store = { path = "../store" } [dev-dependencies] -store = { path = "../store" } environment = { path = 
"../../lighthouse/environment" } -sensitive_url = { path = "../../common/sensitive_url" } -logging = { path = "../../common/logging" } serde_json = "1.0.58" proto_array = { path = "../../consensus/proto_array" } -unused_port = {path = "../../common/unused_port"} genesis = { path = "../genesis" } [[test]] diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index aa52466e26..d19187cb44 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -18,6 +18,7 @@ mod standard_block_rewards; mod state_id; mod sync_committee_rewards; mod sync_committees; +pub mod test_utils; mod ui; mod validator_inclusion; mod version; diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/src/test_utils.rs similarity index 96% rename from beacon_node/http_api/tests/common.rs rename to beacon_node/http_api/src/test_utils.rs index 3e34bafe84..6f918e1b9e 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -1,3 +1,4 @@ +use crate::{Config, Context}; use beacon_chain::{ test_utils::{ BeaconChainHarness, BoxedMutator, Builder as HarnessBuilder, EphemeralHarnessType, @@ -6,7 +7,6 @@ use beacon_chain::{ }; use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; -use http_api::{Config, Context}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, libp2p::{ @@ -182,7 +182,7 @@ pub async fn create_api_server_on_port( let eth1_service = eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap(); - let context = Arc::new(Context { + let ctx = Arc::new(Context { config: Config { enabled: true, listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), @@ -193,19 +193,19 @@ pub async fn create_api_server_on_port( data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), spec_fork_name: None, }, - chain: Some(chain.clone()), + chain: Some(chain), network_senders: Some(network_senders), network_globals: Some(network_globals), 
eth1_service: Some(eth1_service), log, }); - let ctx = context.clone(); + let (shutdown_tx, shutdown_rx) = oneshot::channel(); let server_shutdown = async { // It's not really interesting why this triggered, just that it happened. let _ = shutdown_rx.await; }; - let (listening_socket, server) = http_api::serve(ctx, server_shutdown).unwrap(); + let (listening_socket, server) = crate::serve(ctx, server_shutdown).unwrap(); ApiServer { server, diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 6144123565..8a3ba887b3 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -1,11 +1,11 @@ //! Tests for API behaviour across fork boundaries. -use crate::common::*; use beacon_chain::{ test_utils::{RelativeSyncCommittee, DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME}, StateSkipConfig, }; use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials}; +use http_api::test_utils::*; use std::collections::HashSet; use types::{ test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 7db1b22d67..9763b8037b 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1,11 +1,11 @@ //! 
Generic tests that make use of the (newer) `InteractiveApiTester` -use crate::common::*; use beacon_chain::{ chain_config::ReOrgThreshold, test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, }; use eth2::types::DepositContractData; use execution_layer::{ForkchoiceState, PayloadAttributes}; +use http_api::test_utils::InteractiveTester; use parking_lot::Mutex; use slot_clock::SlotClock; use state_processing::{ diff --git a/beacon_node/http_api/tests/main.rs b/beacon_node/http_api/tests/main.rs index 88e0032ecd..342b72cc7d 100644 --- a/beacon_node/http_api/tests/main.rs +++ b/beacon_node/http_api/tests/main.rs @@ -1,6 +1,5 @@ #![cfg(not(debug_assertions))] // Tests are too slow in debug. -pub mod common; pub mod fork_tests; pub mod interactive_tests; pub mod tests; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 2f1d5fd587..0ef27febea 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1,4 +1,3 @@ -use crate::common::{create_api_server, create_api_server_on_port, ApiServer}; use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, @@ -18,7 +17,10 @@ use execution_layer::test_utils::{ }; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; -use http_api::{BlockId, StateId}; +use http_api::{ + test_utils::{create_api_server, create_api_server_on_port, ApiServer}, + BlockId, StateId, +}; use lighthouse_network::{Enr, EnrExt, PeerId}; use network::NetworkReceivers; use proto_array::ExecutionStatus; diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index d4f68624fe..1a7cf29790 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -22,7 +22,7 @@ use lighthouse_network::PeerId; pub use reqwest; use reqwest::{IntoUrl, RequestBuilder, Response}; pub use reqwest::{StatusCode, Url}; -pub use sensitive_url::SensitiveUrl; +pub use 
sensitive_url::{SensitiveError, SensitiveUrl}; use serde::{de::DeserializeOwned, Serialize}; use std::convert::TryFrom; use std::fmt; diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index e50d9f4dc0..bb933dbe12 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -13,7 +13,7 @@ use crate::{ BeaconState, ChainSpec, DepositTreeSnapshot, Epoch, EthSpec, FinalizedExecutionBlock, GenericResponse, ValidatorId, }, - BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, StateId, StatusCode, + BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, Slot, StateId, StatusCode, }; use proto_array::core::ProtoArray; use reqwest::IntoUrl; @@ -566,4 +566,73 @@ impl BeaconNodeHttpClient { self.post_with_response(path, &()).await } + + /// + /// Analysis endpoints. + /// + + /// `GET` lighthouse/analysis/block_rewards?start_slot,end_slot + pub async fn get_lighthouse_analysis_block_rewards( + &self, + start_slot: Slot, + end_slot: Slot, + ) -> Result, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("analysis") + .push("block_rewards"); + + path.query_pairs_mut() + .append_pair("start_slot", &start_slot.to_string()) + .append_pair("end_slot", &end_slot.to_string()); + + self.get(path).await + } + + /// `GET` lighthouse/analysis/block_packing?start_epoch,end_epoch + pub async fn get_lighthouse_analysis_block_packing( + &self, + start_epoch: Epoch, + end_epoch: Epoch, + ) -> Result, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("lighthouse") + .push("analysis") + .push("block_packing_efficiency"); + + path.query_pairs_mut() + .append_pair("start_epoch", &start_epoch.to_string()) + .append_pair("end_epoch", &end_epoch.to_string()); + + self.get(path).await + } + + /// `GET` lighthouse/analysis/attestation_performance/{index}?start_epoch,end_epoch + pub async fn get_lighthouse_analysis_attestation_performance( + &self, + start_epoch: Epoch, + end_epoch: Epoch, + target: String, + ) -> Result, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("analysis") + .push("attestation_performance") + .push(&target); + + path.query_pairs_mut() + .append_pair("start_epoch", &start_epoch.to_string()) + .append_pair("end_epoch", &end_epoch.to_string()); + + self.get(path).await + } } diff --git a/watch/.gitignore b/watch/.gitignore new file mode 100644 index 0000000000..5b6b0720c9 --- /dev/null +++ b/watch/.gitignore @@ -0,0 +1 @@ +config.yaml diff --git a/watch/Cargo.toml b/watch/Cargo.toml new file mode 100644 index 0000000000..d1793a9d06 --- /dev/null +++ b/watch/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "watch" +version = "0.1.0" +edition = "2018" + +[lib] +name = "watch" +path = "src/lib.rs" + +[[bin]] +name = "watch" +path = "src/main.rs" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +clap = "2.33.3" +log = "0.4.14" +env_logger = "0.9.0" +types = { path = "../consensus/types" } +eth2 = { path = "../common/eth2" } +beacon_node = { path = "../beacon_node"} +tokio = { version = "1.14.0", features = ["time"] } +axum = "0.5.15" +hyper = "0.14.20" +serde = "1.0.116" +serde_json = "1.0.58" +reqwest = { version = "0.11.0", features = ["json","stream"] } +url = "2.2.2" +rand = "0.7.3" +diesel = { version = "2.0.2", features = ["postgres", "r2d2"] } +diesel_migrations = { version = "2.0.0", features = 
["postgres"] } +byteorder = "1.4.3" +bls = { path = "../crypto/bls" } +hex = "0.4.2" +r2d2 = "0.8.9" +serde_yaml = "0.8.24" + +[dev-dependencies] +tokio-postgres = "0.7.5" +http_api = { path = "../beacon_node/http_api" } +beacon_chain = { path = "../beacon_node/beacon_chain" } +network = { path = "../beacon_node/network" } +testcontainers = "0.14.0" +unused_port = { path = "../common/unused_port" } diff --git a/watch/README.md b/watch/README.md new file mode 100644 index 0000000000..18bf393946 --- /dev/null +++ b/watch/README.md @@ -0,0 +1,460 @@ +## beacon.watch + +>beacon.watch is pre-MVP and still under active development and subject to change. + +beacon.watch is an Ethereum Beacon Chain monitoring platform whose goal is to provide fast access to +data which is: +1. Not already stored natively in the Beacon Chain +2. Too specialized for Block Explorers +3. Too sensitive for public Block Explorers + + +### Requirements +- `git` +- `rust` : https://rustup.rs/ +- `libpg` : https://www.postgresql.org/download/ +- `diesel_cli` : +``` +cargo install diesel_cli --no-default-features --features postgres +``` +- `docker` : https://docs.docker.com/engine/install/ +- `docker-compose` : https://docs.docker.com/compose/install/ + +### Setup +1. Setup the database: +``` +cd postgres_docker_compose +docker-compose up +``` + +1. Ensure the tests pass: +``` +cargo test --release +``` + +1. Drop the database (if it already exists) and run the required migrations: +``` +diesel database reset --database-url postgres://postgres:postgres@localhost/dev +``` + +1. Ensure a synced Lighthouse beacon node with historical states is available +at `localhost:5052`. +The smaller the value of `--slots-per-restore-point` the faster beacon.watch +will be able to sync to the beacon node. + +1. Run the updater daemon: +``` +cargo run --release -- run-updater +``` + +1. Start the HTTP API server: +``` +cargo run --release -- serve +``` + +1. 
Ensure connectivity: +``` +curl "http://localhost:5059/v1/slots/highest" +``` + +> Functionality on MacOS has not been tested. Windows is not supported. + + +### Configuration +beacon.watch can be configured through the use of a config file. +Available options can be seen in `config.yaml.default`. + +You can specify a config file during runtime: +``` +cargo run -- run-updater --config path/to/config.yaml +cargo run -- serve --config path/to/config.yaml +``` + +You can specify only the parts of the config file which you need changed. +Missing values will remain as their defaults. + +For example, if you wish to run with default settings but only wish to alter `log_level` +your config file would be: +```yaml +# config.yaml +log_level = "info" +``` + +### Available Endpoints +As beacon.watch continues to develop, more endpoints will be added. + +> In these examples any data containing information from blockprint has either been redacted or fabricated. + +#### `/v1/slots/{slot}` +```bash +curl "http://localhost:5059/v1/slots/4635296" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "skipped": false, + "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" +} +``` + +#### `/v1/slots?start_slot={}&end_slot={}` +```bash +curl "http://localhost:5059/v1/slots?start_slot=4635296&end_slot=4635297" +``` +```json +[ + { + "slot": "4635297", + "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182", + "skipped": false, + "beacon_block": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182" + }, + { + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "skipped": false, + "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" + } +] +``` + +#### `/v1/slots/lowest` +```bash +curl "http://localhost:5059/v1/slots/lowest" +``` +```json +{ + "slot": "4635296", + "root": 
"0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "skipped": false, + "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" +} +``` + +#### `/v1/slots/highest` +```bash +curl "http://localhost:5059/v1/slots/highest" +``` +```json +{ + "slot": "4635358", + "root": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b", + "skipped": false, + "beacon_block": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b" +} +``` + +#### `v1/slots/{slot}/block` +```bash +curl "http://localhost:5059/v1/slots/4635296/block" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks/{block_id}` +```bash +curl "http://localhost:5059/v1/blocks/4635296" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks?start_slot={}&end_slot={}` +```bash +curl "http://localhost:5059/v1/blocks?start_slot=4635296&end_slot=4635297" +``` +```json +[ + { + "slot": "4635297", + "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182", + "parent_root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" + }, + { + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" + } +] +``` + +#### `/v1/blocks/{block_id}/previous` +```bash +curl "http://localhost:5059/v1/blocks/4635297/previous" +# OR +curl "http://localhost:5059/v1/blocks/0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182/previous" 
+``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks/{block_id}/next` +```bash +curl "http://localhost:5059/v1/blocks/4635296/next" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/next" +``` +```json +{ + "slot": "4635297", + "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182", + "parent_root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" +} +``` + +#### `/v1/blocks/lowest` +```bash +curl "http://localhost:5059/v1/blocks/lowest" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks/highest` +```bash +curl "http://localhost:5059/v1/blocks/highest" +``` +```json +{ + "slot": "4635358", + "root": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b", + "parent_root": "0xb66e05418bb5b1d4a965c994e1f0e5b5f0d7b780e0df12f3f6321510654fa1d2" +} +``` + +#### `/v1/blocks/{block_id}/proposer` +```bash +curl "http://localhost:5059/v1/blocks/4635296/proposer" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/proposer" + +``` +```json +{ + "slot": "4635296", + "proposer_index": 223126, + "graffiti": "" +} +``` + +#### `/v1/blocks/{block_id}/rewards` +```bash +curl "http://localhost:5059/v1/blocks/4635296/reward" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/reward" + +``` +```json +{ + "slot": "4635296", + "total": 25380059, + "attestation_reward": 24351867, + "sync_committee_reward": 1028192 +} +``` + +#### `/v1/blocks/{block_id}/packing` +```bash +curl 
"http://localhost:5059/v1/blocks/4635296/packing" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/packing" + +``` +```json +{ + "slot": "4635296", + "available": 16152, + "included": 13101, + "prior_skip_slots": 0 +} +``` + +#### `/v1/validators/{validator}` +```bash +curl "http://localhost:5059/v1/validators/1" +# OR +curl "http://localhost:5059/v1/validators/0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c" +``` +```json +{ + "index": 1, + "public_key": "0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c", + "status": "active_ongoing", + "client": null, + "activation_epoch": 0, + "exit_epoch": null +} +``` + +#### `/v1/validators/{validator}/attestation/{epoch}` +```bash +curl "http://localhost:5059/v1/validators/1/attestation/144853" +# OR +curl "http://localhost:5059/v1/validators/0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c/attestation/144853" +``` +```json +{ + "index": 1, + "epoch": "144853", + "source": true, + "head": true, + "target": true +} +``` + +#### `/v1/validators/missed/{vote}/{epoch}` +```bash +curl "http://localhost:5059/v1/validators/missed/head/144853" +``` +```json +[ + 63, + 67, + 98, + ... +] +``` + +#### `/v1/validators/missed/{vote}/{epoch}/graffiti` +```bash +curl "http://localhost:5059/v1/validators/missed/head/144853/graffiti" +``` +```json +{ + "Mr F was here": 3, + "Lighthouse/v3.1.0-aa022f4": 5, + ... 
+} +``` + +#### `/v1/clients/missed/{vote}/{epoch}` +```bash +curl "http://localhost:5059/v1/clients/missed/source/144853" +``` +```json +{ + "Lighthouse": 100, + "Lodestar": 100, + "Nimbus": 100, + "Prysm": 100, + "Teku": 100, + "Unknown": 100 +} +``` + +#### `/v1/clients/missed/{vote}/{epoch}/percentages` +Note that this endpoint expresses the following: +``` +What percentage of each client implementation missed this vote? +``` + +```bash +curl "http://localhost:5059/v1/clients/missed/target/144853/percentages" +``` +```json +{ + "Lighthouse": 0.51234567890, + "Lodestar": 0.51234567890, + "Nimbus": 0.51234567890, + "Prysm": 0.09876543210, + "Teku": 0.09876543210, + "Unknown": 0.05647382910 +} +``` + +#### `/v1/clients/missed/{vote}/{epoch}/percentages/relative` +Note that this endpoint expresses the following: +``` +For the validators which did miss this vote, what percentage of them were from each client implementation? +``` +You can check these values against the output of `/v1/clients/percentages` to see any discrepancies. + +```bash +curl "http://localhost:5059/v1/clients/missed/target/144853/percentages/relative" +``` +```json +{ + "Lighthouse": 11.11111111111111, + "Lodestar": 11.11111111111111, + "Nimbus": 11.11111111111111, + "Prysm": 16.66666666666667, + "Teku": 16.66666666666667, + "Unknown": 33.33333333333333 +} + +``` + +#### `/v1/clients` +```bash +curl "http://localhost:5059/v1/clients" +``` +```json +{ + "Lighthouse": 5000, + "Lodestar": 5000, + "Nimbus": 5000, + "Prysm": 5000, + "Teku": 5000, + "Unknown": 5000 +} +``` + +#### `/v1/clients/percentages` +```bash +curl "http://localhost:5059/v1/clients/percentages" +``` +```json +{ + "Lighthouse": 16.66666666666667, + "Lodestar": 16.66666666666667, + "Nimbus": 16.66666666666667, + "Prysm": 16.66666666666667, + "Teku": 16.66666666666667, + "Unknown": 16.66666666666667 +} +``` + +### Future work +- New tables + - `skip_slots`? 
+ + +- More API endpoints + - `/v1/proposers?start_epoch={}&end_epoch={}` and similar + - `/v1/validators/{status}/count` + + +- Concurrently backfill and forwards fill, so forwards fill is not bottlenecked by large backfills. + + +- Better/prettier (async?) logging. + + +- Connect to a range of beacon_nodes to sync different components concurrently. +Generally, processing certain api queries such as `block_packing` and `attestation_performance` take the longest to sync. + + +### Architecture +Connection Pooling: +- 1 Pool for Updater (read and write) +- 1 Pool for HTTP Server (should be read only, although not sure if we can enforce this) diff --git a/watch/config.yaml.default b/watch/config.yaml.default new file mode 100644 index 0000000000..131609237c --- /dev/null +++ b/watch/config.yaml.default @@ -0,0 +1,49 @@ +--- +database: + user: "postgres" + password: "postgres" + dbname: "dev" + default_dbname: "postgres" + host: "localhost" + port: 5432 + connect_timeout_millis: 2000 + +server: + listen_addr: "127.0.0.1" + listen_port: 5059 + +updater: + # The URL of the Beacon Node to perform sync tasks with. + # Cannot yet accept multiple beacon nodes. + beacon_node_url: "http://localhost:5052" + # The number of epochs to backfill. Must be below 100. + max_backfill_size_epochs: 2 + # The epoch at which to stop backfilling. + backfill_stop_epoch: 0 + # Whether to sync the attestations table. + attestations: true + # Whether to sync the proposer_info table. + proposer_info: true + # Whether to sync the block_rewards table. + block_rewards: true + # Whether to sync the block_packing table. + block_packing: true + +blockprint: + # Whether to sync client information from blockprint. + enabled: false + # The URL of the blockprint server. + url: "" + # The username used to authenticate to the blockprint server. + username: "" + # The password used to authenticate to the blockprint server. + password: "" + +# Log level. 
+# Valid options are: +# - "trace" +# - "debug" +# - "info" +# - "warn" +# - "error" +log_level: "debug" diff --git a/watch/diesel.toml b/watch/diesel.toml new file mode 100644 index 0000000000..bfb01bccf0 --- /dev/null +++ b/watch/diesel.toml @@ -0,0 +1,5 @@ +# For documentation on how to configure this file, +# see diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "src/database/schema.rs" diff --git a/watch/migrations/.gitkeep b/watch/migrations/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/watch/migrations/00000000000000_diesel_initial_setup/down.sql b/watch/migrations/00000000000000_diesel_initial_setup/down.sql new file mode 100644 index 0000000000..a9f5260911 --- /dev/null +++ b/watch/migrations/00000000000000_diesel_initial_setup/down.sql @@ -0,0 +1,6 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. + +DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); +DROP FUNCTION IF EXISTS diesel_set_updated_at(); diff --git a/watch/migrations/00000000000000_diesel_initial_setup/up.sql b/watch/migrations/00000000000000_diesel_initial_setup/up.sql new file mode 100644 index 0000000000..d68895b1a7 --- /dev/null +++ b/watch/migrations/00000000000000_diesel_initial_setup/up.sql @@ -0,0 +1,36 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. 
+ + + + +-- Sets up a trigger for the given table to automatically set a column called +-- `updated_at` whenever the row is modified (unless `updated_at` was included +-- in the modified columns) +-- +-- # Example +-- +-- ```sql +-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); +-- +-- SELECT diesel_manage_updated_at('users'); +-- ``` +CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ +BEGIN + EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ +BEGIN + IF ( + NEW IS DISTINCT FROM OLD AND + NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at + ) THEN + NEW.updated_at := current_timestamp; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; diff --git a/watch/migrations/2022-01-01-000000_canonical_slots/down.sql b/watch/migrations/2022-01-01-000000_canonical_slots/down.sql new file mode 100644 index 0000000000..551ed6605c --- /dev/null +++ b/watch/migrations/2022-01-01-000000_canonical_slots/down.sql @@ -0,0 +1 @@ +DROP TABLE canonical_slots diff --git a/watch/migrations/2022-01-01-000000_canonical_slots/up.sql b/watch/migrations/2022-01-01-000000_canonical_slots/up.sql new file mode 100644 index 0000000000..2629f11a4c --- /dev/null +++ b/watch/migrations/2022-01-01-000000_canonical_slots/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE canonical_slots ( + slot integer PRIMARY KEY, + root bytea NOT NULL, + skipped boolean NOT NULL, + beacon_block bytea UNIQUE +) diff --git a/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql b/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql new file mode 100644 index 0000000000..8901956f47 --- /dev/null +++ b/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql @@ -0,0 +1 @@ +DROP TABLE beacon_blocks diff --git 
a/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql b/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql new file mode 100644 index 0000000000..250c667b23 --- /dev/null +++ b/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql @@ -0,0 +1,7 @@ +CREATE TABLE beacon_blocks ( + slot integer PRIMARY KEY REFERENCES canonical_slots(slot) ON DELETE CASCADE, + root bytea REFERENCES canonical_slots(beacon_block) NOT NULL, + parent_root bytea NOT NULL, + attestation_count integer NOT NULL, + transaction_count integer +) diff --git a/watch/migrations/2022-01-01-000002_validators/down.sql b/watch/migrations/2022-01-01-000002_validators/down.sql new file mode 100644 index 0000000000..17819fc349 --- /dev/null +++ b/watch/migrations/2022-01-01-000002_validators/down.sql @@ -0,0 +1 @@ +DROP TABLE validators diff --git a/watch/migrations/2022-01-01-000002_validators/up.sql b/watch/migrations/2022-01-01-000002_validators/up.sql new file mode 100644 index 0000000000..69cfef6772 --- /dev/null +++ b/watch/migrations/2022-01-01-000002_validators/up.sql @@ -0,0 +1,7 @@ +CREATE TABLE validators ( + index integer PRIMARY KEY, + public_key bytea NOT NULL, + status text NOT NULL, + activation_epoch integer, + exit_epoch integer +) diff --git a/watch/migrations/2022-01-01-000003_proposer_info/down.sql b/watch/migrations/2022-01-01-000003_proposer_info/down.sql new file mode 100644 index 0000000000..d61330be5b --- /dev/null +++ b/watch/migrations/2022-01-01-000003_proposer_info/down.sql @@ -0,0 +1 @@ +DROP TABLE proposer_info diff --git a/watch/migrations/2022-01-01-000003_proposer_info/up.sql b/watch/migrations/2022-01-01-000003_proposer_info/up.sql new file mode 100644 index 0000000000..488aedb273 --- /dev/null +++ b/watch/migrations/2022-01-01-000003_proposer_info/up.sql @@ -0,0 +1,5 @@ +CREATE TABLE proposer_info ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + proposer_index integer REFERENCES validators(index) ON DELETE CASCADE NOT NULL, + 
graffiti text NOT NULL +) diff --git a/watch/migrations/2022-01-01-000004_active_config/down.sql b/watch/migrations/2022-01-01-000004_active_config/down.sql new file mode 100644 index 0000000000..b4304eb7b7 --- /dev/null +++ b/watch/migrations/2022-01-01-000004_active_config/down.sql @@ -0,0 +1 @@ +DROP TABLE active_config diff --git a/watch/migrations/2022-01-01-000004_active_config/up.sql b/watch/migrations/2022-01-01-000004_active_config/up.sql new file mode 100644 index 0000000000..476a091160 --- /dev/null +++ b/watch/migrations/2022-01-01-000004_active_config/up.sql @@ -0,0 +1,5 @@ +CREATE TABLE active_config ( + id integer PRIMARY KEY CHECK (id=1), + config_name text NOT NULL, + slots_per_epoch integer NOT NULL +) diff --git a/watch/migrations/2022-01-01-000010_blockprint/down.sql b/watch/migrations/2022-01-01-000010_blockprint/down.sql new file mode 100644 index 0000000000..fa53325dad --- /dev/null +++ b/watch/migrations/2022-01-01-000010_blockprint/down.sql @@ -0,0 +1 @@ +DROP TABLE blockprint diff --git a/watch/migrations/2022-01-01-000010_blockprint/up.sql b/watch/migrations/2022-01-01-000010_blockprint/up.sql new file mode 100644 index 0000000000..2d5741f50b --- /dev/null +++ b/watch/migrations/2022-01-01-000010_blockprint/up.sql @@ -0,0 +1,4 @@ +CREATE TABLE blockprint ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + best_guess text NOT NULL +) diff --git a/watch/migrations/2022-01-01-000011_block_rewards/down.sql b/watch/migrations/2022-01-01-000011_block_rewards/down.sql new file mode 100644 index 0000000000..2dc87995c7 --- /dev/null +++ b/watch/migrations/2022-01-01-000011_block_rewards/down.sql @@ -0,0 +1 @@ +DROP TABLE block_rewards diff --git a/watch/migrations/2022-01-01-000011_block_rewards/up.sql b/watch/migrations/2022-01-01-000011_block_rewards/up.sql new file mode 100644 index 0000000000..47cb4304f0 --- /dev/null +++ b/watch/migrations/2022-01-01-000011_block_rewards/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE 
block_rewards ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + total integer NOT NULL, + attestation_reward integer NOT NULL, + sync_committee_reward integer NOT NULL +) diff --git a/watch/migrations/2022-01-01-000012_block_packing/down.sql b/watch/migrations/2022-01-01-000012_block_packing/down.sql new file mode 100644 index 0000000000..e9e7755e3e --- /dev/null +++ b/watch/migrations/2022-01-01-000012_block_packing/down.sql @@ -0,0 +1 @@ +DROP TABLE block_packing diff --git a/watch/migrations/2022-01-01-000012_block_packing/up.sql b/watch/migrations/2022-01-01-000012_block_packing/up.sql new file mode 100644 index 0000000000..63a9925f92 --- /dev/null +++ b/watch/migrations/2022-01-01-000012_block_packing/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE block_packing ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + available integer NOT NULL, + included integer NOT NULL, + prior_skip_slots integer NOT NULL +) diff --git a/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql b/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql new file mode 100644 index 0000000000..0f32b6b4f3 --- /dev/null +++ b/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql @@ -0,0 +1 @@ +DROP TABLE suboptimal_attestations diff --git a/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql b/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql new file mode 100644 index 0000000000..5352afefc8 --- /dev/null +++ b/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql @@ -0,0 +1,8 @@ +CREATE TABLE suboptimal_attestations ( + epoch_start_slot integer CHECK (epoch_start_slot % 32 = 0) REFERENCES canonical_slots(slot) ON DELETE CASCADE, + index integer NOT NULL REFERENCES validators(index) ON DELETE CASCADE, + source boolean NOT NULL, + head boolean NOT NULL, + target boolean NOT NULL, + PRIMARY KEY(epoch_start_slot, index) +) diff --git 
a/watch/migrations/2022-01-01-000020_capella/down.sql b/watch/migrations/2022-01-01-000020_capella/down.sql new file mode 100644 index 0000000000..5903b351db --- /dev/null +++ b/watch/migrations/2022-01-01-000020_capella/down.sql @@ -0,0 +1,2 @@ +ALTER TABLE beacon_blocks +DROP COLUMN withdrawal_count; diff --git a/watch/migrations/2022-01-01-000020_capella/up.sql b/watch/migrations/2022-01-01-000020_capella/up.sql new file mode 100644 index 0000000000..b52b4b0099 --- /dev/null +++ b/watch/migrations/2022-01-01-000020_capella/up.sql @@ -0,0 +1,3 @@ +ALTER TABLE beacon_blocks +ADD COLUMN withdrawal_count integer; + diff --git a/watch/postgres_docker_compose/compose.yml b/watch/postgres_docker_compose/compose.yml new file mode 100644 index 0000000000..eae4de4a2b --- /dev/null +++ b/watch/postgres_docker_compose/compose.yml @@ -0,0 +1,16 @@ +version: "3" + +services: + postgres: + image: postgres:12.3-alpine + restart: always + environment: + POSTGRES_PASSWORD: postgres + POSTGRES_USER: postgres + volumes: + - postgres:/var/lib/postgresql/data + ports: + - 127.0.0.1:5432:5432 + +volumes: + postgres: diff --git a/watch/src/block_packing/database.rs b/watch/src/block_packing/database.rs new file mode 100644 index 0000000000..f7375431cb --- /dev/null +++ b/watch/src/block_packing/database.rs @@ -0,0 +1,140 @@ +use crate::database::{ + schema::{beacon_blocks, block_packing}, + watch_types::{WatchHash, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::time::Instant; + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = block_packing)] +pub struct WatchBlockPacking { + pub slot: WatchSlot, + pub available: i32, + pub included: i32, + pub prior_skip_slots: i32, +} + +/// Insert a batch of values into the `block_packing` table. +/// +/// On a conflict, it will do nothing, leaving the old value. 
+pub fn insert_batch_block_packing( + conn: &mut PgConn, + packing: Vec, +) -> Result<(), Error> { + use self::block_packing::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in packing.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(block_packing) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Block packing inserted, count: {count}, time taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `block_packing` table where `slot` is minimum. +pub fn get_lowest_block_packing(conn: &mut PgConn) -> Result, Error> { + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let result = block_packing + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: lowest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `block_packing` table where `slot` is maximum. +pub fn get_highest_block_packing(conn: &mut PgConn) -> Result, Error> { + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let result = block_packing + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: highest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_packing` table corresponding to a given `root_query`. 
+pub fn get_block_packing_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root}; + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let join = beacon_blocks.inner_join(block_packing); + + let result = join + .select((slot, available, included, prior_skip_slots)) + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: {root_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_packing` table corresponding to a given `slot_query`. +pub fn get_block_packing_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let result = block_packing + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: {slot_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding +/// row in `block_packing`. +#[allow(dead_code)] +pub fn get_unknown_block_packing( + conn: &mut PgConn, + slots_per_epoch: u64, +) -> Result>, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root, slot}; + use self::block_packing::dsl::block_packing; + + let join = beacon_blocks.left_join(block_packing); + + let result = join + .select(slot) + .filter(root.is_null()) + // Block packing cannot be retrieved for epoch 0 so we need to exclude them. 
+ .filter(slot.ge(slots_per_epoch as i32)) + .order_by(slot.desc()) + .nullable() + .load::>(conn)?; + + Ok(result) +} diff --git a/watch/src/block_packing/mod.rs b/watch/src/block_packing/mod.rs new file mode 100644 index 0000000000..5d74fc5979 --- /dev/null +++ b/watch/src/block_packing/mod.rs @@ -0,0 +1,38 @@ +pub mod database; +pub mod server; +pub mod updater; + +use crate::database::watch_types::WatchSlot; +use crate::updater::error::Error; + +pub use database::{ + get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing, + get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing, + WatchBlockPacking, +}; +pub use server::block_packing_routes; + +use eth2::BeaconNodeHttpClient; +use types::Epoch; + +/// Sends a request to `lighthouse/analysis/block_packing`. +/// Formats the response into a vector of `WatchBlockPacking`. +/// +/// Will fail if `start_epoch == 0`. +pub async fn get_block_packing( + bn: &BeaconNodeHttpClient, + start_epoch: Epoch, + end_epoch: Epoch, +) -> Result, Error> { + Ok(bn + .get_lighthouse_analysis_block_packing(start_epoch, end_epoch) + .await? 
+ .into_iter() + .map(|data| WatchBlockPacking { + slot: WatchSlot::from_slot(data.slot), + available: data.available_attestations as i32, + included: data.included_attestations as i32, + prior_skip_slots: data.prior_skip_slots as i32, + }) + .collect()) +} diff --git a/watch/src/block_packing/server.rs b/watch/src/block_packing/server.rs new file mode 100644 index 0000000000..819144562a --- /dev/null +++ b/watch/src/block_packing/server.rs @@ -0,0 +1,31 @@ +use crate::block_packing::database::{ + get_block_packing_by_root, get_block_packing_by_slot, WatchBlockPacking, +}; +use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; +use crate::server::Error; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use eth2::types::BlockId; +use std::str::FromStr; + +pub async fn get_block_packing( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? 
{ + BlockId::Root(root) => Ok(Json(get_block_packing_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(get_block_packing_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub fn block_packing_routes() -> Router { + Router::new().route("/v1/blocks/:block/packing", get(get_block_packing)) +} diff --git a/watch/src/block_packing/updater.rs b/watch/src/block_packing/updater.rs new file mode 100644 index 0000000000..215964901a --- /dev/null +++ b/watch/src/block_packing/updater.rs @@ -0,0 +1,211 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use crate::block_packing::get_block_packing; + +use eth2::types::{Epoch, EthSpec}; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING: u64 = 50; + +impl UpdateHandler { + /// Forward fills the `block_packing` table starting from the entry with the + /// highest slot. + /// + /// It constructs a request to the `get_block_packing` API with: + /// `start_epoch` -> highest completely filled epoch + 1 (or epoch of lowest beacon block) + /// `end_epoch` -> epoch of highest beacon block + /// + /// It will resync the latest epoch if it is not fully filled. + /// That is, `if highest_filled_slot % slots_per_epoch != 31` + /// This means that if the last slot of an epoch is a skip slot, the whole epoch will be + /// resynced during the next head update. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`. + pub async fn fill_block_packing(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Get the slot of the highest entry in the `block_packing` table.
+ let highest_filled_slot_opt = if self.config.block_packing { + database::get_highest_block_packing(&mut conn)?.map(|packing| packing.slot) + } else { + return Err(Error::NotEnabled("block_packing".to_string())); + }; + + let mut start_epoch = if let Some(highest_filled_slot) = highest_filled_slot_opt { + if highest_filled_slot.as_slot() % self.slots_per_epoch + == self.slots_per_epoch.saturating_sub(1) + { + // The whole epoch is filled so we can begin syncing the next one. + highest_filled_slot.as_slot().epoch(self.slots_per_epoch) + 1 + } else { + // The epoch is only partially synced. Try to sync it fully. + highest_filled_slot.as_slot().epoch(self.slots_per_epoch) + } + } else { + // No entries in the `block_packing` table. Use `beacon_blocks` instead. + if let Some(lowest_beacon_block) = database::get_lowest_beacon_block(&mut conn)? { + lowest_beacon_block + .slot + .as_slot() + .epoch(self.slots_per_epoch) + } else { + // There are no blocks in the database, do not fill the `block_packing` table. + warn!("Refusing to fill block packing as there are no blocks in the database"); + return Ok(()); + } + }; + + // The `get_block_packing` API endpoint cannot accept `start_epoch == 0`. + if start_epoch == 0 { + start_epoch += 1 + } + + if let Some(highest_block_slot) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut end_epoch = highest_block_slot.epoch(self.slots_per_epoch); + + if start_epoch > end_epoch { + debug!("Block packing is up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. 
+ if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) { + end_epoch = start_epoch + MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING + } + + if let Some(lowest_block_slot) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?; + + // Since we pull a full epoch of data but are not guaranteed to have all blocks of + // that epoch available, only insert blocks with corresponding `beacon_block`s. + packing.retain(|packing| { + packing.slot.as_slot() >= lowest_block_slot + && packing.slot.as_slot() <= highest_block_slot + }); + database::insert_batch_block_packing(&mut conn, packing)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest block when one exists".to_string(), + ))); + } + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_packing` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } + + /// Backfill the `block_packing` table starting from the entry with the lowest slot. + /// + /// It constructs a request to the `get_block_packing` function with: + /// `start_epoch` -> epoch of lowest_beacon_block + /// `end_epoch` -> epoch of lowest filled `block_packing` - 1 (or epoch of highest beacon block) + /// + /// It will resync the lowest epoch if it is not fully filled. + /// That is, `if lowest_filled_slot % slots_per_epoch != 0` + /// This means that if the first slot of an epoch is a skip slot, the whole epoch will be + /// resynced during the next head update. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`.
+ pub async fn backfill_block_packing(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let max_block_packing_backfill = self.config.max_backfill_size_epochs; + + // Get the slot of the lowest entry in the `block_packing` table. + let lowest_filled_slot_opt = if self.config.block_packing { + database::get_lowest_block_packing(&mut conn)?.map(|packing| packing.slot) + } else { + return Err(Error::NotEnabled("block_packing".to_string())); + }; + + let end_epoch = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { + if lowest_filled_slot.as_slot() % self.slots_per_epoch == 0 { + lowest_filled_slot + .as_slot() + .epoch(self.slots_per_epoch) + .saturating_sub(Epoch::new(1)) + } else { + // The epoch is only partially synced. Try to sync it fully. + lowest_filled_slot.as_slot().epoch(self.slots_per_epoch) + } + } else { + // No entries in the `block_packing` table. Use `beacon_blocks` instead. + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + highest_beacon_block.as_slot().epoch(self.slots_per_epoch) + } else { + // There are no blocks in the database, do not backfill the `block_packing` table. + warn!("Refusing to backfill block packing as there are no blocks in the database"); + return Ok(()); + } + }; + + if end_epoch <= 1 { + debug!("Block packing backfill is complete"); + return Ok(()); + } + + if let Some(lowest_block_slot) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut start_epoch = lowest_block_slot.epoch(self.slots_per_epoch); + + if start_epoch >= end_epoch { + debug!("Block packing is up to date with the base of the database"); + return Ok(()); + } + + // Ensure that the request range does not exceed `max_block_packing_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`. 
+ if start_epoch < end_epoch.saturating_sub(max_block_packing_backfill) { + start_epoch = end_epoch.saturating_sub(max_block_packing_backfill) + } + if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) { + start_epoch = end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) + } + + // The `block_packing` API cannot accept `start_epoch == 0`. + if start_epoch == 0 { + start_epoch += 1 + } + + if let Some(highest_block_slot) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?; + + // Only insert blocks with corresponding `beacon_block`s. + packing.retain(|packing| { + packing.slot.as_slot() >= lowest_block_slot + && packing.slot.as_slot() <= highest_block_slot + }); + + database::insert_batch_block_packing(&mut conn, packing)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest block when one exists".to_string(), + ))); + } + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_packing` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. 
Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } +} diff --git a/watch/src/block_rewards/database.rs b/watch/src/block_rewards/database.rs new file mode 100644 index 0000000000..a2bf49f3e4 --- /dev/null +++ b/watch/src/block_rewards/database.rs @@ -0,0 +1,137 @@ +use crate::database::{ + schema::{beacon_blocks, block_rewards}, + watch_types::{WatchHash, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::time::Instant; + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = block_rewards)] +pub struct WatchBlockRewards { + pub slot: WatchSlot, + pub total: i32, + pub attestation_reward: i32, + pub sync_committee_reward: i32, +} + +/// Insert a batch of values into the `block_rewards` table. +/// +/// On a conflict, it will do nothing, leaving the old value. +pub fn insert_batch_block_rewards( + conn: &mut PgConn, + rewards: Vec, +) -> Result<(), Error> { + use self::block_rewards::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in rewards.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(block_rewards) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Block rewards inserted, count: {count}, time_taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `block_rewards` table where `slot` is minimum. 
+pub fn get_lowest_block_rewards(conn: &mut PgConn) -> Result, Error> { + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let result = block_rewards + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: lowest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `block_rewards` table where `slot` is maximum. +pub fn get_highest_block_rewards(conn: &mut PgConn) -> Result, Error> { + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let result = block_rewards + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: highest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_rewards` table corresponding to a given `root_query`. +pub fn get_block_rewards_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root}; + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let join = beacon_blocks.inner_join(block_rewards); + + let result = join + .select((slot, total, attestation_reward, sync_committee_reward)) + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: {root_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_rewards` table corresponding to a given `slot_query`. 
+pub fn get_block_rewards_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let result = block_rewards + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: {slot_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding +/// row in `block_rewards`. +#[allow(dead_code)] +pub fn get_unknown_block_rewards(conn: &mut PgConn) -> Result>, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root, slot}; + use self::block_rewards::dsl::block_rewards; + + let join = beacon_blocks.left_join(block_rewards); + + let result = join + .select(slot) + .filter(root.is_null()) + // Block rewards cannot be retrieved for `slot == 0` so we need to exclude it. + .filter(slot.ne(0)) + .order_by(slot.desc()) + .nullable() + .load::>(conn)?; + + Ok(result) +} diff --git a/watch/src/block_rewards/mod.rs b/watch/src/block_rewards/mod.rs new file mode 100644 index 0000000000..0dac88ea58 --- /dev/null +++ b/watch/src/block_rewards/mod.rs @@ -0,0 +1,38 @@ +pub mod database; +mod server; +mod updater; + +use crate::database::watch_types::WatchSlot; +use crate::updater::error::Error; + +pub use database::{ + get_block_rewards_by_root, get_block_rewards_by_slot, get_highest_block_rewards, + get_lowest_block_rewards, get_unknown_block_rewards, insert_batch_block_rewards, + WatchBlockRewards, +}; +pub use server::block_rewards_routes; + +use eth2::BeaconNodeHttpClient; +use types::Slot; + +/// Sends a request to `lighthouse/analysis/block_rewards`. +/// Formats the response into a vector of `WatchBlockRewards`. +/// +/// Will fail if `start_slot == 0`. 
+pub async fn get_block_rewards( + bn: &BeaconNodeHttpClient, + start_slot: Slot, + end_slot: Slot, +) -> Result, Error> { + Ok(bn + .get_lighthouse_analysis_block_rewards(start_slot, end_slot) + .await? + .into_iter() + .map(|data| WatchBlockRewards { + slot: WatchSlot::from_slot(data.meta.slot), + total: data.total as i32, + attestation_reward: data.attestation_rewards.total as i32, + sync_committee_reward: data.sync_committee_rewards as i32, + }) + .collect()) +} diff --git a/watch/src/block_rewards/server.rs b/watch/src/block_rewards/server.rs new file mode 100644 index 0000000000..480346e25b --- /dev/null +++ b/watch/src/block_rewards/server.rs @@ -0,0 +1,31 @@ +use crate::block_rewards::database::{ + get_block_rewards_by_root, get_block_rewards_by_slot, WatchBlockRewards, +}; +use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; +use crate::server::Error; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use eth2::types::BlockId; +use std::str::FromStr; + +pub async fn get_block_rewards( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? 
{ + BlockId::Root(root) => Ok(Json(get_block_rewards_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(get_block_rewards_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub fn block_rewards_routes() -> Router { + Router::new().route("/v1/blocks/:block/rewards", get(get_block_rewards)) +} diff --git a/watch/src/block_rewards/updater.rs b/watch/src/block_rewards/updater.rs new file mode 100644 index 0000000000..ad34b1f078 --- /dev/null +++ b/watch/src/block_rewards/updater.rs @@ -0,0 +1,157 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use crate::block_rewards::get_block_rewards; + +use eth2::types::EthSpec; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS: u64 = 1600; + +impl UpdateHandler { + /// Forward fills the `block_rewards` table starting from the entry with the + /// highest slot. + /// + /// It constructs a request to the `get_block_rewards` API with: + /// `start_slot` -> highest filled `block_rewards` + 1 (or lowest beacon block) + /// `end_slot` -> highest beacon block + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. + pub async fn fill_block_rewards(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Get the slot of the highest entry in the `block_rewards` table. + let highest_filled_slot_opt = if self.config.block_rewards { + database::get_highest_block_rewards(&mut conn)?.map(|reward| reward.slot) + } else { + return Err(Error::NotEnabled("block_rewards".to_string())); + }; + + let mut start_slot = if let Some(highest_filled_slot) = highest_filled_slot_opt { + highest_filled_slot.as_slot() + 1 + } else { + // No entries in the `block_rewards` table. Use `beacon_blocks` instead. 
+ if let Some(lowest_beacon_block) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot) + { + lowest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not fill the `block_rewards` table. + warn!("Refusing to fill block rewards as there are no blocks in the database"); + return Ok(()); + } + }; + + // The `block_rewards` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1; + } + + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + let mut end_slot = highest_beacon_block.as_slot(); + + if start_slot > end_slot { + debug!("Block rewards are up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) { + end_slot = start_slot + MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS + } + + let rewards = get_block_rewards(&self.bn, start_slot, end_slot).await?; + database::insert_batch_block_rewards(&mut conn, rewards)?; + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_rewards` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } + + /// Backfill the `block_rewards` tables starting from the entry with the + /// lowest slot. + /// + /// It constructs a request to the `get_block_rewards` API with: + /// `start_slot` -> lowest_beacon_block + /// `end_slot` -> lowest filled `block_rewards` - 1 (or highest beacon block) + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. 
+ pub async fn backfill_block_rewards(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let max_block_reward_backfill = self.config.max_backfill_size_epochs * self.slots_per_epoch; + + // Get the slot of the lowest entry in the `block_rewards` table. + let lowest_filled_slot_opt = if self.config.block_rewards { + database::get_lowest_block_rewards(&mut conn)?.map(|reward| reward.slot) + } else { + return Err(Error::NotEnabled("block_rewards".to_string())); + }; + + let end_slot = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { + lowest_filled_slot.as_slot().saturating_sub(1_u64) + } else { + // No entries in the `block_rewards` table. Use `beacon_blocks` instead. + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + highest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not backfill the `block_rewards` table. + warn!("Refusing to backfill block rewards as there are no blocks in the database"); + return Ok(()); + } + }; + + if end_slot <= 1 { + debug!("Block rewards backfill is complete"); + return Ok(()); + } + + if let Some(lowest_block_slot) = database::get_lowest_beacon_block(&mut conn)? { + let mut start_slot = lowest_block_slot.slot.as_slot(); + + if start_slot >= end_slot { + debug!("Block rewards are up to date with the base of the database"); + return Ok(()); + } + + // Ensure that the request range does not exceed `max_block_reward_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. + if start_slot < end_slot.saturating_sub(max_block_reward_backfill) { + start_slot = end_slot.saturating_sub(max_block_reward_backfill) + } + + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) { + start_slot = end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) + } + + // The `block_rewards` API cannot accept `start_slot == 0`. 
+ if start_slot == 0 { + start_slot += 1 + } + + let rewards = get_block_rewards(&self.bn, start_slot, end_slot).await?; + + if self.config.block_rewards { + database::insert_batch_block_rewards(&mut conn, rewards)?; + } + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_rewards` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } +} diff --git a/watch/src/blockprint/config.rs b/watch/src/blockprint/config.rs new file mode 100644 index 0000000000..721fa7cb19 --- /dev/null +++ b/watch/src/blockprint/config.rs @@ -0,0 +1,40 @@ +use serde::{Deserialize, Serialize}; + +pub const fn enabled() -> bool { + false +} + +pub const fn url() -> Option { + None +} + +pub const fn username() -> Option { + None +} + +pub const fn password() -> Option { + None +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default = "enabled")] + pub enabled: bool, + #[serde(default = "url")] + pub url: Option, + #[serde(default = "username")] + pub username: Option, + #[serde(default = "password")] + pub password: Option, +} + +impl Default for Config { + fn default() -> Self { + Config { + enabled: enabled(), + url: url(), + username: username(), + password: password(), + } + } +} diff --git a/watch/src/blockprint/database.rs b/watch/src/blockprint/database.rs new file mode 100644 index 0000000000..afa35c81b6 --- /dev/null +++ b/watch/src/blockprint/database.rs @@ -0,0 +1,224 @@ +use crate::database::{ + self, + schema::{beacon_blocks, blockprint}, + watch_types::{WatchHash, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::sql_types::{Integer, Text}; +use diesel::{Insertable, Queryable}; +use log::debug; 
+use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::time::Instant; + +type WatchConsensusClient = String; +pub fn list_consensus_clients() -> Vec { + vec![ + "Lighthouse".to_string(), + "Lodestar".to_string(), + "Nimbus".to_string(), + "Prysm".to_string(), + "Teku".to_string(), + "Unknown".to_string(), + ] +} + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = blockprint)] +pub struct WatchBlockprint { + pub slot: WatchSlot, + pub best_guess: WatchConsensusClient, +} + +#[derive(Debug, QueryableByName, diesel::FromSqlRow)] +pub struct WatchValidatorBlockprint { + #[diesel(sql_type = Integer)] + pub proposer_index: i32, + #[diesel(sql_type = Text)] + pub best_guess: WatchConsensusClient, + #[diesel(sql_type = Integer)] + pub slot: WatchSlot, +} + +/// Insert a batch of values into the `blockprint` table. +/// +/// On a conflict, it will do nothing, leaving the old value. +pub fn insert_batch_blockprint( + conn: &mut PgConn, + prints: Vec, +) -> Result<(), Error> { + use self::blockprint::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in prints.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(blockprint) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Blockprint inserted, count: {count}, time_taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `blockprint` table where `slot` is minimum. +pub fn get_lowest_blockprint(conn: &mut PgConn) -> Result, Error> { + use self::blockprint::dsl::*; + let timer = Instant::now(); + + let result = blockprint + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Blockprint requested: lowest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `blockprint` table where `slot` is maximum. 
+pub fn get_highest_blockprint(conn: &mut PgConn) -> Result, Error> { + use self::blockprint::dsl::*; + let timer = Instant::now(); + + let result = blockprint + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Blockprint requested: highest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `blockprint` table corresponding to a given `root_query`. +pub fn get_blockprint_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root}; + use self::blockprint::dsl::*; + let timer = Instant::now(); + + let join = beacon_blocks.inner_join(blockprint); + + let result = join + .select((slot, best_guess)) + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Blockprint requested: {root_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `blockprint` table corresponding to a given `slot_query`. +pub fn get_blockprint_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::blockprint::dsl::*; + let timer = Instant::now(); + + let result = blockprint + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Blockprint requested: {slot_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding +/// row in `blockprint`. 
+#[allow(dead_code)] +pub fn get_unknown_blockprint(conn: &mut PgConn) -> Result>, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root, slot}; + use self::blockprint::dsl::blockprint; + + let join = beacon_blocks.left_join(blockprint); + + let result = join + .select(slot) + .filter(root.is_null()) + .order_by(slot.desc()) + .nullable() + .load::>(conn)?; + + Ok(result) +} + +/// Constructs a HashMap of `index` -> `best_guess` for each validator's latest proposal at or before +/// `target_slot`. +/// Inserts `"Unknown"` if no prior proposals exist. +pub fn construct_validator_blockprints_at_slot( + conn: &mut PgConn, + target_slot: WatchSlot, + slots_per_epoch: u64, +) -> Result, Error> { + use self::blockprint::dsl::{blockprint, slot}; + + let total_validators = + database::count_validators_activated_before_slot(conn, target_slot, slots_per_epoch)? + as usize; + + let mut blockprint_map = HashMap::with_capacity(total_validators); + + let latest_proposals = + database::get_all_validators_latest_proposer_info_at_slot(conn, target_slot)?; + + let latest_proposal_slots: Vec = latest_proposals.clone().into_keys().collect(); + + let result = blockprint + .filter(slot.eq_any(latest_proposal_slots)) + .load::(conn)?; + + // Insert the validators which have available blockprints. + for print in result { + if let Some(proposer) = latest_proposals.get(&print.slot) { + blockprint_map.insert(*proposer, print.best_guess); + } + } + + // Insert the rest of the unknown validators. + for validator_index in 0..total_validators { + blockprint_map + .entry(validator_index as i32) + .or_insert_with(|| "Unknown".to_string()); + } + + Ok(blockprint_map) +} + +/// Counts the number of occurrences of each `client` present in the `validators` table at or before some +/// `target_slot`.
+pub fn get_validators_clients_at_slot( + conn: &mut PgConn, + target_slot: WatchSlot, + slots_per_epoch: u64, +) -> Result, Error> { + let mut client_map: HashMap = HashMap::new(); + + // This includes all validators which were activated at or before `target_slot`. + let validator_blockprints = + construct_validator_blockprints_at_slot(conn, target_slot, slots_per_epoch)?; + + for client in list_consensus_clients() { + let count = validator_blockprints + .iter() + .filter(|(_, v)| (*v).clone() == client) + .count(); + client_map.insert(client, count); + } + + Ok(client_map) +} diff --git a/watch/src/blockprint/mod.rs b/watch/src/blockprint/mod.rs new file mode 100644 index 0000000000..b8107e5bf5 --- /dev/null +++ b/watch/src/blockprint/mod.rs @@ -0,0 +1,149 @@ +pub mod database; +pub mod server; +pub mod updater; + +mod config; + +use crate::database::WatchSlot; + +use eth2::SensitiveUrl; +use reqwest::{Client, Response, Url}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::time::Duration; +use types::Slot; + +pub use config::Config; +pub use database::{ + get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint, + get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint, + list_consensus_clients, WatchBlockprint, +}; +pub use server::blockprint_routes; + +const TIMEOUT: Duration = Duration::from_secs(50); + +#[derive(Debug)] +pub enum Error { + Reqwest(reqwest::Error), + Url(url::ParseError), + BlockprintNotSynced, + Other(String), +} + +impl From for Error { + fn from(e: reqwest::Error) -> Self { + Error::Reqwest(e) + } +} + +impl From for Error { + fn from(e: url::ParseError) -> Self { + Error::Url(e) + } +} + +pub struct WatchBlockprintClient { + pub client: Client, + pub server: SensitiveUrl, + pub username: Option, + pub password: Option, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct BlockprintSyncingResponse { + pub greatest_block_slot: Slot, + pub 
synced: bool, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct BlockprintResponse { + pub proposer_index: i32, + pub slot: Slot, + pub best_guess_single: String, +} + +impl WatchBlockprintClient { + async fn get(&self, url: Url) -> Result { + let mut builder = self.client.get(url).timeout(TIMEOUT); + if let Some(username) = &self.username { + builder = builder.basic_auth(username, self.password.as_ref()); + } + let response = builder.send().await.map_err(Error::Reqwest)?; + + if !response.status().is_success() { + return Err(Error::Other(response.text().await?)); + } + + Ok(response) + } + + // Returns the `greatest_block_slot` as reported by the Blockprint server. + // Will error if the Blockprint server is not synced. + #[allow(dead_code)] + pub async fn ensure_synced(&self) -> Result { + let url = self.server.full.join("sync/")?.join("status")?; + + let response = self.get(url).await?; + + let result = response.json::().await?; + if !result.synced { + return Err(Error::BlockprintNotSynced); + } + + Ok(result.greatest_block_slot) + } + + // Pulls the latest blockprint for all validators. + #[allow(dead_code)] + pub async fn blockprint_all_validators( + &self, + highest_validator: i32, + ) -> Result, Error> { + let url = self + .server + .full + .join("validator/")? + .join("blocks/")? + .join("latest")?; + + let response = self.get(url).await?; + + let mut result = response.json::>().await?; + result.retain(|print| print.proposer_index <= highest_validator); + + let mut map: HashMap = HashMap::with_capacity(result.len()); + for print in result { + map.insert(print.proposer_index, print.best_guess_single); + } + + Ok(map) + } + + // Construct a request to the Blockprint server for a range of slots between `start_slot` and + // `end_slot`. + pub async fn get_blockprint( + &self, + start_slot: Slot, + end_slot: Slot, + ) -> Result, Error> { + let url = self + .server + .full + .join("blocks/")? 
+ .join(&format!("{start_slot}/{end_slot}"))?; + + let response = self.get(url).await?; + + let result = response + .json::>() + .await? + .iter() + .map(|response| WatchBlockprint { + slot: WatchSlot::from_slot(response.slot), + best_guess: response.best_guess_single.clone(), + }) + .collect(); + Ok(result) + } +} diff --git a/watch/src/blockprint/server.rs b/watch/src/blockprint/server.rs new file mode 100644 index 0000000000..488af15717 --- /dev/null +++ b/watch/src/blockprint/server.rs @@ -0,0 +1,31 @@ +use crate::blockprint::database::{ + get_blockprint_by_root, get_blockprint_by_slot, WatchBlockprint, +}; +use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; +use crate::server::Error; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use eth2::types::BlockId; +use std::str::FromStr; + +pub async fn get_blockprint( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => Ok(Json(get_blockprint_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(get_blockprint_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub fn blockprint_routes() -> Router { + Router::new().route("/v1/blocks/:block/blockprint", get(get_blockprint)) +} diff --git a/watch/src/blockprint/updater.rs b/watch/src/blockprint/updater.rs new file mode 100644 index 0000000000..28c3184556 --- /dev/null +++ b/watch/src/blockprint/updater.rs @@ -0,0 +1,172 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use eth2::types::EthSpec; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT: u64 = 1600; + +impl UpdateHandler { + /// Forward fills the `blockprint` table starting from the entry with the + /// highest slot. 
+ /// + /// It constructs a request to the `get_blockprint` API with: + /// `start_slot` -> highest filled `blockprint` + 1 (or lowest beacon block) + /// `end_slot` -> highest beacon block + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. + pub async fn fill_blockprint(&mut self) -> Result<(), Error> { + // Ensure blockprint is enabled. + if let Some(blockprint_client) = &self.blockprint { + let mut conn = database::get_connection(&self.pool)?; + + // Get the slot of the highest entry in the `blockprint` table. + let mut start_slot = if let Some(highest_filled_slot) = + database::get_highest_blockprint(&mut conn)?.map(|print| print.slot) + { + highest_filled_slot.as_slot() + 1 + } else { + // No entries in the `blockprint` table. Use `beacon_blocks` instead. + if let Some(lowest_beacon_block) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot) + { + lowest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not fill the `blockprint` table. + warn!("Refusing to fill blockprint as there are no blocks in the database"); + return Ok(()); + } + }; + + // The `blockprint` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1; + } + + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + let mut end_slot = highest_beacon_block.as_slot(); + + if start_slot > end_slot { + debug!("Blockprint is up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) { + end_slot = start_slot + MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT + } + + let mut prints = blockprint_client + .get_blockprint(start_slot, end_slot) + .await?; + + // Ensure the prints returned from blockprint are for slots which exist in the + // `beacon_blocks` table.
+ prints.retain(|print| { + database::get_beacon_block_by_slot(&mut conn, print.slot) + .ok() + .flatten() + .is_some() + }); + + database::insert_batch_blockprint(&mut conn, prints)?; + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `blockprint` table. This is a critical failure. It usually means + // someone has manually tampered with the database tables and should not occur during + // normal operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + } + + Ok(()) + } + + /// Backfill the `blockprint` table starting from the entry with the lowest slot. + /// + /// It constructs a request to the `get_blockprint` API with: + /// `start_slot` -> lowest_beacon_block + /// `end_slot` -> lowest filled `blockprint` - 1 (or highest beacon block) + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. + pub async fn backfill_blockprint(&mut self) -> Result<(), Error> { + // Ensure blockprint is enabled. + if let Some(blockprint_client) = &self.blockprint { + let mut conn = database::get_connection(&self.pool)?; + let max_blockprint_backfill = + self.config.max_backfill_size_epochs * self.slots_per_epoch; + + // Get the slot of the lowest entry in the `blockprint` table. + let end_slot = if let Some(lowest_filled_slot) = + database::get_lowest_blockprint(&mut conn)?.map(|print| print.slot) + { + lowest_filled_slot.as_slot().saturating_sub(1_u64) + } else { + // No entries in the `blockprint` table. Use `beacon_blocks` instead. + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + highest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not backfill the `blockprint` table.
+ warn!("Refusing to backfill blockprint as there are no blocks in the database"); + return Ok(()); + } + }; + + if end_slot <= 1 { + debug!("Blockprint backfill is complete"); + return Ok(()); + } + + if let Some(lowest_block_slot) = database::get_lowest_beacon_block(&mut conn)? { + let mut start_slot = lowest_block_slot.slot.as_slot(); + + if start_slot >= end_slot { + debug!("Blockprint are up to date with the base of the database"); + return Ok(()); + } + + // Ensure that the request range does not exceed `max_blockprint_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. + if start_slot < end_slot.saturating_sub(max_blockprint_backfill) { + start_slot = end_slot.saturating_sub(max_blockprint_backfill) + } + + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) { + start_slot = end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) + } + + // The `blockprint` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1 + } + + let mut prints = blockprint_client + .get_blockprint(start_slot, end_slot) + .await?; + + // Ensure the prints returned from blockprint are for slots which exist in the + // `beacon_blocks` table. + prints.retain(|print| { + database::get_beacon_block_by_slot(&mut conn, print.slot) + .ok() + .flatten() + .is_some() + }); + + database::insert_batch_blockprint(&mut conn, prints)?; + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the `blockprint` + // table. This is a critical failure. It usually means someone has manually tampered with the + // database tables and should not occur during normal operation. + error!("Database is corrupted. 
Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + } + Ok(()) + } +} diff --git a/watch/src/cli.rs b/watch/src/cli.rs new file mode 100644 index 0000000000..a8e5f3716f --- /dev/null +++ b/watch/src/cli.rs @@ -0,0 +1,55 @@ +use crate::{config::Config, logger, server, updater}; +use clap::{App, Arg}; +use tokio::sync::oneshot; + +pub const SERVE: &str = "serve"; +pub const RUN_UPDATER: &str = "run-updater"; +pub const CONFIG: &str = "config"; + +fn run_updater<'a, 'b>() -> App<'a, 'b> { + App::new(RUN_UPDATER).setting(clap::AppSettings::ColoredHelp) +} + +fn serve<'a, 'b>() -> App<'a, 'b> { + App::new(SERVE).setting(clap::AppSettings::ColoredHelp) +} + +pub fn app<'a, 'b>() -> App<'a, 'b> { + App::new("beacon_watch_daemon") + .author("Sigma Prime ") + .setting(clap::AppSettings::ColoredHelp) + .arg( + Arg::with_name(CONFIG) + .long(CONFIG) + .value_name("PATH_TO_CONFIG") + .help("Path to configuration file") + .takes_value(true) + .global(true), + ) + .subcommand(run_updater()) + .subcommand(serve()) +} + +pub async fn run() -> Result<(), String> { + let matches = app().get_matches(); + + let config = match matches.value_of(CONFIG) { + Some(path) => Config::load_from_file(path.to_string())?, + None => Config::default(), + }; + + logger::init_logger(&config.log_level); + + match matches.subcommand() { + (RUN_UPDATER, Some(_)) => updater::run_updater(config) + .await + .map_err(|e| format!("Failure: {:?}", e)), + (SERVE, Some(_)) => { + let (_shutdown_tx, shutdown_rx) = oneshot::channel(); + server::serve(config, shutdown_rx) + .await + .map_err(|e| format!("Failure: {:?}", e)) + } + _ => Err("Unsupported subcommand. 
See --help".into()), + } +} diff --git a/watch/src/client.rs b/watch/src/client.rs new file mode 100644 index 0000000000..43aaccde34 --- /dev/null +++ b/watch/src/client.rs @@ -0,0 +1,178 @@ +use crate::block_packing::WatchBlockPacking; +use crate::block_rewards::WatchBlockRewards; +use crate::database::models::{ + WatchBeaconBlock, WatchCanonicalSlot, WatchProposerInfo, WatchValidator, +}; +use crate::suboptimal_attestations::WatchAttestation; + +use eth2::types::BlockId; +use reqwest::Client; +use serde::de::DeserializeOwned; +use types::Hash256; +use url::Url; + +#[derive(Debug)] +pub enum Error { + Reqwest(reqwest::Error), + Url(url::ParseError), +} + +impl From for Error { + fn from(e: reqwest::Error) -> Self { + Error::Reqwest(e) + } +} + +impl From for Error { + fn from(e: url::ParseError) -> Self { + Error::Url(e) + } +} + +pub struct WatchHttpClient { + pub client: Client, + pub server: Url, +} + +impl WatchHttpClient { + async fn get_opt(&self, url: Url) -> Result, Error> { + let response = self.client.get(url).send().await?; + + if response.status() == 404 { + Ok(None) + } else { + response + .error_for_status()? + .json() + .await + .map_err(Into::into) + } + } + + pub async fn get_beacon_blocks( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? 
+ .join(&block_id.to_string())?; + + self.get_opt(url).await + } + + pub async fn get_lowest_canonical_slot(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("slots/")?.join("lowest")?; + + self.get_opt(url).await + } + + pub async fn get_highest_canonical_slot(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("slots/")?.join("highest")?; + + self.get_opt(url).await + } + + pub async fn get_lowest_beacon_block(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("blocks/")?.join("lowest")?; + + self.get_opt(url).await + } + + pub async fn get_highest_beacon_block(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("blocks/")?.join("highest")?; + + self.get_opt(url).await + } + + pub async fn get_next_beacon_block( + &self, + parent: Hash256, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{parent:?}/"))? + .join("next")?; + + self.get_opt(url).await + } + + pub async fn get_validator_by_index( + &self, + index: i32, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("validators/")? + .join(&format!("{index}"))?; + + self.get_opt(url).await + } + + pub async fn get_proposer_info( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{block_id}/"))? + .join("proposer")?; + + self.get_opt(url).await + } + + pub async fn get_block_reward( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{block_id}/"))? + .join("rewards")?; + + self.get_opt(url).await + } + + pub async fn get_block_packing( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{block_id}/"))? 
+ .join("packing")?; + + self.get_opt(url).await + } + + pub async fn get_all_validators(&self) -> Result>, Error> { + let url = self.server.join("v1/")?.join("validators/")?.join("all")?; + + self.get_opt(url).await + } + + pub async fn get_attestations( + &self, + epoch: i32, + ) -> Result>, Error> { + let url = self + .server + .join("v1/")? + .join("validators/")? + .join("all/")? + .join("attestation/")? + .join(&format!("{epoch}"))?; + + self.get_opt(url).await + } +} diff --git a/watch/src/config.rs b/watch/src/config.rs new file mode 100644 index 0000000000..4e61f9df9c --- /dev/null +++ b/watch/src/config.rs @@ -0,0 +1,50 @@ +use crate::blockprint::Config as BlockprintConfig; +use crate::database::Config as DatabaseConfig; +use crate::server::Config as ServerConfig; +use crate::updater::Config as UpdaterConfig; + +use serde::{Deserialize, Serialize}; +use std::fs::File; + +pub const LOG_LEVEL: &str = "debug"; + +fn log_level() -> String { + LOG_LEVEL.to_string() +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default)] + pub blockprint: BlockprintConfig, + #[serde(default)] + pub database: DatabaseConfig, + #[serde(default)] + pub server: ServerConfig, + #[serde(default)] + pub updater: UpdaterConfig, + /// The minimum severity for log messages. 
+ #[serde(default = "log_level")] + pub log_level: String, +} + +impl Default for Config { + fn default() -> Self { + Self { + blockprint: BlockprintConfig::default(), + database: DatabaseConfig::default(), + server: ServerConfig::default(), + updater: UpdaterConfig::default(), + log_level: log_level(), + } + } +} + +impl Config { + pub fn load_from_file(path_to_file: String) -> Result { + let file = + File::open(path_to_file).map_err(|e| format!("Error reading config file: {:?}", e))?; + let config: Config = serde_yaml::from_reader(file) + .map_err(|e| format!("Error parsing config file: {:?}", e))?; + Ok(config) + } +} diff --git a/watch/src/database/compat.rs b/watch/src/database/compat.rs new file mode 100644 index 0000000000..b8cda0b216 --- /dev/null +++ b/watch/src/database/compat.rs @@ -0,0 +1,49 @@ +//! Implementations of PostgreSQL compatibility traits. +use crate::database::watch_types::{WatchHash, WatchPK, WatchSlot}; +use diesel::deserialize::{self, FromSql}; +use diesel::pg::{Pg, PgValue}; +use diesel::serialize::{self, Output, ToSql}; +use diesel::sql_types::{Binary, Integer}; + +use std::convert::TryFrom; + +macro_rules! impl_to_from_sql_int { + ($type:ty) => { + impl ToSql for $type + where + i32: ToSql, + { + fn to_sql<'a>(&'a self, out: &mut Output<'a, '_, Pg>) -> serialize::Result { + let v = i32::try_from(self.as_u64()).map_err(|e| Box::new(e))?; + >::to_sql(&v, &mut out.reborrow()) + } + } + + impl FromSql for $type { + fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { + Ok(Self::new(i32::from_sql(bytes)? as u64)) + } + } + }; +} + +macro_rules! 
impl_to_from_sql_binary { + ($type:ty) => { + impl ToSql for $type { + fn to_sql<'a>(&'a self, out: &mut Output<'a, '_, Pg>) -> serialize::Result { + let b = self.as_bytes(); + <&[u8] as ToSql>::to_sql(&b, &mut out.reborrow()) + } + } + + impl FromSql for $type { + fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { + Self::from_bytes(bytes.as_bytes()).map_err(|e| e.to_string().into()) + } + } + }; +} + +impl_to_from_sql_int!(WatchSlot); +impl_to_from_sql_binary!(WatchHash); +impl_to_from_sql_binary!(WatchPK); diff --git a/watch/src/database/config.rs b/watch/src/database/config.rs new file mode 100644 index 0000000000..dc0c70832f --- /dev/null +++ b/watch/src/database/config.rs @@ -0,0 +1,74 @@ +use serde::{Deserialize, Serialize}; + +pub const USER: &str = "postgres"; +pub const PASSWORD: &str = "postgres"; +pub const DBNAME: &str = "dev"; +pub const DEFAULT_DBNAME: &str = "postgres"; +pub const HOST: &str = "localhost"; +pub const fn port() -> u16 { + 5432 +} +pub const fn connect_timeout_millis() -> u64 { + 2_000 // 2s +} + +fn user() -> String { + USER.to_string() +} + +fn password() -> String { + PASSWORD.to_string() +} + +fn dbname() -> String { + DBNAME.to_string() +} + +fn default_dbname() -> String { + DEFAULT_DBNAME.to_string() +} + +fn host() -> String { + HOST.to_string() +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default = "user")] + pub user: String, + #[serde(default = "password")] + pub password: String, + #[serde(default = "dbname")] + pub dbname: String, + #[serde(default = "default_dbname")] + pub default_dbname: String, + #[serde(default = "host")] + pub host: String, + #[serde(default = "port")] + pub port: u16, + #[serde(default = "connect_timeout_millis")] + pub connect_timeout_millis: u64, +} + +impl Default for Config { + fn default() -> Self { + Self { + user: user(), + password: password(), + dbname: dbname(), + default_dbname: default_dbname(), + host: host(), + port: port(), + 
connect_timeout_millis: connect_timeout_millis(), + } + } +} + +impl Config { + pub fn build_database_url(&self) -> String { + format!( + "postgres://{}:{}@{}:{}/{}", + self.user, self.password, self.host, self.port, self.dbname + ) + } +} diff --git a/watch/src/database/error.rs b/watch/src/database/error.rs new file mode 100644 index 0000000000..8c5088fa13 --- /dev/null +++ b/watch/src/database/error.rs @@ -0,0 +1,55 @@ +use bls::Error as BlsError; +use diesel::result::{ConnectionError, Error as PgError}; +use eth2::SensitiveError; +use r2d2::Error as PoolError; +use std::fmt; +use types::BeaconStateError; + +#[derive(Debug)] +pub enum Error { + BeaconState(BeaconStateError), + Database(PgError), + DatabaseCorrupted, + InvalidSig(BlsError), + PostgresConnection(ConnectionError), + Pool(PoolError), + SensitiveUrl(SensitiveError), + InvalidRoot, + Other(String), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl From for Error { + fn from(e: BeaconStateError) -> Self { + Error::BeaconState(e) + } +} + +impl From for Error { + fn from(e: ConnectionError) -> Self { + Error::PostgresConnection(e) + } +} + +impl From for Error { + fn from(e: PgError) -> Self { + Error::Database(e) + } +} + +impl From for Error { + fn from(e: PoolError) -> Self { + Error::Pool(e) + } +} + +impl From for Error { + fn from(e: BlsError) -> Self { + Error::InvalidSig(e) + } +} diff --git a/watch/src/database/mod.rs b/watch/src/database/mod.rs new file mode 100644 index 0000000000..b9a7a900a5 --- /dev/null +++ b/watch/src/database/mod.rs @@ -0,0 +1,782 @@ +mod config; +mod error; + +pub mod compat; +pub mod models; +pub mod schema; +pub mod utils; +pub mod watch_types; + +use self::schema::{ + active_config, beacon_blocks, canonical_slots, proposer_info, suboptimal_attestations, + validators, +}; + +use diesel::dsl::max; +use diesel::pg::PgConnection; +use diesel::prelude::*; +use diesel::r2d2::{Builder, 
ConnectionManager, Pool, PooledConnection}; +use diesel::upsert::excluded; +use log::{debug, info}; +use std::collections::HashMap; +use std::time::Instant; +use types::{EthSpec, SignedBeaconBlock}; + +pub use self::error::Error; +pub use self::models::{WatchBeaconBlock, WatchCanonicalSlot, WatchProposerInfo, WatchValidator}; +pub use self::watch_types::{WatchHash, WatchPK, WatchSlot}; + +pub use crate::block_rewards::{ + get_block_rewards_by_root, get_block_rewards_by_slot, get_highest_block_rewards, + get_lowest_block_rewards, get_unknown_block_rewards, insert_batch_block_rewards, + WatchBlockRewards, +}; + +pub use crate::block_packing::{ + get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing, + get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing, + WatchBlockPacking, +}; + +pub use crate::suboptimal_attestations::{ + get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, get_attestation_by_pubkey, + get_highest_attestation, get_lowest_attestation, insert_batch_suboptimal_attestations, + WatchAttestation, WatchSuboptimalAttestation, +}; + +pub use crate::blockprint::{ + get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint, + get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint, + WatchBlockprint, +}; + +pub use config::Config; + +/// Batch inserts cannot exceed a certain size. +/// See https://github.com/diesel-rs/diesel/issues/2414. +/// For some reason, this seems to translate to 65535 / 5 (13107) records. +pub const MAX_SIZE_BATCH_INSERT: usize = 13107; + +pub type PgPool = Pool>; +pub type PgConn = PooledConnection>; + +/// Connect to a Postgresql database and build a connection pool. 
+pub fn build_connection_pool(config: &Config) -> Result { + let database_url = config.clone().build_database_url(); + info!("Building connection pool at: {database_url}"); + let pg = ConnectionManager::::new(&database_url); + Builder::new().build(pg).map_err(Error::Pool) +} + +/// Retrieve an idle connection from the pool. +pub fn get_connection(pool: &PgPool) -> Result { + pool.get().map_err(Error::Pool) +} + +/// Insert the active config into the database. This is used to check if the connected beacon node +/// is compatible with the database. These values will not change (except +/// `current_blockprint_checkpoint`). +pub fn insert_active_config( + conn: &mut PgConn, + new_config_name: String, + new_slots_per_epoch: u64, +) -> Result<(), Error> { + use self::active_config::dsl::*; + + diesel::insert_into(active_config) + .values(&vec![( + id.eq(1), + config_name.eq(new_config_name), + slots_per_epoch.eq(new_slots_per_epoch as i32), + )]) + .on_conflict_do_nothing() + .execute(conn)?; + + Ok(()) +} + +/// Get the active config from the database. +pub fn get_active_config(conn: &mut PgConn) -> Result, Error> { + use self::active_config::dsl::*; + Ok(active_config + .select((config_name, slots_per_epoch)) + .filter(id.eq(1)) + .first::<(String, i32)>(conn) + .optional()?) +} + +/// +/// INSERT statements +/// + +/// Inserts a single row into the `canonical_slots` table. +/// If `new_slot.beacon_block` is `None`, the value in the row will be `null`. +/// +/// On a conflict, it will do nothing, leaving the old value. 
+pub fn insert_canonical_slot(conn: &mut PgConn, new_slot: WatchCanonicalSlot) -> Result<(), Error> { + diesel::insert_into(canonical_slots::table) + .values(&new_slot) + .on_conflict_do_nothing() + .execute(conn)?; + + debug!("Canonical slot inserted: {}", new_slot.slot); + Ok(()) +} + +pub fn insert_beacon_block( + conn: &mut PgConn, + block: SignedBeaconBlock, + root: WatchHash, +) -> Result<(), Error> { + use self::canonical_slots::dsl::{beacon_block, slot as canonical_slot}; + + let block_message = block.message(); + + // Pull out relevant values from the block. + let slot = WatchSlot::from_slot(block.slot()); + let parent_root = WatchHash::from_hash(block.parent_root()); + let proposer_index = block_message.proposer_index() as i32; + let graffiti = block_message.body().graffiti().as_utf8_lossy(); + let attestation_count = block_message.body().attestations().len() as i32; + + let full_payload = block_message.execution_payload().ok(); + + let transaction_count: Option = if let Some(bellatrix_payload) = + full_payload.and_then(|payload| payload.execution_payload_merge().ok()) + { + Some(bellatrix_payload.transactions.len() as i32) + } else { + full_payload + .and_then(|payload| payload.execution_payload_capella().ok()) + .map(|payload| payload.transactions.len() as i32) + }; + + let withdrawal_count: Option = full_payload + .and_then(|payload| payload.execution_payload_capella().ok()) + .map(|payload| payload.withdrawals.len() as i32); + + let block_to_add = WatchBeaconBlock { + slot, + root, + parent_root, + attestation_count, + transaction_count, + withdrawal_count, + }; + + let proposer_info_to_add = WatchProposerInfo { + slot, + proposer_index, + graffiti, + }; + + // Update the canonical slots table. + diesel::update(canonical_slots::table) + .set(beacon_block.eq(root)) + .filter(canonical_slot.eq(slot)) + // Do not overwrite the value if it already exists. 
+ .filter(beacon_block.is_null()) + .execute(conn)?; + + diesel::insert_into(beacon_blocks::table) + .values(block_to_add) + .on_conflict_do_nothing() + .execute(conn)?; + + diesel::insert_into(proposer_info::table) + .values(proposer_info_to_add) + .on_conflict_do_nothing() + .execute(conn)?; + + debug!("Beacon block inserted at slot: {slot}, root: {root}, parent: {parent_root}"); + Ok(()) +} + +/// Insert a validator into the `validators` table +/// +/// On a conflict, it will only overwrite `status`, `activation_epoch` and `exit_epoch`. +pub fn insert_validator(conn: &mut PgConn, validator: WatchValidator) -> Result<(), Error> { + use self::validators::dsl::*; + let new_index = validator.index; + let new_public_key = validator.public_key; + + diesel::insert_into(validators) + .values(validator) + .on_conflict(index) + .do_update() + .set(( + status.eq(excluded(status)), + activation_epoch.eq(excluded(activation_epoch)), + exit_epoch.eq(excluded(exit_epoch)), + )) + .execute(conn)?; + + debug!("Validator inserted, index: {new_index}, public_key: {new_public_key}"); + Ok(()) +} + +/// Insert a batch of values into the `validators` table. +/// +/// On a conflict, it will do nothing. +/// +/// Should not be used when updating validators. +/// Validators should be updated through the `insert_validator` function which contains the correct +/// `on_conflict` clauses. +pub fn insert_batch_validators( + conn: &mut PgConn, + all_validators: Vec, +) -> Result<(), Error> { + use self::validators::dsl::*; + + let mut count = 0; + + for chunk in all_validators.chunks(1000) { + count += diesel::insert_into(validators) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + debug!("Validators inserted, count: {count}"); + Ok(()) +} + +/// +/// SELECT statements +/// + +/// Selects a single row of the `canonical_slots` table corresponding to a given `slot_query`. 
+pub fn get_canonical_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: {slot_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `canonical_slots` table corresponding to a given `root_query`. +/// Only returns the non-skipped slot which matches `root`. +pub fn get_canonical_slot_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(root.eq(root_query)) + .filter(skipped.eq(false)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical root requested: {root_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `root` from a single row of the `canonical_slots` table corresponding to a given +/// `slot_query`. +#[allow(dead_code)] +pub fn get_root_at_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .select(root) + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: {slot_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from the row of the `canonical_slots` table corresponding to the minimum value +/// of `slot`. 
+pub fn get_lowest_canonical_slot(conn: &mut PgConn) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: lowest, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from the row of the `canonical_slots` table corresponding to the minimum value +/// of `slot` and where `skipped == false`. +pub fn get_lowest_non_skipped_canonical_slot( + conn: &mut PgConn, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(skipped.eq(false)) + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: lowest_non_skipped, time taken: {time_taken:?})"); + Ok(result) +} + +/// Select 'slot' from the row of the `canonical_slots` table corresponding to the maximum value +/// of `slot`. +pub fn get_highest_canonical_slot(conn: &mut PgConn) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: highest, time taken: {time_taken:?}"); + Ok(result) +} + +/// Select 'slot' from the row of the `canonical_slots` table corresponding to the maximum value +/// of `slot` and where `skipped == false`. 
+pub fn get_highest_non_skipped_canonical_slot( + conn: &mut PgConn, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(skipped.eq(false)) + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: highest_non_skipped, time taken: {time_taken:?}"); + Ok(result) +} + +/// Select all rows of the `canonical_slots` table where `slot >= `start_slot && slot <= +/// `end_slot`. +pub fn get_canonical_slots_by_range( + conn: &mut PgConn, + start_slot: WatchSlot, + end_slot: WatchSlot, +) -> Result>, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(slot.ge(start_slot)) + .filter(slot.le(end_slot)) + .load::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!( + "Canonical slots by range requested, start_slot: {}, end_slot: {}, time_taken: {:?}", + start_slot.as_u64(), + end_slot.as_u64(), + time_taken + ); + Ok(result) +} + +/// Selects `root` from all rows of the `canonical_slots` table which have `beacon_block == null` +/// and `skipped == false` +pub fn get_unknown_canonical_blocks(conn: &mut PgConn) -> Result, Error> { + use self::canonical_slots::dsl::*; + + let result = canonical_slots + .select(root) + .filter(beacon_block.is_null()) + .filter(skipped.eq(false)) + .order_by(slot.desc()) + .load::(conn)?; + + Ok(result) +} + +/// Selects the row from the `beacon_blocks` table where `slot` is minimum. 
+pub fn get_lowest_beacon_block(conn: &mut PgConn) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Beacon block requested: lowest, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `beacon_blocks` table where `slot` is maximum. +pub fn get_highest_beacon_block(conn: &mut PgConn) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Beacon block requested: highest, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row from the `beacon_blocks` table corresponding to a given `root_query`. +pub fn get_beacon_block_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + let time_taken = timer.elapsed(); + debug!("Beacon block requested: {root_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row from the `beacon_blocks` table corresponding to a given `slot_query`. +pub fn get_beacon_block_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + let time_taken = timer.elapsed(); + debug!("Beacon block requested: {slot_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `beacon_blocks` table where `parent_root` equals the given `parent`. +/// This fetches the next block in the database. 
+/// +/// Will return `Ok(None)` if there are no matching blocks (e.g. the tip of the chain). +pub fn get_beacon_block_with_parent( + conn: &mut PgConn, + parent: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .filter(parent_root.eq(parent)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Next beacon block requested: {parent}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Select all rows of the `beacon_blocks` table where `slot >= `start_slot && slot <= +/// `end_slot`. +pub fn get_beacon_blocks_by_range( + conn: &mut PgConn, + start_slot: WatchSlot, + end_slot: WatchSlot, +) -> Result>, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .filter(slot.ge(start_slot)) + .filter(slot.le(end_slot)) + .load::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Beacon blocks by range requested, start_slot: {start_slot}, end_slot: {end_slot}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `proposer_info` table corresponding to a given `root_query`. +pub fn get_proposer_info_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root}; + use self::proposer_info::dsl::*; + let timer = Instant::now(); + + let join = beacon_blocks.inner_join(proposer_info); + + let result = join + .select((slot, proposer_index, graffiti)) + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Proposer info requested for block: {root_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `proposer_info` table corresponding to a given `slot_query`. 
+pub fn get_proposer_info_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::proposer_info::dsl::*; + let timer = Instant::now(); + + let result = proposer_info + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Proposer info requested for slot: {slot_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects multiple rows of the `proposer_info` table between `start_slot` and `end_slot`. +/// Selects a single row of the `proposer_info` table corresponding to a given `slot_query`. +#[allow(dead_code)] +pub fn get_proposer_info_by_range( + conn: &mut PgConn, + start_slot: WatchSlot, + end_slot: WatchSlot, +) -> Result>, Error> { + use self::proposer_info::dsl::*; + let timer = Instant::now(); + + let result = proposer_info + .filter(slot.ge(start_slot)) + .filter(slot.le(end_slot)) + .load::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!( + "Proposer info requested for range: {start_slot} to {end_slot}, time taken: {time_taken:?}" + ); + Ok(result) +} + +pub fn get_validators_latest_proposer_info( + conn: &mut PgConn, + indices_query: Vec, +) -> Result, Error> { + use self::proposer_info::dsl::*; + + let proposers = proposer_info + .filter(proposer_index.eq_any(indices_query)) + .load::(conn)?; + + let mut result = HashMap::new(); + for proposer in proposers { + result + .entry(proposer.proposer_index) + .or_insert_with(|| proposer.clone()); + let entry = result + .get_mut(&proposer.proposer_index) + .ok_or_else(|| Error::Other("An internal error occured".to_string()))?; + if proposer.slot > entry.slot { + entry.slot = proposer.slot + } + } + + Ok(result) +} + +/// Selects the max(`slot`) and `proposer_index` of each unique index in the +/// `proposer_info` table and returns them formatted as a `HashMap`. +/// Only returns rows which have `slot <= target_slot`. 
+/// +/// Ideally, this would return the full row, but I have not found a way to do that without using +/// a much more expensive SQL query. +pub fn get_all_validators_latest_proposer_info_at_slot( + conn: &mut PgConn, + target_slot: WatchSlot, +) -> Result, Error> { + use self::proposer_info::dsl::*; + + let latest_proposals: Vec<(i32, Option)> = proposer_info + .group_by(proposer_index) + .select((proposer_index, max(slot))) + .filter(slot.le(target_slot)) + .load::<(i32, Option)>(conn)?; + + let mut result = HashMap::new(); + + for proposal in latest_proposals { + if let Some(latest_slot) = proposal.1 { + result.insert(latest_slot, proposal.0); + } + } + + Ok(result) +} + +/// Selects a single row from the `validators` table corresponding to a given +/// `validator_index_query`. +pub fn get_validator_by_index( + conn: &mut PgConn, + validator_index_query: i32, +) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let result = validators + .filter(index.eq(validator_index_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Validator requested: {validator_index_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row from the `validators` table corresponding to a given +/// `public_key_query`. +pub fn get_validator_by_public_key( + conn: &mut PgConn, + public_key_query: WatchPK, +) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let result = validators + .filter(public_key.eq(public_key_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Validator requested: {public_key_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects all rows from the `validators` table which have an `index` contained in +/// the `indices_query`. 
+#[allow(dead_code)] +pub fn get_validators_by_indices( + conn: &mut PgConn, + indices_query: Vec, +) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let query_len = indices_query.len(); + let result = validators + .filter(index.eq_any(indices_query)) + .load::(conn)?; + + let time_taken = timer.elapsed(); + debug!("{query_len} validators requested, time taken: {time_taken:?}"); + Ok(result) +} + +// Selects all rows from the `validators` table. +pub fn get_all_validators(conn: &mut PgConn) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let result = validators.load::(conn)?; + + let time_taken = timer.elapsed(); + debug!("All validators requested, time taken: {time_taken:?}"); + Ok(result) +} + +/// Counts the number of rows in the `validators` table. +#[allow(dead_code)] +pub fn count_validators(conn: &mut PgConn) -> Result { + use self::validators::dsl::*; + + validators.count().get_result(conn).map_err(Error::Database) +} + +/// Counts the number of rows in the `validators` table where +/// `activation_epoch <= target_slot.epoch()`. +pub fn count_validators_activated_before_slot( + conn: &mut PgConn, + target_slot: WatchSlot, + slots_per_epoch: u64, +) -> Result { + use self::validators::dsl::*; + + let target_epoch = target_slot.epoch(slots_per_epoch); + + validators + .count() + .filter(activation_epoch.le(target_epoch.as_u64() as i32)) + .get_result(conn) + .map_err(Error::Database) +} + +/// +/// DELETE statements. +/// + +/// Deletes all rows of the `canonical_slots` table which have `slot` greater than `slot_query`. +/// +/// Due to the ON DELETE CASCADE clause present in the database migration SQL, deleting rows from +/// `canonical_slots` will delete all corresponding rows in `beacon_blocks, `block_rewards`, +/// `block_packing` and `proposer_info`. 
+pub fn delete_canonical_slots_above( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result { + use self::canonical_slots::dsl::*; + + let result = diesel::delete(canonical_slots) + .filter(slot.gt(slot_query)) + .execute(conn)?; + + debug!("Deleted canonical slots above {slot_query}: {result} rows deleted"); + Ok(result) +} + +/// Deletes all rows of the `suboptimal_attestations` table which have `epoch_start_slot` greater +/// than `epoch_start_slot_query`. +pub fn delete_suboptimal_attestations_above( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result { + use self::suboptimal_attestations::dsl::*; + + let result = diesel::delete(suboptimal_attestations) + .filter(epoch_start_slot.gt(epoch_start_slot_query)) + .execute(conn)?; + + debug!("Deleted attestations above: {epoch_start_slot_query}, rows deleted: {result}"); + Ok(result) +} diff --git a/watch/src/database/models.rs b/watch/src/database/models.rs new file mode 100644 index 0000000000..f42444d661 --- /dev/null +++ b/watch/src/database/models.rs @@ -0,0 +1,67 @@ +use crate::database::{ + schema::{beacon_blocks, canonical_slots, proposer_info, validators}, + watch_types::{WatchHash, WatchPK, WatchSlot}, +}; +use diesel::{Insertable, Queryable}; +use serde::{Deserialize, Serialize}; +use std::hash::{Hash, Hasher}; + +pub type WatchEpoch = i32; + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = canonical_slots)] +pub struct WatchCanonicalSlot { + pub slot: WatchSlot, + pub root: WatchHash, + pub skipped: bool, + pub beacon_block: Option, +} + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = beacon_blocks)] +pub struct WatchBeaconBlock { + pub slot: WatchSlot, + pub root: WatchHash, + pub parent_root: WatchHash, + pub attestation_count: i32, + pub transaction_count: Option, + pub withdrawal_count: Option, +} + +#[derive(Clone, Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = 
validators)] +pub struct WatchValidator { + pub index: i32, + pub public_key: WatchPK, + pub status: String, + pub activation_epoch: Option, + pub exit_epoch: Option, +} + +// Implement a minimal version of `Hash` and `Eq` so that we know if a validator status has changed. +impl Hash for WatchValidator { + fn hash(&self, state: &mut H) { + self.index.hash(state); + self.status.hash(state); + self.activation_epoch.hash(state); + self.exit_epoch.hash(state); + } +} + +impl PartialEq for WatchValidator { + fn eq(&self, other: &Self) -> bool { + self.index == other.index + && self.status == other.status + && self.activation_epoch == other.activation_epoch + && self.exit_epoch == other.exit_epoch + } +} +impl Eq for WatchValidator {} + +#[derive(Clone, Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = proposer_info)] +pub struct WatchProposerInfo { + pub slot: WatchSlot, + pub proposer_index: i32, + pub graffiti: String, +} diff --git a/watch/src/database/schema.rs b/watch/src/database/schema.rs new file mode 100644 index 0000000000..32f22d506d --- /dev/null +++ b/watch/src/database/schema.rs @@ -0,0 +1,102 @@ +// @generated automatically by Diesel CLI. + +diesel::table! { + active_config (id) { + id -> Int4, + config_name -> Text, + slots_per_epoch -> Int4, + } +} + +diesel::table! { + beacon_blocks (slot) { + slot -> Int4, + root -> Bytea, + parent_root -> Bytea, + attestation_count -> Int4, + transaction_count -> Nullable, + withdrawal_count -> Nullable, + } +} + +diesel::table! { + block_packing (slot) { + slot -> Int4, + available -> Int4, + included -> Int4, + prior_skip_slots -> Int4, + } +} + +diesel::table! { + block_rewards (slot) { + slot -> Int4, + total -> Int4, + attestation_reward -> Int4, + sync_committee_reward -> Int4, + } +} + +diesel::table! { + blockprint (slot) { + slot -> Int4, + best_guess -> Text, + } +} + +diesel::table! 
{ + canonical_slots (slot) { + slot -> Int4, + root -> Bytea, + skipped -> Bool, + beacon_block -> Nullable, + } +} + +diesel::table! { + proposer_info (slot) { + slot -> Int4, + proposer_index -> Int4, + graffiti -> Text, + } +} + +diesel::table! { + suboptimal_attestations (epoch_start_slot, index) { + epoch_start_slot -> Int4, + index -> Int4, + source -> Bool, + head -> Bool, + target -> Bool, + } +} + +diesel::table! { + validators (index) { + index -> Int4, + public_key -> Bytea, + status -> Text, + activation_epoch -> Nullable, + exit_epoch -> Nullable, + } +} + +diesel::joinable!(block_packing -> beacon_blocks (slot)); +diesel::joinable!(block_rewards -> beacon_blocks (slot)); +diesel::joinable!(blockprint -> beacon_blocks (slot)); +diesel::joinable!(proposer_info -> beacon_blocks (slot)); +diesel::joinable!(proposer_info -> validators (proposer_index)); +diesel::joinable!(suboptimal_attestations -> canonical_slots (epoch_start_slot)); +diesel::joinable!(suboptimal_attestations -> validators (index)); + +diesel::allow_tables_to_appear_in_same_query!( + active_config, + beacon_blocks, + block_packing, + block_rewards, + blockprint, + canonical_slots, + proposer_info, + suboptimal_attestations, + validators, +); diff --git a/watch/src/database/utils.rs b/watch/src/database/utils.rs new file mode 100644 index 0000000000..7e450f0cee --- /dev/null +++ b/watch/src/database/utils.rs @@ -0,0 +1,29 @@ +#![allow(dead_code)] +use crate::database::config::Config; +use diesel::pg::PgConnection; +use diesel::prelude::*; +use diesel_migrations::{FileBasedMigrations, MigrationHarness}; + +/// Sets `config.dbname` to `config.default_dbname` and returns `(new_config, old_dbname)`. +/// +/// This is useful for creating or dropping databases, since these actions must be done by +/// logging into another database. 
+pub fn get_config_using_default_db(config: &Config) -> (Config, String) { + let mut config = config.clone(); + let new_dbname = std::mem::replace(&mut config.dbname, config.default_dbname.clone()); + (config, new_dbname) +} + +/// Runs the set of migrations as detected in the local directory. +/// Equivalent to `diesel migration run`. +/// +/// Contains `unwrap`s so is only suitable for test code. +/// TODO(mac) refactor to return Result +pub fn run_migrations(config: &Config) -> PgConnection { + let database_url = config.clone().build_database_url(); + let mut conn = PgConnection::establish(&database_url).unwrap(); + let migrations = FileBasedMigrations::find_migrations_directory().unwrap(); + conn.run_pending_migrations(migrations).unwrap(); + conn.begin_test_transaction().unwrap(); + conn +} diff --git a/watch/src/database/watch_types.rs b/watch/src/database/watch_types.rs new file mode 100644 index 0000000000..0b3ba2c304 --- /dev/null +++ b/watch/src/database/watch_types.rs @@ -0,0 +1,119 @@ +use crate::database::error::Error; +use diesel::{ + sql_types::{Binary, Integer}, + AsExpression, FromSqlRow, +}; +use serde::{Deserialize, Serialize}; +use std::fmt; +use std::str::FromStr; +use types::{Epoch, Hash256, PublicKeyBytes, Slot}; +#[derive( + Clone, + Copy, + Debug, + AsExpression, + FromSqlRow, + Deserialize, + Serialize, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +#[diesel(sql_type = Integer)] +pub struct WatchSlot(Slot); + +impl fmt::Display for WatchSlot { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl WatchSlot { + pub fn new(slot: u64) -> Self { + Self(Slot::new(slot)) + } + + pub fn from_slot(slot: Slot) -> Self { + Self(slot) + } + + pub fn as_slot(self) -> Slot { + self.0 + } + + pub fn as_u64(self) -> u64 { + self.0.as_u64() + } + + pub fn epoch(self, slots_per_epoch: u64) -> Epoch { + self.as_slot().epoch(slots_per_epoch) + } +} + +#[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, 
Deserialize, Serialize)] +#[diesel(sql_type = Binary)] +pub struct WatchHash(Hash256); + +impl fmt::Display for WatchHash { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl WatchHash { + pub fn as_hash(&self) -> Hash256 { + self.0 + } + + pub fn from_hash(hash: Hash256) -> Self { + WatchHash(hash) + } + + pub fn as_bytes(&self) -> &[u8] { + self.0.as_bytes() + } + + pub fn from_bytes(src: &[u8]) -> Result { + if src.len() == 32 { + Ok(WatchHash(Hash256::from_slice(src))) + } else { + Err(Error::InvalidRoot) + } + } +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, AsExpression, FromSqlRow, Serialize, Deserialize)] +#[diesel(sql_type = Binary)] +pub struct WatchPK(PublicKeyBytes); + +impl fmt::Display for WatchPK { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl WatchPK { + pub fn as_bytes(&self) -> &[u8] { + self.0.as_serialized() + } + + pub fn from_bytes(src: &[u8]) -> Result { + Ok(WatchPK(PublicKeyBytes::deserialize(src)?)) + } + + pub fn from_pubkey(key: PublicKeyBytes) -> Self { + WatchPK(key) + } +} + +impl FromStr for WatchPK { + type Err = String; + + fn from_str(s: &str) -> Result { + Ok(WatchPK( + PublicKeyBytes::from_str(s).map_err(|e| format!("Cannot be parsed: {}", e))?, + )) + } +} diff --git a/watch/src/lib.rs b/watch/src/lib.rs new file mode 100644 index 0000000000..664c945165 --- /dev/null +++ b/watch/src/lib.rs @@ -0,0 +1,12 @@ +#![cfg(unix)] +pub mod block_packing; +pub mod block_rewards; +pub mod blockprint; +pub mod cli; +pub mod client; +pub mod config; +pub mod database; +pub mod logger; +pub mod server; +pub mod suboptimal_attestations; +pub mod updater; diff --git a/watch/src/logger.rs b/watch/src/logger.rs new file mode 100644 index 0000000000..49310b42aa --- /dev/null +++ b/watch/src/logger.rs @@ -0,0 +1,24 @@ +use env_logger::Builder; +use log::{info, LevelFilter}; +use std::process; + +pub fn init_logger(log_level: &str) { + 
let log_level = match log_level.to_lowercase().as_str() { + "trace" => LevelFilter::Trace, + "debug" => LevelFilter::Debug, + "info" => LevelFilter::Info, + "warn" => LevelFilter::Warn, + "error" => LevelFilter::Error, + _ => { + eprintln!("Unsupported log level"); + process::exit(1) + } + }; + + let mut builder = Builder::new(); + builder.filter(Some("watch"), log_level); + + builder.init(); + + info!("Logger initialized with log-level: {log_level}"); +} diff --git a/watch/src/main.rs b/watch/src/main.rs new file mode 100644 index 0000000000..f971747da4 --- /dev/null +++ b/watch/src/main.rs @@ -0,0 +1,41 @@ +#[cfg(unix)] +use std::process; + +#[cfg(unix)] +mod block_packing; +#[cfg(unix)] +mod block_rewards; +#[cfg(unix)] +mod blockprint; +#[cfg(unix)] +mod cli; +#[cfg(unix)] +mod config; +#[cfg(unix)] +mod database; +#[cfg(unix)] +mod logger; +#[cfg(unix)] +mod server; +#[cfg(unix)] +mod suboptimal_attestations; +#[cfg(unix)] +mod updater; + +#[cfg(unix)] +#[tokio::main] +async fn main() { + match cli::run().await { + Ok(()) => process::exit(0), + Err(e) => { + eprintln!("Command failed with: {}", e); + drop(e); + process::exit(1) + } + } +} + +#[cfg(windows)] +fn main() { + eprintln!("Windows is not supported. 
Exiting."); +} diff --git a/watch/src/server/config.rs b/watch/src/server/config.rs new file mode 100644 index 0000000000..a7d38e706f --- /dev/null +++ b/watch/src/server/config.rs @@ -0,0 +1,28 @@ +use serde::{Deserialize, Serialize}; +use std::net::IpAddr; + +pub const LISTEN_ADDR: &str = "127.0.0.1"; + +pub const fn listen_port() -> u16 { + 5059 +} +fn listen_addr() -> IpAddr { + LISTEN_ADDR.parse().expect("Server address is not valid") +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default = "listen_addr")] + pub listen_addr: IpAddr, + #[serde(default = "listen_port")] + pub listen_port: u16, +} + +impl Default for Config { + fn default() -> Self { + Self { + listen_addr: listen_addr(), + listen_port: listen_port(), + } + } +} diff --git a/watch/src/server/error.rs b/watch/src/server/error.rs new file mode 100644 index 0000000000..d1542f7841 --- /dev/null +++ b/watch/src/server/error.rs @@ -0,0 +1,50 @@ +use crate::database::Error as DbError; +use axum::Error as AxumError; +use axum::{http::StatusCode, response::IntoResponse, Json}; +use hyper::Error as HyperError; +use serde_json::json; + +#[derive(Debug)] +pub enum Error { + Axum(AxumError), + Hyper(HyperError), + Database(DbError), + BadRequest, + NotFound, + Other(String), +} + +impl IntoResponse for Error { + fn into_response(self) -> axum::response::Response { + let (status, error_message) = match self { + Self::BadRequest => (StatusCode::BAD_REQUEST, "Bad Request"), + Self::NotFound => (StatusCode::NOT_FOUND, "Not Found"), + _ => (StatusCode::INTERNAL_SERVER_ERROR, "Internal Server Error"), + }; + (status, Json(json!({ "error": error_message }))).into_response() + } +} + +impl From for Error { + fn from(e: HyperError) -> Self { + Error::Hyper(e) + } +} + +impl From for Error { + fn from(e: AxumError) -> Self { + Error::Axum(e) + } +} + +impl From for Error { + fn from(e: DbError) -> Self { + Error::Database(e) + } +} + +impl From for Error { + fn from(e: String) -> 
Self { + Error::Other(e) + } +} diff --git a/watch/src/server/handler.rs b/watch/src/server/handler.rs new file mode 100644 index 0000000000..6777026867 --- /dev/null +++ b/watch/src/server/handler.rs @@ -0,0 +1,266 @@ +use crate::database::{ + self, Error as DbError, PgPool, WatchBeaconBlock, WatchCanonicalSlot, WatchHash, WatchPK, + WatchProposerInfo, WatchSlot, WatchValidator, +}; +use crate::server::Error; +use axum::{ + extract::{Path, Query}, + Extension, Json, +}; +use eth2::types::BlockId; +use std::collections::HashMap; +use std::str::FromStr; + +pub async fn get_slot( + Path(slot): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_canonical_slot( + &mut conn, + WatchSlot::new(slot), + )?)) +} + +pub async fn get_slot_lowest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_lowest_canonical_slot(&mut conn)?)) +} + +pub async fn get_slot_highest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_highest_canonical_slot(&mut conn)?)) +} + +pub async fn get_slots_by_range( + Query(query): Query>, + Extension(pool): Extension, +) -> Result>>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if let Some(start_slot) = query.get("start_slot") { + if let Some(end_slot) = query.get("end_slot") { + if start_slot > end_slot { + Err(Error::BadRequest) + } else { + Ok(Json(database::get_canonical_slots_by_range( + &mut conn, + WatchSlot::new(*start_slot), + WatchSlot::new(*end_slot), + )?)) + } + } else { + Err(Error::BadRequest) + } + } else { + Err(Error::BadRequest) + } +} + +pub async fn get_block( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = 
database::get_connection(&pool).map_err(Error::Database)?; + let block_id: BlockId = BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)?; + match block_id { + BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + BlockId::Root(root) => Ok(Json(database::get_beacon_block_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_block_lowest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_lowest_beacon_block(&mut conn)?)) +} + +pub async fn get_block_highest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_highest_beacon_block(&mut conn)?)) +} + +pub async fn get_block_previous( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => { + if let Some(block) = + database::get_beacon_block_by_root(&mut conn, WatchHash::from_hash(root))? + .map(|block| block.parent_root) + { + Ok(Json(database::get_beacon_block_by_root(&mut conn, block)?)) + } else { + Err(Error::NotFound) + } + } + BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( + &mut conn, + WatchSlot::new(slot.as_u64().checked_sub(1_u64).ok_or(Error::NotFound)?), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_block_next( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? 
{ + BlockId::Root(root) => Ok(Json(database::get_beacon_block_with_parent( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( + &mut conn, + WatchSlot::from_slot(slot + 1_u64), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_blocks_by_range( + Query(query): Query>, + Extension(pool): Extension, +) -> Result>>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if let Some(start_slot) = query.get("start_slot") { + if let Some(end_slot) = query.get("end_slot") { + if start_slot > end_slot { + Err(Error::BadRequest) + } else { + Ok(Json(database::get_beacon_blocks_by_range( + &mut conn, + WatchSlot::new(*start_slot), + WatchSlot::new(*end_slot), + )?)) + } + } else { + Err(Error::BadRequest) + } + } else { + Err(Error::BadRequest) + } +} + +pub async fn get_block_proposer( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? 
{ + BlockId::Root(root) => Ok(Json(database::get_proposer_info_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(database::get_proposer_info_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_validator( + Path(validator_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if validator_query.starts_with("0x") { + let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + Ok(Json(database::get_validator_by_public_key( + &mut conn, pubkey, + )?)) + } else { + let index = i32::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + Ok(Json(database::get_validator_by_index(&mut conn, index)?)) + } +} + +pub async fn get_all_validators( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_all_validators(&mut conn)?)) +} + +pub async fn get_validator_latest_proposal( + Path(validator_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if validator_query.starts_with("0x") { + let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + let validator = + database::get_validator_by_public_key(&mut conn, pubkey)?.ok_or(Error::NotFound)?; + Ok(Json(database::get_validators_latest_proposer_info( + &mut conn, + vec![validator.index], + )?)) + } else { + let index = i32::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + Ok(Json(database::get_validators_latest_proposer_info( + &mut conn, + vec![index], + )?)) + } +} + +pub async fn get_client_breakdown( + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + + 
if let Some(target_slot) = database::get_highest_canonical_slot(&mut conn)? { + Ok(Json(database::get_validators_clients_at_slot( + &mut conn, + target_slot.slot, + slots_per_epoch, + )?)) + } else { + Err(Error::Database(DbError::Other( + "No slots found in database.".to_string(), + ))) + } +} + +pub async fn get_client_breakdown_percentages( + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + + let mut result = HashMap::new(); + if let Some(target_slot) = database::get_highest_canonical_slot(&mut conn)? { + let total = database::count_validators_activated_before_slot( + &mut conn, + target_slot.slot, + slots_per_epoch, + )?; + let clients = + database::get_validators_clients_at_slot(&mut conn, target_slot.slot, slots_per_epoch)?; + for (client, number) in clients.iter() { + let percentage: f64 = *number as f64 / total as f64 * 100.0; + result.insert(client.to_string(), percentage); + } + } + + Ok(Json(result)) +} diff --git a/watch/src/server/mod.rs b/watch/src/server/mod.rs new file mode 100644 index 0000000000..09d5ec6aac --- /dev/null +++ b/watch/src/server/mod.rs @@ -0,0 +1,134 @@ +use crate::block_packing::block_packing_routes; +use crate::block_rewards::block_rewards_routes; +use crate::blockprint::blockprint_routes; +use crate::config::Config as FullConfig; +use crate::database::{self, PgPool}; +use crate::suboptimal_attestations::{attestation_routes, blockprint_attestation_routes}; +use axum::{ + handler::Handler, + http::{StatusCode, Uri}, + routing::get, + Extension, Json, Router, +}; +use eth2::types::ErrorMessage; +use log::info; +use std::future::Future; +use std::net::SocketAddr; +use tokio::sync::oneshot; + +pub use config::Config; +pub use error::Error; + +mod config; +mod error; +mod handler; + +pub async fn serve(config: FullConfig, shutdown: oneshot::Receiver<()>) -> Result<(), Error> { + let db = 
database::build_connection_pool(&config.database)?; + let (_, slots_per_epoch) = database::get_active_config(&mut database::get_connection(&db)?)? + .ok_or_else(|| { + Error::Other( + "Database not found. Please run the updater prior to starting the server" + .to_string(), + ) + })?; + + let server = start_server(&config, slots_per_epoch as u64, db, async { + let _ = shutdown.await; + })?; + + server.await?; + + Ok(()) +} + +/// Creates a server that will serve requests using information from `config`. +/// +/// The server will create its own connection pool to serve connections to the database. +/// This is separate to the connection pool that is used for the `updater`. +/// +/// The server will shut down gracefully when the `shutdown` future resolves. +/// +/// ## Returns +/// +/// This function will bind the server to the address specified in the config and then return a +/// Future representing the actual server that will need to be awaited. +/// +/// ## Errors +/// +/// Returns an error if the server is unable to bind or there is another error during +/// configuration. 
+pub fn start_server( + config: &FullConfig, + slots_per_epoch: u64, + pool: PgPool, + shutdown: impl Future + Send + Sync + 'static, +) -> Result> + 'static, Error> { + let mut routes = Router::new() + .route("/v1/slots", get(handler::get_slots_by_range)) + .route("/v1/slots/:slot", get(handler::get_slot)) + .route("/v1/slots/lowest", get(handler::get_slot_lowest)) + .route("/v1/slots/highest", get(handler::get_slot_highest)) + .route("/v1/slots/:slot/block", get(handler::get_block)) + .route("/v1/blocks", get(handler::get_blocks_by_range)) + .route("/v1/blocks/:block", get(handler::get_block)) + .route("/v1/blocks/lowest", get(handler::get_block_lowest)) + .route("/v1/blocks/highest", get(handler::get_block_highest)) + .route( + "/v1/blocks/:block/previous", + get(handler::get_block_previous), + ) + .route("/v1/blocks/:block/next", get(handler::get_block_next)) + .route( + "/v1/blocks/:block/proposer", + get(handler::get_block_proposer), + ) + .route("/v1/validators/:validator", get(handler::get_validator)) + .route("/v1/validators/all", get(handler::get_all_validators)) + .route( + "/v1/validators/:validator/latest_proposal", + get(handler::get_validator_latest_proposal), + ) + .route("/v1/clients", get(handler::get_client_breakdown)) + .route( + "/v1/clients/percentages", + get(handler::get_client_breakdown_percentages), + ) + .merge(attestation_routes()) + .merge(blockprint_routes()) + .merge(block_packing_routes()) + .merge(block_rewards_routes()); + + if config.blockprint.enabled && config.updater.attestations { + routes = routes.merge(blockprint_attestation_routes()) + } + + let app = routes + .fallback(route_not_found.into_service()) + .layer(Extension(pool)) + .layer(Extension(slots_per_epoch)); + + let addr = SocketAddr::new(config.server.listen_addr, config.server.listen_port); + + let server = axum::Server::try_bind(&addr)?.serve(app.into_make_service()); + + let server = server.with_graceful_shutdown(async { + shutdown.await; + }); + + info!("HTTP 
server listening on {}", addr); + + Ok(server) +} + +// The default route indicating that no available routes matched the request. +async fn route_not_found(uri: Uri) -> (StatusCode, Json) { + ( + StatusCode::METHOD_NOT_ALLOWED, + Json(ErrorMessage { + code: StatusCode::METHOD_NOT_ALLOWED.as_u16(), + message: format!("No route for {uri}"), + stacktraces: vec![], + }), + ) +} diff --git a/watch/src/suboptimal_attestations/database.rs b/watch/src/suboptimal_attestations/database.rs new file mode 100644 index 0000000000..cb947d250a --- /dev/null +++ b/watch/src/suboptimal_attestations/database.rs @@ -0,0 +1,224 @@ +use crate::database::{ + schema::{suboptimal_attestations, validators}, + watch_types::{WatchPK, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::time::Instant; + +use types::Epoch; + +#[derive(Clone, Copy, Debug, Serialize, Deserialize)] +pub struct WatchAttestation { + pub index: i32, + pub epoch: Epoch, + pub source: bool, + pub head: bool, + pub target: bool, +} + +impl WatchAttestation { + pub fn optimal(index: i32, epoch: Epoch) -> WatchAttestation { + WatchAttestation { + index, + epoch, + source: true, + head: true, + target: true, + } + } +} + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = suboptimal_attestations)] +pub struct WatchSuboptimalAttestation { + pub epoch_start_slot: WatchSlot, + pub index: i32, + pub source: bool, + pub head: bool, + pub target: bool, +} + +impl WatchSuboptimalAttestation { + pub fn to_attestation(&self, slots_per_epoch: u64) -> WatchAttestation { + WatchAttestation { + index: self.index, + epoch: self.epoch_start_slot.epoch(slots_per_epoch), + source: self.source, + head: self.head, + target: self.target, + } + } +} + +/// Insert a batch of values into the `suboptimal_attestations` table +/// +/// Since attestations technically occur per-slot 
but we only store them per-epoch (via its +/// `start_slot`) so if any slot in the epoch changes, we need to resync the whole epoch as a +/// 'suboptimal' attestation could now be 'optimal'. +/// +/// This is handled in the update code, where in the case of a re-org, the affected epoch is +/// deleted completely. +/// +/// On a conflict, it will do nothing. +pub fn insert_batch_suboptimal_attestations( + conn: &mut PgConn, + attestations: Vec, +) -> Result<(), Error> { + use self::suboptimal_attestations::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in attestations.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(suboptimal_attestations) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Attestations inserted, count: {count}, time taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `suboptimal_attestations` table where `epoch_start_slot` is minimum. +pub fn get_lowest_attestation( + conn: &mut PgConn, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .order_by(epoch_start_slot.asc()) + .limit(1) + .first::(conn) + .optional()?) +} + +/// Selects the row from the `suboptimal_attestations` table where `epoch_start_slot` is maximum. +pub fn get_highest_attestation( + conn: &mut PgConn, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .order_by(epoch_start_slot.desc()) + .limit(1) + .first::(conn) + .optional()?) +} + +/// Selects a single row from the `suboptimal_attestations` table corresponding to a given +/// `index_query` and `epoch_query`. 
+pub fn get_attestation_by_index( + conn: &mut PgConn, + index_query: i32, + epoch_query: Epoch, + slots_per_epoch: u64, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + let timer = Instant::now(); + + let result = suboptimal_attestations + .filter(epoch_start_slot.eq(WatchSlot::from_slot( + epoch_query.start_slot(slots_per_epoch), + ))) + .filter(index.eq(index_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Attestation requested for validator: {index_query}, epoch: {epoch_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row from the `suboptimal_attestations` table corresponding +/// to a given `pubkey_query` and `epoch_query`. +#[allow(dead_code)] +pub fn get_attestation_by_pubkey( + conn: &mut PgConn, + pubkey_query: WatchPK, + epoch_query: Epoch, + slots_per_epoch: u64, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + use self::validators::dsl::{public_key, validators}; + let timer = Instant::now(); + + let join = validators.inner_join(suboptimal_attestations); + + let result = join + .select((epoch_start_slot, index, source, head, target)) + .filter(epoch_start_slot.eq(WatchSlot::from_slot( + epoch_query.start_slot(slots_per_epoch), + ))) + .filter(public_key.eq(pubkey_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Attestation requested for validator: {pubkey_query}, epoch: {epoch_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `index` for all validators in the suboptimal_attestations table +/// that have `source == false` for the corresponding `epoch_start_slot_query`. +pub fn get_validators_missed_source( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .select(index) + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .filter(source.eq(false)) + .load::(conn)?) 
+} + +/// Selects `index` for all validators in the suboptimal_attestations table +/// that have `head == false` for the corresponding `epoch_start_slot_query`. +pub fn get_validators_missed_head( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .select(index) + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .filter(head.eq(false)) + .load::(conn)?) +} + +/// Selects `index` for all validators in the suboptimal_attestations table +/// that have `target == false` for the corresponding `epoch_start_slot_query`. +pub fn get_validators_missed_target( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .select(index) + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .filter(target.eq(false)) + .load::(conn)?) +} + +/// Selects all rows from the `suboptimal_attestations` table for the given +/// `epoch_start_slot_query`. +pub fn get_all_suboptimal_attestations_for_epoch( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .load::(conn)?) 
+} diff --git a/watch/src/suboptimal_attestations/mod.rs b/watch/src/suboptimal_attestations/mod.rs new file mode 100644 index 0000000000..a94532e8ab --- /dev/null +++ b/watch/src/suboptimal_attestations/mod.rs @@ -0,0 +1,56 @@ +pub mod database; +pub mod server; +pub mod updater; + +use crate::database::watch_types::WatchSlot; +use crate::updater::error::Error; + +pub use database::{ + get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, get_attestation_by_pubkey, + get_highest_attestation, get_lowest_attestation, insert_batch_suboptimal_attestations, + WatchAttestation, WatchSuboptimalAttestation, +}; + +pub use server::{attestation_routes, blockprint_attestation_routes}; + +use eth2::BeaconNodeHttpClient; +use types::Epoch; + +/// Sends a request to `lighthouse/analysis/attestation_performance`. +/// Formats the response into a vector of `WatchSuboptimalAttestation`. +/// +/// Any attestations with `source == true && head == true && target == true` are ignored. +pub async fn get_attestation_performances( + bn: &BeaconNodeHttpClient, + start_epoch: Epoch, + end_epoch: Epoch, + slots_per_epoch: u64, +) -> Result, Error> { + let mut output = Vec::new(); + let result = bn + .get_lighthouse_analysis_attestation_performance( + start_epoch, + end_epoch, + "global".to_string(), + ) + .await?; + for index in result { + for epoch in index.epochs { + if epoch.1.active { + // Check if the attestation is suboptimal. 
+ if !epoch.1.source || !epoch.1.head || !epoch.1.target { + output.push(WatchSuboptimalAttestation { + epoch_start_slot: WatchSlot::from_slot( + Epoch::new(epoch.0).start_slot(slots_per_epoch), + ), + index: index.index as i32, + source: epoch.1.source, + head: epoch.1.head, + target: epoch.1.target, + }) + } + } + } + } + Ok(output) +} diff --git a/watch/src/suboptimal_attestations/server.rs b/watch/src/suboptimal_attestations/server.rs new file mode 100644 index 0000000000..391db9a41b --- /dev/null +++ b/watch/src/suboptimal_attestations/server.rs @@ -0,0 +1,299 @@ +use crate::database::{ + get_canonical_slot, get_connection, get_validator_by_index, get_validator_by_public_key, + get_validators_clients_at_slot, get_validators_latest_proposer_info, PgPool, WatchPK, + WatchSlot, +}; + +use crate::blockprint::database::construct_validator_blockprints_at_slot; +use crate::server::Error; +use crate::suboptimal_attestations::database::{ + get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, + get_validators_missed_head, get_validators_missed_source, get_validators_missed_target, + WatchAttestation, WatchSuboptimalAttestation, +}; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use std::collections::{HashMap, HashSet}; +use std::str::FromStr; +use types::Epoch; + +// Will return Ok(None) if the epoch is not synced or if the validator does not exist. +// In the future it might be worth differentiating these events. +pub async fn get_validator_attestation( + Path((validator_query, epoch_query)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + let epoch = Epoch::new(epoch_query); + + // Ensure the database has synced the target epoch. + if get_canonical_slot( + &mut conn, + WatchSlot::from_slot(epoch.end_slot(slots_per_epoch)), + )? + .is_none() + { + // Epoch is not fully synced. 
+ return Ok(Json(None)); + } + + let index = if validator_query.starts_with("0x") { + let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + get_validator_by_public_key(&mut conn, pubkey)? + .ok_or(Error::NotFound)? + .index + } else { + i32::from_str(&validator_query).map_err(|_| Error::BadRequest)? + }; + let attestation = if let Some(suboptimal_attestation) = + get_attestation_by_index(&mut conn, index, epoch, slots_per_epoch)? + { + Some(suboptimal_attestation.to_attestation(slots_per_epoch)) + } else { + // Attestation was not in database. Check if the validator was active. + match get_validator_by_index(&mut conn, index)? { + Some(validator) => { + if let Some(activation_epoch) = validator.activation_epoch { + if activation_epoch <= epoch.as_u64() as i32 { + if let Some(exit_epoch) = validator.exit_epoch { + if exit_epoch > epoch.as_u64() as i32 { + // Validator is active and has not yet exited. + Some(WatchAttestation::optimal(index, epoch)) + } else { + // Validator has exited. + None + } + } else { + // Validator is active and has not yet exited. + Some(WatchAttestation::optimal(index, epoch)) + } + } else { + // Validator is not yet active. + None + } + } else { + // Validator is not yet active. 
+ None + } + } + None => return Err(Error::Other("Validator index does not exist".to_string())), + } + }; + Ok(Json(attestation)) +} + +pub async fn get_all_validators_attestations( + Path(epoch): Path, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let epoch_start_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + + Ok(Json(get_all_suboptimal_attestations_for_epoch( + &mut conn, + epoch_start_slot, + )?)) +} + +pub async fn get_validators_missed_vote( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let epoch_start_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + match vote.to_lowercase().as_str() { + "source" => Ok(Json(get_validators_missed_source( + &mut conn, + epoch_start_slot, + )?)), + "head" => Ok(Json(get_validators_missed_head( + &mut conn, + epoch_start_slot, + )?)), + "target" => Ok(Json(get_validators_missed_target( + &mut conn, + epoch_start_slot, + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_validators_missed_vote_graffiti( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let Json(indices) = get_validators_missed_vote( + Path((vote, epoch)), + Extension(pool), + Extension(slots_per_epoch), + ) + .await?; + + let graffitis = get_validators_latest_proposer_info(&mut conn, indices)? 
+ .values() + .map(|info| info.graffiti.clone()) + .collect::>(); + + let mut result = HashMap::new(); + for graffiti in graffitis { + if !result.contains_key(&graffiti) { + result.insert(graffiti.clone(), 0); + } + *result + .get_mut(&graffiti) + .ok_or_else(|| Error::Other("An unexpected error occurred".to_string()))? += 1; + } + + Ok(Json(result)) +} + +pub fn attestation_routes() -> Router { + Router::new() + .route( + "/v1/validators/:validator/attestation/:epoch", + get(get_validator_attestation), + ) + .route( + "/v1/validators/all/attestation/:epoch", + get(get_all_validators_attestations), + ) + .route( + "/v1/validators/missed/:vote/:epoch", + get(get_validators_missed_vote), + ) + .route( + "/v1/validators/missed/:vote/:epoch/graffiti", + get(get_validators_missed_vote_graffiti), + ) +} + +/// The functions below are dependent on Blockprint and if it is disabled, the endpoints will be +/// disabled. +pub async fn get_clients_missed_vote( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let Json(indices) = get_validators_missed_vote( + Path((vote, epoch)), + Extension(pool), + Extension(slots_per_epoch), + ) + .await?; + + // All validators which missed the vote. + let indices_map = indices.into_iter().collect::>(); + + let target_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + + // All validators. + let client_map = + construct_validator_blockprints_at_slot(&mut conn, target_slot, slots_per_epoch)?; + + let mut result = HashMap::new(); + + for index in indices_map { + if let Some(print) = client_map.get(&index) { + if !result.contains_key(print) { + result.insert(print.clone(), 0); + } + *result + .get_mut(print) + .ok_or_else(|| Error::Other("An unexpected error occurred".to_string()))? 
+= 1; + } + } + + Ok(Json(result)) +} + +pub async fn get_clients_missed_vote_percentages( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let Json(clients_counts) = get_clients_missed_vote( + Path((vote, epoch)), + Extension(pool.clone()), + Extension(slots_per_epoch), + ) + .await?; + + let target_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + + let mut conn = get_connection(&pool)?; + let totals = get_validators_clients_at_slot(&mut conn, target_slot, slots_per_epoch)?; + + let mut result = HashMap::new(); + for (client, count) in clients_counts.iter() { + let client_total: f64 = *totals + .get(client) + .ok_or_else(|| Error::Other("Client type mismatch".to_string()))? + as f64; + // `client_total` should never be `0`, but if it is, return `0` instead of `inf`. + if client_total == 0.0 { + result.insert(client.to_string(), 0.0); + } else { + let percentage: f64 = *count as f64 / client_total * 100.0; + result.insert(client.to_string(), percentage); + } + } + + Ok(Json(result)) +} + +pub async fn get_clients_missed_vote_percentages_relative( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let Json(clients_counts) = get_clients_missed_vote( + Path((vote, epoch)), + Extension(pool), + Extension(slots_per_epoch), + ) + .await?; + + let mut total: u64 = 0; + for (_, count) in clients_counts.iter() { + total += *count + } + + let mut result = HashMap::new(); + for (client, count) in clients_counts.iter() { + // `total` should never be 0, but if it is, return `-` instead of `inf`. 
+ if total == 0 { + result.insert(client.to_string(), 0.0); + } else { + let percentage: f64 = *count as f64 / total as f64 * 100.0; + result.insert(client.to_string(), percentage); + } + } + + Ok(Json(result)) +} + +pub fn blockprint_attestation_routes() -> Router { + Router::new() + .route( + "/v1/clients/missed/:vote/:epoch", + get(get_clients_missed_vote), + ) + .route( + "/v1/clients/missed/:vote/:epoch/percentages", + get(get_clients_missed_vote_percentages), + ) + .route( + "/v1/clients/missed/:vote/:epoch/percentages/relative", + get(get_clients_missed_vote_percentages_relative), + ) +} diff --git a/watch/src/suboptimal_attestations/updater.rs b/watch/src/suboptimal_attestations/updater.rs new file mode 100644 index 0000000000..aeabff2035 --- /dev/null +++ b/watch/src/suboptimal_attestations/updater.rs @@ -0,0 +1,236 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use crate::suboptimal_attestations::get_attestation_performances; + +use eth2::types::EthSpec; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS: u64 = 50; + +impl UpdateHandler { + /// Forward fills the `suboptimal_attestations` table starting from the entry with the highest + /// slot. + /// + /// It construts a request to the `attestation_performance` API endpoint with: + /// `start_epoch` -> highest completely filled epoch + 1 (or epoch of lowest canonical slot) + /// `end_epoch` -> epoch of highest canonical slot + /// + /// It will resync the latest epoch if it is not fully filled but will not overwrite existing + /// values unless there is a re-org. + /// That is, `if highest_filled_slot % slots_per_epoch != 31`. + /// + /// In the event the most recent epoch has no suboptimal attestations, it will attempt to + /// resync that epoch. The odds of this occuring on mainnet are vanishingly small so it is not + /// accounted for. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`. 
+ pub async fn fill_suboptimal_attestations(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + let highest_filled_slot_opt = if self.config.attestations { + database::get_highest_attestation(&mut conn)? + .map(|attestation| attestation.epoch_start_slot.as_slot()) + } else { + return Err(Error::NotEnabled("attestations".to_string())); + }; + + let start_epoch = if let Some(highest_filled_slot) = highest_filled_slot_opt { + if highest_filled_slot % self.slots_per_epoch == self.slots_per_epoch.saturating_sub(1) + { + // The whole epoch is filled so we can begin syncing the next one. + highest_filled_slot.epoch(self.slots_per_epoch) + 1 + } else { + // The epoch is only partially synced. Try to sync it fully. + highest_filled_slot.epoch(self.slots_per_epoch) + } + } else { + // No rows present in the `suboptimal_attestations` table. Use `canonical_slots` + // instead. + if let Some(lowest_canonical_slot) = database::get_lowest_canonical_slot(&mut conn)? { + lowest_canonical_slot + .slot + .as_slot() + .epoch(self.slots_per_epoch) + } else { + // There are no slots in the database, do not fill the `suboptimal_attestations` + // table. + warn!("Refusing to fill the `suboptimal_attestations` table as there are no slots in the database"); + return Ok(()); + } + }; + + if let Some(highest_canonical_slot) = + database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut end_epoch = highest_canonical_slot.epoch(self.slots_per_epoch); + + // The `lighthouse/analysis/attestation_performance` endpoint can only retrieve attestations + // which are more than 1 epoch old. + // We assume that `highest_canonical_slot` is near the head of the chain. + end_epoch = end_epoch.saturating_sub(2_u64); + + // If end_epoch == 0 then the chain just started so we need to wait until + // `current_epoch >= 2`. 
+ if end_epoch == 0 { + debug!("Chain just begun, refusing to sync attestations"); + return Ok(()); + } + + if start_epoch > end_epoch { + debug!("Attestations are up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. + if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) { + end_epoch = start_epoch + MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS + } + + if let Some(lowest_canonical_slot) = + database::get_lowest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut attestations = get_attestation_performances( + &self.bn, + start_epoch, + end_epoch, + self.slots_per_epoch, + ) + .await?; + + // Only insert attestations with corresponding `canonical_slot`s. + attestations.retain(|attestation| { + attestation.epoch_start_slot.as_slot() >= lowest_canonical_slot + && attestation.epoch_start_slot.as_slot() <= highest_canonical_slot + }); + database::insert_batch_suboptimal_attestations(&mut conn, attestations)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest canonical slot when one exists".to_string(), + ))); + } + } else { + // There are no slots in the `canonical_slots` table, but there are entries in the + // `suboptimal_attestations` table. This is a critical failure. It usually means + // someone has manually tampered with the database tables and should not occur during + // normal operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } + + /// Backfill the `suboptimal_attestations` table starting from the entry with the lowest slot. + /// + /// It constructs a request to the `attestation_performance` API endpoint with: + /// `start_epoch` -> epoch of the lowest `canonical_slot`. 
+ /// `end_epoch` -> epoch of the lowest filled `suboptimal_attestation` - 1 (or epoch of highest + /// canonical slot) + /// + /// It will resync the lowest epoch if it is not fully filled. + /// That is, `if lowest_filled_slot % slots_per_epoch != 0` + /// + /// In the event there are no suboptimal attestations present in the lowest epoch, it will attempt to + /// resync the epoch. The odds of this occurring on mainnet are vanishingly small so it is not + /// accounted for. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`. + pub async fn backfill_suboptimal_attestations(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let max_attestation_backfill = self.config.max_backfill_size_epochs; + + // Get the slot of the lowest entry in the `suboptimal_attestations` table. + let lowest_filled_slot_opt = if self.config.attestations { + database::get_lowest_attestation(&mut conn)? + .map(|attestation| attestation.epoch_start_slot.as_slot()) + } else { + return Err(Error::NotEnabled("attestations".to_string())); + }; + + let end_epoch = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { + if lowest_filled_slot % self.slots_per_epoch == 0 { + lowest_filled_slot + .epoch(self.slots_per_epoch) + .saturating_sub(1_u64) + } else { + // The epoch is only partially synced. Try to sync it fully. + lowest_filled_slot.epoch(self.slots_per_epoch) + } + } else { + // No entries in the `suboptimal_attestations` table. Use `canonical_slots` instead. + if let Some(highest_canonical_slot) = + database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + // Subtract 2 since `end_epoch` must be less than the current epoch - 1. + // We assume that `highest_canonical_slot` is near the head of the chain.
+ highest_canonical_slot + .epoch(self.slots_per_epoch) + .saturating_sub(2_u64) + } else { + // There are no slots in the database, do not backfill the + // `suboptimal_attestations` table. + warn!("Refusing to backfill attestations as there are no slots in the database"); + return Ok(()); + } + }; + + if end_epoch == 0 { + debug!("Attestations backfill is complete"); + return Ok(()); + } + + if let Some(lowest_canonical_slot) = + database::get_lowest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut start_epoch = lowest_canonical_slot.epoch(self.slots_per_epoch); + + if start_epoch > end_epoch { + debug!("Attestations are up to date with the base of the database"); + return Ok(()); + } + + // Ensure the request range does not exceed `max_attestation_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`. + if start_epoch < end_epoch.saturating_sub(max_attestation_backfill) { + start_epoch = end_epoch.saturating_sub(max_attestation_backfill) + } + if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) { + start_epoch = end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) + } + + if let Some(highest_canonical_slot) = + database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut attestations = get_attestation_performances( + &self.bn, + start_epoch, + end_epoch, + self.slots_per_epoch, + ) + .await?; + + // Only insert `suboptimal_attestations` with corresponding `canonical_slots`. 
+ attestations.retain(|attestation| { + attestation.epoch_start_slot.as_slot() >= lowest_canonical_slot + && attestation.epoch_start_slot.as_slot() <= highest_canonical_slot + }); + + database::insert_batch_suboptimal_attestations(&mut conn, attestations)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest slot when one exists".to_string(), + ))); + } + } else { + // There are no slots in the `canonical_slot` table, but there are entries in the + // `suboptimal_attestations` table. This is a critical failure. It usually means + // someone has manually tampered with the database tables and should not occur during + // normal operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } +} diff --git a/watch/src/updater/config.rs b/watch/src/updater/config.rs new file mode 100644 index 0000000000..0179be73db --- /dev/null +++ b/watch/src/updater/config.rs @@ -0,0 +1,65 @@ +use serde::{Deserialize, Serialize}; + +pub const BEACON_NODE_URL: &str = "http://127.0.0.1:5052"; + +pub const fn max_backfill_size_epochs() -> u64 { + 2 +} +pub const fn backfill_stop_epoch() -> u64 { + 0 +} +pub const fn attestations() -> bool { + true +} +pub const fn proposer_info() -> bool { + true +} +pub const fn block_rewards() -> bool { + true +} +pub const fn block_packing() -> bool { + true +} + +fn beacon_node_url() -> String { + BEACON_NODE_URL.to_string() +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + /// The URL of the beacon you wish to sync from. + #[serde(default = "beacon_node_url")] + pub beacon_node_url: String, + /// The maximum size each backfill iteration will allow per request (in epochs). + #[serde(default = "max_backfill_size_epochs")] + pub max_backfill_size_epochs: u64, + /// The epoch at which to never backfill past. 
+ #[serde(default = "backfill_stop_epoch")] + pub backfill_stop_epoch: u64, + /// Whether to sync the suboptimal_attestations table. + #[serde(default = "attestations")] + pub attestations: bool, + /// Whether to sync the proposer_info table. + #[serde(default = "proposer_info")] + pub proposer_info: bool, + /// Whether to sync the block_rewards table. + #[serde(default = "block_rewards")] + pub block_rewards: bool, + /// Whether to sync the block_packing table. + #[serde(default = "block_packing")] + pub block_packing: bool, +} + +impl Default for Config { + fn default() -> Self { + Self { + beacon_node_url: beacon_node_url(), + max_backfill_size_epochs: max_backfill_size_epochs(), + backfill_stop_epoch: backfill_stop_epoch(), + attestations: attestations(), + proposer_info: proposer_info(), + block_rewards: block_rewards(), + block_packing: block_packing(), + } + } +} diff --git a/watch/src/updater/error.rs b/watch/src/updater/error.rs new file mode 100644 index 0000000000..74091c8f21 --- /dev/null +++ b/watch/src/updater/error.rs @@ -0,0 +1,56 @@ +use crate::blockprint::Error as BlockprintError; +use crate::database::Error as DbError; +use beacon_node::beacon_chain::BeaconChainError; +use eth2::{Error as Eth2Error, SensitiveError}; +use std::fmt; + +#[derive(Debug)] +pub enum Error { + BeaconChain(BeaconChainError), + Eth2(Eth2Error), + SensitiveUrl(SensitiveError), + Database(DbError), + Blockprint(BlockprintError), + UnableToGetRemoteHead, + BeaconNodeSyncing, + NotEnabled(String), + NoValidatorsFound, + BeaconNodeNotCompatible(String), + InvalidConfig(String), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl From for Error { + fn from(e: BeaconChainError) -> Self { + Error::BeaconChain(e) + } +} + +impl From for Error { + fn from(e: Eth2Error) -> Self { + Error::Eth2(e) + } +} + +impl From for Error { + fn from(e: SensitiveError) -> Self { + Error::SensitiveUrl(e) + } +} 
+ +impl From for Error { + fn from(e: DbError) -> Self { + Error::Database(e) + } +} + +impl From for Error { + fn from(e: BlockprintError) -> Self { + Error::Blockprint(e) + } +} diff --git a/watch/src/updater/handler.rs b/watch/src/updater/handler.rs new file mode 100644 index 0000000000..1e1662bf74 --- /dev/null +++ b/watch/src/updater/handler.rs @@ -0,0 +1,471 @@ +use crate::blockprint::WatchBlockprintClient; +use crate::config::Config as FullConfig; +use crate::database::{self, PgPool, WatchCanonicalSlot, WatchHash, WatchSlot}; +use crate::updater::{Config, Error, WatchSpec}; +use beacon_node::beacon_chain::BeaconChainError; +use eth2::{ + types::{BlockId, SyncingData}, + BeaconNodeHttpClient, SensitiveUrl, +}; +use log::{debug, error, info, warn}; +use std::collections::HashSet; +use std::iter::FromIterator; +use types::{BeaconBlockHeader, EthSpec, Hash256, SignedBeaconBlock, Slot}; + +use crate::updater::{get_beacon_block, get_header, get_validators}; + +const MAX_EXPECTED_REORG_LENGTH: u64 = 32; + +/// Ensure the existing database is valid for this run. +pub async fn ensure_valid_database( + spec: &WatchSpec, + pool: &mut PgPool, +) -> Result<(), Error> { + let mut conn = database::get_connection(pool)?; + + let bn_slots_per_epoch = spec.slots_per_epoch(); + let bn_config_name = spec.network.clone(); + + if let Some((db_config_name, db_slots_per_epoch)) = database::get_active_config(&mut conn)? { + if db_config_name != bn_config_name || db_slots_per_epoch != bn_slots_per_epoch as i32 { + Err(Error::InvalidConfig( + "The config stored in the database does not match the beacon node.".to_string(), + )) + } else { + // Configs match. + Ok(()) + } + } else { + // No config exists in the DB. 
+ database::insert_active_config(&mut conn, bn_config_name, bn_slots_per_epoch)?; + Ok(()) + } +} + +pub struct UpdateHandler { + pub pool: PgPool, + pub bn: BeaconNodeHttpClient, + pub blockprint: Option, + pub config: Config, + pub slots_per_epoch: u64, + pub spec: WatchSpec, +} + +impl UpdateHandler { + pub async fn new( + bn: BeaconNodeHttpClient, + spec: WatchSpec, + config: FullConfig, + ) -> Result, Error> { + let blockprint = if config.blockprint.enabled { + if let Some(server) = config.blockprint.url { + let blockprint_url = SensitiveUrl::parse(&server).map_err(Error::SensitiveUrl)?; + Some(WatchBlockprintClient { + client: reqwest::Client::new(), + server: blockprint_url, + username: config.blockprint.username, + password: config.blockprint.password, + }) + } else { + return Err(Error::NotEnabled( + "blockprint was enabled but url was not set".to_string(), + )); + } + } else { + None + }; + + let mut pool = database::build_connection_pool(&config.database)?; + + ensure_valid_database(&spec, &mut pool).await?; + + Ok(Self { + pool, + bn, + blockprint, + config: config.updater, + slots_per_epoch: spec.slots_per_epoch(), + spec, + }) + } + + /// Gets the syncing status of the connected beacon node. + pub async fn get_bn_syncing_status(&mut self) -> Result { + Ok(self.bn.get_node_syncing().await?.data) + } + + /// Gets a list of block roots from the database which do not yet contain a corresponding + /// entry in the `beacon_blocks` table and inserts them. + pub async fn update_unknown_blocks(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let roots = database::get_unknown_canonical_blocks(&mut conn)?; + for root in roots { + let block_opt: Option> = + get_beacon_block(&self.bn, BlockId::Root(root.as_hash())).await?; + if let Some(block) = block_opt { + database::insert_beacon_block(&mut conn, block, root)?; + } + } + + Ok(()) + } + + /// Performs a head update with the following steps: + /// 1. 
Pull the latest header from the beacon node and the latest canonical slot from the + /// database. + /// 2. Loop back through the beacon node and database to find the first matching slot -> root + /// pair. + /// 3. Go back `MAX_EXPECTED_REORG_LENGTH` slots through the database ensuring it is + /// consistent with the beacon node. If a re-org occurs beyond this range, we cannot recover. + /// 4. Remove any invalid slots from the database. + /// 5. Sync all blocks between the first valid block of the database and the head of the beacon + /// chain. + /// + /// In the event there are no slots present in the database, it will sync from the head + /// block back to the first slot of the epoch. + /// This will ensure backfills are always done in full epochs (which helps keep certain syncing + /// tasks efficient). + pub async fn perform_head_update(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + // Load the head from the beacon node. + let bn_header = get_header(&self.bn, BlockId::Head) + .await? + .ok_or(Error::UnableToGetRemoteHead)?; + let header_root = bn_header.canonical_root(); + + if let Some(latest_matching_canonical_slot) = + self.get_first_matching_block(bn_header.clone()).await? + { + // Check for reorgs. + let latest_db_slot = self.check_for_reorg(latest_matching_canonical_slot).await?; + + // Remove all slots above `latest_db_slot` from the database. + let result = database::delete_canonical_slots_above( + &mut conn, + WatchSlot::from_slot(latest_db_slot), + )?; + info!("{result} old records removed during head update"); + + if result > 0 { + // If slots were removed, we need to resync the suboptimal_attestations table for + // the epoch since they will have changed and cannot be fixed by a simple update.
+ let epoch = latest_db_slot + .epoch(self.slots_per_epoch) + .saturating_sub(1_u64); + debug!("Preparing to resync attestations above epoch {epoch}"); + database::delete_suboptimal_attestations_above( + &mut conn, + WatchSlot::from_slot(epoch.start_slot(self.slots_per_epoch)), + )?; + } + + // Since we are syncing backwards, `start_slot > `end_slot`. + let start_slot = bn_header.slot; + let end_slot = latest_db_slot + 1; + self.reverse_fill_canonical_slots(bn_header, header_root, false, start_slot, end_slot) + .await?; + info!("Reverse sync begun at slot {start_slot} and stopped at slot {end_slot}"); + + // Attempt to sync new blocks with blockprint. + //self.sync_blockprint_until(start_slot).await?; + } else { + // There are no matching parent blocks. Sync from the head block back until the first + // block of the epoch. + let start_slot = bn_header.slot; + let end_slot = start_slot.saturating_sub(start_slot % self.slots_per_epoch); + self.reverse_fill_canonical_slots(bn_header, header_root, false, start_slot, end_slot) + .await?; + info!("Reverse sync begun at slot {start_slot} and stopped at slot {end_slot}"); + } + + Ok(()) + } + + /// Attempt to find a row in the `canonical_slots` table which matches the `canonical_root` of + /// the block header as reported by the beacon node. + /// + /// Any blocks above this value are not canonical according to the beacon node. + /// + /// Note: In the event that there are skip slots above the slot returned by the function, + /// they will not be returned, so may be pruned or re-synced by other code despite being + /// canonical. + pub async fn get_first_matching_block( + &mut self, + mut bn_header: BeaconBlockHeader, + ) -> Result, Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Load latest non-skipped canonical slot from database. + if let Some(db_canonical_slot) = + database::get_highest_non_skipped_canonical_slot(&mut conn)? 
+ { + // Check if the header or parent root matches the entry in the database. + if bn_header.parent_root == db_canonical_slot.root.as_hash() + || bn_header.canonical_root() == db_canonical_slot.root.as_hash() + { + Ok(Some(db_canonical_slot)) + } else { + // Header is not the child of the highest entry in the database. + // From here we need to iterate backwards through the database until we find + // a slot -> root pair that matches the beacon node. + loop { + // Store working `parent_root`. + let parent_root = bn_header.parent_root; + + // Try the next header. + let next_header = get_header(&self.bn, BlockId::Root(parent_root)).await?; + if let Some(header) = next_header { + bn_header = header.clone(); + if let Some(db_canonical_slot) = database::get_canonical_slot_by_root( + &mut conn, + WatchHash::from_hash(header.parent_root), + )? { + // Check if the entry in the database matches the parent of + // the header. + if header.parent_root == db_canonical_slot.root.as_hash() { + return Ok(Some(db_canonical_slot)); + } else { + // Move on to the next header. + continue; + } + } else { + // Database does not have the referenced root. Try the next header. + continue; + } + } else { + // If we get this error it means that the `parent_root` of the header + // did not reference a canonical block. + return Err(Error::BeaconChain(BeaconChainError::MissingBeaconBlock( + parent_root, + ))); + } + } + } + } else { + // There are no non-skipped blocks present in the database. + Ok(None) + } + } + + /// Given the latest slot in the database which matches a root in the beacon node, + /// traverse back through the database for `MAX_EXPECTED_REORG_LENGTH` slots to ensure the tip + /// of the database is consistent with the beacon node (in the case that reorgs have occured). + /// + /// Returns the slot before the oldest canonical_slot which has an invalid child. 
+ pub async fn check_for_reorg( + &mut self, + latest_canonical_slot: WatchCanonicalSlot, + ) -> Result { + let mut conn = database::get_connection(&self.pool)?; + + let end_slot = latest_canonical_slot.slot.as_u64(); + let start_slot = end_slot.saturating_sub(MAX_EXPECTED_REORG_LENGTH); + + for i in start_slot..end_slot { + let slot = Slot::new(i); + let db_canonical_slot_opt = + database::get_canonical_slot(&mut conn, WatchSlot::from_slot(slot))?; + if let Some(db_canonical_slot) = db_canonical_slot_opt { + let header_opt = get_header(&self.bn, BlockId::Slot(slot)).await?; + if let Some(header) = header_opt { + if header.canonical_root() == db_canonical_slot.root.as_hash() { + // The roots match (or are both skip slots). + continue; + } else { + // The block roots do not match. We need to re-sync from here. + warn!("Block {slot} does not match the beacon node. Resyncing"); + return Ok(slot.saturating_sub(1_u64)); + } + } else if !db_canonical_slot.skipped { + // The block exists in the database, but does not exist on the beacon node. + // We need to re-sync from here. + warn!("Block {slot} does not exist on the beacon node. Resyncing"); + return Ok(slot.saturating_sub(1_u64)); + } + } else { + // This slot does not exist in the database. + let lowest_slot = database::get_lowest_canonical_slot(&mut conn)? + .map(|canonical_slot| canonical_slot.slot.as_slot()); + if lowest_slot > Some(slot) { + // The database has not back-filled this slot yet, so skip it. + continue; + } else { + // The database does not contain this block, but has back-filled past it. + // We need to resync from here. + warn!("Slot {slot} missing from database. Resyncing"); + return Ok(slot.saturating_sub(1_u64)); + } + } + } + + // The database is consistent with the beacon node, so return the head of the database. + Ok(latest_canonical_slot.slot.as_slot()) + } + + /// Fills the canonical slots table beginning from `start_slot` and ending at `end_slot`. 
+ /// It fills in reverse order, that is, `start_slot` is higher than `end_slot`. + /// + /// Skip slots set `root` to the root of the previous non-skipped slot and also sets + /// `skipped == true`. + /// + /// Since it uses `insert_canonical_slot` to interact with the database, it WILL NOT overwrite + /// existing rows. This means that any part of the chain within `end_slot..=start_slot` that + /// needs to be resynced, must first be deleted from the database. + pub async fn reverse_fill_canonical_slots( + &mut self, + mut header: BeaconBlockHeader, + mut header_root: Hash256, + mut skipped: bool, + start_slot: Slot, + end_slot: Slot, + ) -> Result { + let mut count = 0; + + let mut conn = database::get_connection(&self.pool)?; + + // Iterate, descending from `start_slot` (higher) to `end_slot` (lower). + for slot in (end_slot.as_u64()..=start_slot.as_u64()).rev() { + // Insert header. + database::insert_canonical_slot( + &mut conn, + WatchCanonicalSlot { + slot: WatchSlot::new(slot), + root: WatchHash::from_hash(header_root), + skipped, + beacon_block: None, + }, + )?; + count += 1; + + // Load the next header: + // We must use BlockId::Slot since we want to include skip slots. + header = if let Some(new_header) = get_header( + &self.bn, + BlockId::Slot(Slot::new(slot.saturating_sub(1_u64))), + ) + .await? + { + header_root = new_header.canonical_root(); + skipped = false; + new_header + } else { + if header.slot == 0 { + info!("Reverse fill exhausted at slot 0"); + break; + } + // Slot was skipped, so use the parent_root (most recent non-skipped block). + skipped = true; + header_root = header.parent_root; + header + }; + } + + Ok(count) + } + + /// Backfills the `canonical_slots` table starting from the lowest non-skipped slot and + /// stopping after `max_backfill_size_epochs` epochs. 
+ pub async fn backfill_canonical_slots(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let backfill_stop_slot = self.config.backfill_stop_epoch * self.slots_per_epoch; + // Check to see if we have finished backfilling. + if let Some(lowest_slot) = database::get_lowest_canonical_slot(&mut conn)? { + if lowest_slot.slot.as_slot() == backfill_stop_slot { + debug!("Backfill sync complete, all slots filled"); + return Ok(()); + } + } + + let backfill_slot_count = self.config.max_backfill_size_epochs * self.slots_per_epoch; + + if let Some(lowest_non_skipped_canonical_slot) = + database::get_lowest_non_skipped_canonical_slot(&mut conn)? + { + // Set `start_slot` equal to the lowest non-skipped slot in the database. + // While this will attempt to resync some parts of the bottom of the chain, it reduces + // complexity when dealing with skip slots. + let start_slot = lowest_non_skipped_canonical_slot.slot.as_slot(); + let mut end_slot = lowest_non_skipped_canonical_slot + .slot + .as_slot() + .saturating_sub(backfill_slot_count); + + // Ensure end_slot doesn't go below `backfill_stop_epoch` + if end_slot <= backfill_stop_slot { + end_slot = Slot::new(backfill_stop_slot); + } + + let header_opt = get_header(&self.bn, BlockId::Slot(start_slot)).await?; + + if let Some(header) = header_opt { + let header_root = header.canonical_root(); + let count = self + .reverse_fill_canonical_slots(header, header_root, false, start_slot, end_slot) + .await?; + + info!("Backfill completed to slot: {end_slot}, records added: {count}"); + } else { + // The lowest slot of the database is inconsistent with the beacon node. + // Currently we have no way to recover from this. The entire database will need to + // be re-synced. + error!( + "Database is inconsistent with the beacon node. \ + Please ensure your beacon node is set to the right network, \ + otherwise you may need to resync" + ); + } + } else { + // There are no blocks in the database. 
Forward sync needs to happen first. + info!("Backfill was not performed since there are no blocks in the database"); + return Ok(()); + }; + + Ok(()) + } + + // Attempt to update the validator set. + // This downloads the latest validator set from the beacon node, and pulls the known validator + // set from the database. + // We then take any new or updated validators and insert them into the database (overwriting + // existing validators). + // + // In the event there are no validators in the database, it will initialize the validator set. + pub async fn update_validator_set(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + let current_validators = database::get_all_validators(&mut conn)?; + + if !current_validators.is_empty() { + let old_validators = HashSet::from_iter(current_validators); + + // Pull the new validator set from the beacon node. + let new_validators = get_validators(&self.bn).await?; + + // The difference should only contain validators that contain either a new `exit_epoch` (implying an + // exit) or a new `index` (implying a validator activation). + let val_diff = new_validators.difference(&old_validators); + + for diff in val_diff { + database::insert_validator(&mut conn, diff.clone())?; + } + } else { + info!("No validators present in database. Initializing the validator set"); + self.initialize_validator_set().await?; + } + + Ok(()) + } + + // Initialize the validator set by downloading it from the beacon node, inserting blockprint + // data (if required) and writing it to the database. + pub async fn initialize_validator_set(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Pull all validators from the beacon node.
+ let validators = Vec::from_iter(get_validators(&self.bn).await?); + + database::insert_batch_validators(&mut conn, validators)?; + + Ok(()) + } +} diff --git a/watch/src/updater/mod.rs b/watch/src/updater/mod.rs new file mode 100644 index 0000000000..1fbb0107ae --- /dev/null +++ b/watch/src/updater/mod.rs @@ -0,0 +1,234 @@ +use crate::config::Config as FullConfig; +use crate::database::{WatchPK, WatchValidator}; +use eth2::{ + types::{BlockId, StateId}, + BeaconNodeHttpClient, SensitiveUrl, Timeouts, +}; +use log::{debug, error, info}; +use std::collections::{HashMap, HashSet}; +use std::marker::PhantomData; +use std::time::{Duration, Instant}; +use types::{BeaconBlockHeader, EthSpec, GnosisEthSpec, MainnetEthSpec, SignedBeaconBlock}; + +pub use config::Config; +pub use error::Error; +pub use handler::UpdateHandler; + +mod config; +pub mod error; +pub mod handler; + +const FAR_FUTURE_EPOCH: u64 = u64::MAX; +const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); + +const MAINNET: &str = "mainnet"; +const GNOSIS: &str = "gnosis"; + +pub struct WatchSpec { + network: String, + spec: PhantomData, +} + +impl WatchSpec { + fn slots_per_epoch(&self) -> u64 { + T::slots_per_epoch() + } +} + +impl WatchSpec { + pub fn mainnet(network: String) -> Self { + Self { + network, + spec: PhantomData, + } + } +} + +impl WatchSpec { + fn gnosis(network: String) -> Self { + Self { + network, + spec: PhantomData, + } + } +} + +pub async fn run_updater(config: FullConfig) -> Result<(), Error> { + let beacon_node_url = + SensitiveUrl::parse(&config.updater.beacon_node_url).map_err(Error::SensitiveUrl)?; + let bn = BeaconNodeHttpClient::new(beacon_node_url, Timeouts::set_all(DEFAULT_TIMEOUT)); + + let config_map = bn.get_config_spec::>().await?.data; + + let config_name = config_map + .get("CONFIG_NAME") + .ok_or_else(|| { + Error::BeaconNodeNotCompatible("No field CONFIG_NAME on beacon node spec".to_string()) + })? 
+ .clone(); + + match config_map + .get("PRESET_BASE") + .ok_or_else(|| { + Error::BeaconNodeNotCompatible("No field PRESET_BASE on beacon node spec".to_string()) + })? + .to_lowercase() + .as_str() + { + MAINNET => { + let spec = WatchSpec::mainnet(config_name); + run_once(bn, spec, config).await + } + GNOSIS => { + let spec = WatchSpec::gnosis(config_name); + run_once(bn, spec, config).await + } + _ => unimplemented!("unsupported PRESET_BASE"), + } +} + +pub async fn run_once( + bn: BeaconNodeHttpClient, + spec: WatchSpec, + config: FullConfig, +) -> Result<(), Error> { + let mut watch = UpdateHandler::new(bn, spec, config.clone()).await?; + + let sync_data = watch.get_bn_syncing_status().await?; + if sync_data.is_syncing { + error!( + "Connected beacon node is still syncing: head_slot => {:?}, distance => {}", + sync_data.head_slot, sync_data.sync_distance + ); + return Err(Error::BeaconNodeSyncing); + } + + info!("Performing head update"); + let head_timer = Instant::now(); + watch.perform_head_update().await?; + let head_timer_elapsed = head_timer.elapsed(); + debug!("Head update complete, time taken: {head_timer_elapsed:?}"); + + info!("Performing block backfill"); + let block_backfill_timer = Instant::now(); + watch.backfill_canonical_slots().await?; + let block_backfill_timer_elapsed = block_backfill_timer.elapsed(); + debug!("Block backfill complete, time taken: {block_backfill_timer_elapsed:?}"); + + info!("Updating validator set"); + let validator_timer = Instant::now(); + watch.update_validator_set().await?; + let validator_timer_elapsed = validator_timer.elapsed(); + debug!("Validator update complete, time taken: {validator_timer_elapsed:?}"); + + // Update blocks after updating the validator set since the `proposer_index` must exist in the + // `validators` table. 
+ info!("Updating unknown blocks"); + let unknown_block_timer = Instant::now(); + watch.update_unknown_blocks().await?; + let unknown_block_timer_elapsed = unknown_block_timer.elapsed(); + debug!("Unknown block update complete, time taken: {unknown_block_timer_elapsed:?}"); + + // Run additional modules + if config.updater.attestations { + info!("Updating suboptimal attestations"); + let attestation_timer = Instant::now(); + watch.fill_suboptimal_attestations().await?; + watch.backfill_suboptimal_attestations().await?; + let attestation_timer_elapsed = attestation_timer.elapsed(); + debug!("Attestation update complete, time taken: {attestation_timer_elapsed:?}"); + } + + if config.updater.block_rewards { + info!("Updating block rewards"); + let rewards_timer = Instant::now(); + watch.fill_block_rewards().await?; + watch.backfill_block_rewards().await?; + let rewards_timer_elapsed = rewards_timer.elapsed(); + debug!("Block Rewards update complete, time taken: {rewards_timer_elapsed:?}"); + } + + if config.updater.block_packing { + info!("Updating block packing statistics"); + let packing_timer = Instant::now(); + watch.fill_block_packing().await?; + watch.backfill_block_packing().await?; + let packing_timer_elapsed = packing_timer.elapsed(); + debug!("Block packing update complete, time taken: {packing_timer_elapsed:?}"); + } + + if config.blockprint.enabled { + info!("Updating blockprint"); + let blockprint_timer = Instant::now(); + watch.fill_blockprint().await?; + watch.backfill_blockprint().await?; + let blockprint_timer_elapsed = blockprint_timer.elapsed(); + debug!("Blockprint update complete, time taken: {blockprint_timer_elapsed:?}"); + } + + Ok(()) +} + +/// Queries the beacon node for a given `BlockId` and returns the `BeaconBlockHeader` if it exists. +pub async fn get_header( + bn: &BeaconNodeHttpClient, + block_id: BlockId, +) -> Result, Error> { + let resp = bn + .get_beacon_headers_block_id(block_id) + .await? 
+ .map(|resp| (resp.data.root, resp.data.header.message)); + // When quering with root == 0x000... , slot 0 will be returned with parent_root == 0x0000... + // This check escapes the loop. + if let Some((root, header)) = resp { + if root == header.parent_root { + return Ok(None); + } else { + return Ok(Some(header)); + } + } + Ok(None) +} + +pub async fn get_beacon_block( + bn: &BeaconNodeHttpClient, + block_id: BlockId, +) -> Result>, Error> { + let block = bn.get_beacon_blocks(block_id).await?.map(|resp| resp.data); + + Ok(block) +} + +/// Queries the beacon node for the current validator set. +pub async fn get_validators(bn: &BeaconNodeHttpClient) -> Result, Error> { + let mut validator_map = HashSet::new(); + + let validators = bn + .get_beacon_states_validators(StateId::Head, None, None) + .await? + .ok_or(Error::NoValidatorsFound)? + .data; + + for val in validators { + // Only store `activation_epoch` if it not the `FAR_FUTURE_EPOCH`. + let activation_epoch = if val.validator.activation_epoch.as_u64() == FAR_FUTURE_EPOCH { + None + } else { + Some(val.validator.activation_epoch.as_u64() as i32) + }; + // Only store `exit_epoch` if it is not the `FAR_FUTURE_EPOCH`. 
+ let exit_epoch = if val.validator.exit_epoch.as_u64() == FAR_FUTURE_EPOCH { + None + } else { + Some(val.validator.exit_epoch.as_u64() as i32) + }; + validator_map.insert(WatchValidator { + index: val.index as i32, + public_key: WatchPK::from_pubkey(val.validator.pubkey), + status: val.status.to_string(), + activation_epoch, + exit_epoch, + }); + } + Ok(validator_map) +} diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs new file mode 100644 index 0000000000..acdda8c306 --- /dev/null +++ b/watch/tests/tests.rs @@ -0,0 +1,1254 @@ +#![recursion_limit = "256"] +#![cfg(unix)] + +use beacon_chain::test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, +}; +use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; +use http_api::test_utils::{create_api_server, ApiServer}; +use network::NetworkReceivers; + +use rand::distributions::Alphanumeric; +use rand::{thread_rng, Rng}; +use tokio::sync::oneshot; +use types::{Hash256, MainnetEthSpec, Slot}; +use url::Url; +use watch::{ + client::WatchHttpClient, + config::Config, + database::{self, Config as DatabaseConfig, PgPool, WatchSlot}, + server::{start_server, Config as ServerConfig}, + updater::{handler::*, run_updater, Config as UpdaterConfig, WatchSpec}, +}; + +use log::error; +use std::net::SocketAddr; +use std::time::Duration; +use tokio::{runtime, task::JoinHandle}; +use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls}; +use unused_port::unused_tcp4_port; + +use testcontainers::{clients::Cli, images::postgres::Postgres, RunnableImage}; + +type E = MainnetEthSpec; + +const VALIDATOR_COUNT: usize = 32; +const SLOTS_PER_EPOCH: u64 = 32; +const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); + +fn build_test_config(config: &DatabaseConfig) -> PostgresConfig { + let mut postgres_config = PostgresConfig::new(); + postgres_config + .user(&config.user) + .password(&config.password) + .dbname(&config.default_dbname) + .host(&config.host) + 
.port(config.port) + .connect_timeout(Duration::from_millis(config.connect_timeout_millis)); + postgres_config +} + +async fn connect(config: &DatabaseConfig) -> (Client, JoinHandle<()>) { + let db_config = build_test_config(config); + let (client, conn) = db_config + .connect(NoTls) + .await + .expect("Could not connect to db"); + let connection = runtime::Handle::current().spawn(async move { + if let Err(e) = conn.await { + error!("Connection error {:?}", e); + } + }); + + (client, connection) +} + +pub async fn create_test_database(config: &DatabaseConfig) { + let (db, _) = connect(config).await; + + db.execute(&format!("CREATE DATABASE {};", config.dbname), &[]) + .await + .expect("Database creation failed"); +} + +struct TesterBuilder { + pub harness: BeaconChainHarness>, + pub config: Config, + _bn_network_rx: NetworkReceivers, + _bn_api_shutdown_tx: oneshot::Sender<()>, +} + +impl TesterBuilder { + pub async fn new() -> TesterBuilder { + let harness = BeaconChainHarness::builder(E::default()) + .default_spec() + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .build(); + + /* + * Spawn a Beacon Node HTTP API. + */ + let ApiServer { + server, + listening_socket: bn_api_listening_socket, + shutdown_tx: _bn_api_shutdown_tx, + network_rx: _bn_network_rx, + .. 
+ } = create_api_server(harness.chain.clone(), harness.logger().clone()).await; + tokio::spawn(server); + + /* + * Create a watch configuration + */ + let database_port = unused_tcp4_port().expect("Unable to find unused port."); + let server_port = unused_tcp4_port().expect("Unable to find unused port."); + let config = Config { + database: DatabaseConfig { + dbname: random_dbname(), + port: database_port, + ..Default::default() + }, + server: ServerConfig { + listen_port: server_port, + ..Default::default() + }, + updater: UpdaterConfig { + beacon_node_url: format!( + "http://{}:{}", + bn_api_listening_socket.ip(), + bn_api_listening_socket.port() + ), + ..Default::default() + }, + ..Default::default() + }; + + Self { + harness, + config, + _bn_network_rx, + _bn_api_shutdown_tx, + } + } + pub async fn build(self, pool: PgPool) -> Tester { + /* + * Spawn a Watch HTTP API. + */ + let (_watch_shutdown_tx, watch_shutdown_rx) = oneshot::channel(); + let watch_server = start_server(&self.config, SLOTS_PER_EPOCH, pool, async { + let _ = watch_shutdown_rx.await; + }) + .unwrap(); + tokio::spawn(watch_server); + + let addr = SocketAddr::new( + self.config.server.listen_addr, + self.config.server.listen_port, + ); + + /* + * Create a HTTP client to talk to the watch HTTP API. + */ + let client = WatchHttpClient { + client: reqwest::Client::new(), + server: Url::parse(&format!("http://{}:{}", addr.ip(), addr.port())).unwrap(), + }; + + /* + * Create a HTTP client to talk to the Beacon Node API. 
+ */ + let beacon_node_url = SensitiveUrl::parse(&self.config.updater.beacon_node_url).unwrap(); + let bn = BeaconNodeHttpClient::new(beacon_node_url, Timeouts::set_all(DEFAULT_TIMEOUT)); + let spec = WatchSpec::mainnet("mainnet".to_string()); + + /* + * Build update service + */ + let updater = UpdateHandler::new(bn, spec, self.config.clone()) + .await + .unwrap(); + + Tester { + harness: self.harness, + client, + config: self.config, + updater, + _bn_network_rx: self._bn_network_rx, + _bn_api_shutdown_tx: self._bn_api_shutdown_tx, + _watch_shutdown_tx, + } + } + async fn initialize_database(&self) -> PgPool { + create_test_database(&self.config.database).await; + database::utils::run_migrations(&self.config.database); + database::build_connection_pool(&self.config.database) + .expect("Could not build connection pool") + } +} + +struct Tester { + pub harness: BeaconChainHarness>, + pub client: WatchHttpClient, + pub config: Config, + pub updater: UpdateHandler, + _bn_network_rx: NetworkReceivers, + _bn_api_shutdown_tx: oneshot::Sender<()>, + _watch_shutdown_tx: oneshot::Sender<()>, +} + +impl Tester { + /// Extend the chain on the beacon chain harness. Do not update the beacon watch database. + pub async fn extend_chain(&mut self, num_blocks: u64) -> &mut Self { + self.harness.advance_slot(); + self.harness + .extend_chain( + num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self + } + + // Advance the slot clock without a block. This results in a skipped slot. + pub fn skip_slot(&mut self) -> &mut Self { + self.harness.advance_slot(); + self + } + + // Perform a single slot re-org. 
+ pub async fn reorg_chain(&mut self) -> &mut Self { + let previous_slot = self.harness.get_current_slot(); + self.harness.advance_slot(); + let first_slot = self.harness.get_current_slot(); + self.harness + .extend_chain( + 1, + BlockStrategy::ForkCanonicalChainAt { + previous_slot, + first_slot, + }, + AttestationStrategy::AllValidators, + ) + .await; + self + } + + /// Run the watch updater service. + pub async fn run_update_service(&mut self, num_runs: usize) -> &mut Self { + for _ in 0..num_runs { + run_updater(self.config.clone()).await.unwrap(); + } + self + } + + pub async fn perform_head_update(&mut self) -> &mut Self { + self.updater.perform_head_update().await.unwrap(); + self + } + + pub async fn perform_backfill(&mut self) -> &mut Self { + self.updater.backfill_canonical_slots().await.unwrap(); + self + } + + pub async fn update_unknown_blocks(&mut self) -> &mut Self { + self.updater.update_unknown_blocks().await.unwrap(); + self + } + + pub async fn update_validator_set(&mut self) -> &mut Self { + self.updater.update_validator_set().await.unwrap(); + self + } + + pub async fn fill_suboptimal_attestations(&mut self) -> &mut Self { + self.updater.fill_suboptimal_attestations().await.unwrap(); + + self + } + + pub async fn backfill_suboptimal_attestations(&mut self) -> &mut Self { + self.updater + .backfill_suboptimal_attestations() + .await + .unwrap(); + + self + } + + pub async fn fill_block_rewards(&mut self) -> &mut Self { + self.updater.fill_block_rewards().await.unwrap(); + + self + } + + pub async fn backfill_block_rewards(&mut self) -> &mut Self { + self.updater.backfill_block_rewards().await.unwrap(); + + self + } + + pub async fn fill_block_packing(&mut self) -> &mut Self { + self.updater.fill_block_packing().await.unwrap(); + + self + } + + pub async fn backfill_block_packing(&mut self) -> &mut Self { + self.updater.backfill_block_packing().await.unwrap(); + + self + } + + pub async fn assert_canonical_slots_empty(&mut self) -> &mut Self { + 
let lowest_slot = self + .client + .get_lowest_canonical_slot() + .await + .unwrap() + .map(|slot| slot.slot.as_slot()); + + assert_eq!(lowest_slot, None); + + self + } + + pub async fn assert_lowest_canonical_slot(&mut self, expected: u64) -> &mut Self { + let slot = self + .client + .get_lowest_canonical_slot() + .await + .unwrap() + .unwrap() + .slot + .as_slot(); + + assert_eq!(slot, Slot::new(expected)); + + self + } + + pub async fn assert_highest_canonical_slot(&mut self, expected: u64) -> &mut Self { + let slot = self + .client + .get_highest_canonical_slot() + .await + .unwrap() + .unwrap() + .slot + .as_slot(); + + assert_eq!(slot, Slot::new(expected)); + + self + } + + pub async fn assert_canonical_slots_not_empty(&mut self) -> &mut Self { + self.client + .get_lowest_canonical_slot() + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_slot_is_skipped(&mut self, slot: u64) -> &mut Self { + assert!(self + .client + .get_beacon_blocks(BlockId::Slot(Slot::new(slot))) + .await + .unwrap() + .is_none()); + self + } + + pub async fn assert_all_validators_exist(&mut self) -> &mut Self { + assert_eq!( + self.client + .get_all_validators() + .await + .unwrap() + .unwrap() + .len(), + VALIDATOR_COUNT + ); + self + } + + pub async fn assert_lowest_block_has_proposer_info(&mut self) -> &mut Self { + let mut block = self + .client + .get_lowest_beacon_block() + .await + .unwrap() + .unwrap(); + + if block.slot.as_slot() == 0 { + block = self + .client + .get_next_beacon_block(block.root.as_hash()) + .await + .unwrap() + .unwrap() + } + + self.client + .get_proposer_info(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_highest_block_has_proposer_info(&mut self) -> &mut Self { + let block = self + .client + .get_highest_beacon_block() + .await + .unwrap() + .unwrap(); + + self.client + .get_proposer_info(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + 
pub async fn assert_lowest_block_has_block_rewards(&mut self) -> &mut Self { + let mut block = self + .client + .get_lowest_beacon_block() + .await + .unwrap() + .unwrap(); + + if block.slot.as_slot() == 0 { + block = self + .client + .get_next_beacon_block(block.root.as_hash()) + .await + .unwrap() + .unwrap() + } + + self.client + .get_block_reward(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_highest_block_has_block_rewards(&mut self) -> &mut Self { + let block = self + .client + .get_highest_beacon_block() + .await + .unwrap() + .unwrap(); + + self.client + .get_block_reward(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_lowest_block_has_block_packing(&mut self) -> &mut Self { + let mut block = self + .client + .get_lowest_beacon_block() + .await + .unwrap() + .unwrap(); + + while block.slot.as_slot() <= SLOTS_PER_EPOCH { + block = self + .client + .get_next_beacon_block(block.root.as_hash()) + .await + .unwrap() + .unwrap() + } + + self.client + .get_block_packing(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_highest_block_has_block_packing(&mut self) -> &mut Self { + let block = self + .client + .get_highest_beacon_block() + .await + .unwrap() + .unwrap(); + + self.client + .get_block_packing(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + /// Check that the canonical chain in watch matches that of the harness. Also check that all + /// canonical blocks can be retrieved. 
+ pub async fn assert_canonical_chain_consistent(&mut self, last_slot: u64) -> &mut Self { + let head_root = self.harness.chain.head_beacon_block_root(); + let mut chain: Vec<(Hash256, Slot)> = self + .harness + .chain + .rev_iter_block_roots_from(head_root) + .unwrap() + .map(Result::unwrap) + .collect(); + + // `chain` contains skip slots, but the `watch` API will not return blocks that do not + // exist. + // We need to filter them out. + chain.reverse(); + chain.dedup_by(|(hash1, _), (hash2, _)| hash1 == hash2); + + // Remove any slots below `last_slot` since it is known that the database has not + // backfilled past it. + chain.retain(|(_, slot)| slot.as_u64() >= last_slot); + + for (root, slot) in &chain { + let block = self + .client + .get_beacon_blocks(BlockId::Root(*root)) + .await + .unwrap() + .unwrap(); + assert_eq!(block.slot.as_slot(), *slot); + } + + self + } + + /// Check that every block in the `beacon_blocks` table has corresponding entries in the + /// `proposer_info`, `block_rewards` and `block_packing` tables. 
+ pub async fn assert_all_blocks_have_metadata(&mut self) -> &mut Self { + let pool = database::build_connection_pool(&self.config.database).unwrap(); + + let mut conn = database::get_connection(&pool).unwrap(); + let highest_block_slot = database::get_highest_beacon_block(&mut conn) + .unwrap() + .unwrap() + .slot + .as_slot(); + let lowest_block_slot = database::get_lowest_beacon_block(&mut conn) + .unwrap() + .unwrap() + .slot + .as_slot(); + for slot in lowest_block_slot.as_u64()..=highest_block_slot.as_u64() { + let canonical_slot = database::get_canonical_slot(&mut conn, WatchSlot::new(slot)) + .unwrap() + .unwrap(); + if !canonical_slot.skipped { + database::get_block_rewards_by_slot(&mut conn, WatchSlot::new(slot)) + .unwrap() + .unwrap(); + database::get_proposer_info_by_slot(&mut conn, WatchSlot::new(slot)) + .unwrap() + .unwrap(); + database::get_block_packing_by_slot(&mut conn, WatchSlot::new(slot)) + .unwrap() + .unwrap(); + } + } + + self + } +} + +pub fn random_dbname() -> String { + let mut s: String = thread_rng() + .sample_iter(&Alphanumeric) + .take(8) + .map(char::from) + .collect(); + // Postgres gets weird about capitals in database names. 
+ s.make_ascii_lowercase(); + format!("test_{}", s) +} + +#[cfg(unix)] +#[tokio::test] +async fn short_chain() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + .extend_chain(16) + .await + .assert_canonical_slots_empty() + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_canonical_slots_not_empty() + .await + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn short_chain_sync_starts_on_skip_slot() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + .skip_slot() + .skip_slot() + .extend_chain(6) + .await + .skip_slot() + .extend_chain(6) + .await + .skip_slot() + .assert_canonical_slots_empty() + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_canonical_slots_not_empty() + .await + .assert_canonical_chain_consistent(0) + .await + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn short_chain_with_skip_slot() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + .extend_chain(5) + .await + 
.assert_canonical_slots_empty() + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_canonical_slots_not_empty() + .await + .assert_highest_canonical_slot(5) + .await + .assert_lowest_canonical_slot(0) + .await + .assert_canonical_chain_consistent(0) + .await + .skip_slot() + .extend_chain(1) + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_highest_canonical_slot(7) + .await + .assert_slot_is_skipped(6) + .await + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn short_chain_with_reorg() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + .extend_chain(5) + .await + .assert_canonical_slots_empty() + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_canonical_slots_not_empty() + .await + .assert_highest_canonical_slot(5) + .await + .assert_lowest_canonical_slot(0) + .await + .assert_canonical_chain_consistent(0) + .await + .skip_slot() + .reorg_chain() + .await + .extend_chain(1) + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_highest_canonical_slot(8) + .await + .assert_slot_is_skipped(6) + .await + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn chain_grows() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + // Apply four blocks to the chain. 
+ tester + .extend_chain(4) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(4) + .await + // And also backfill to the epoch boundary. + .assert_lowest_canonical_slot(0) + .await + // Fill back to genesis. + .perform_backfill() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(4) + .await + // Apply one block to the chain. + .extend_chain(1) + .await + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(5) + .await + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(7) + .await + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn chain_grows_with_metadata() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + // Apply four blocks to the chain. + .extend_chain(4) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(4) + .await + // And also backfill to the epoch boundary. + .assert_lowest_canonical_slot(0) + .await + // Fill back to genesis. + .perform_backfill() + .await + // Insert all validators + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // All validators should be present. 
+ .assert_all_validators_exist() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await + // Get other chain data. + // Backfill before forward fill to ensure order is arbitrary. + .backfill_block_rewards() + .await + .fill_block_rewards() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(4) + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await + // Apply one block to the chain. + .extend_chain(1) + .await + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(5) + .await + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(7) + .await + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await + // Get other chain data. + .fill_block_rewards() + .await + .backfill_block_rewards() + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. 
+ .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn chain_grows_with_metadata_and_multiple_skip_slots() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + // Apply four blocks to the chain. + tester + .extend_chain(4) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(4) + // And also backfill to the epoch boundary. + .await + .assert_lowest_canonical_slot(0) + .await + // Fill back to genesis. + .perform_backfill() + .await + // Insert all validators + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // Check the chain is consistent. + .assert_canonical_chain_consistent(0) + .await + // Get other chain data. + .fill_block_rewards() + .await + .backfill_block_rewards() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(4) + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await + // Add multiple skip slots. + .skip_slot() + .skip_slot() + .skip_slot() + // Apply one block to the chain. + .extend_chain(1) + .await + .perform_head_update() + .await + // All blocks should be present. 
+ .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(8) + .await + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(10) + .await + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await + // Get other chain data. + // Backfill before forward fill to ensure order is arbitrary. + .backfill_block_rewards() + .await + .fill_block_rewards() + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn chain_grows_to_second_epoch() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + // Apply 40 blocks to the chain. + tester + .extend_chain(40) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(40) + .await + // And also backfill to the epoch boundary. + .assert_lowest_canonical_slot(32) + .await + // Fill back to genesis. + .perform_backfill() + .await + // Insert all validators + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // All validators should be present. 
+ .assert_all_validators_exist() + .await + // Check the chain is consistent. + .assert_canonical_chain_consistent(0) + .await + // Get block packings. + .fill_block_packing() + .await + .backfill_block_packing() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(40) + .await + // All packings should be present. + .assert_lowest_block_has_block_packing() + .await + .assert_highest_block_has_block_packing() + .await + // Skip a slot + .skip_slot() + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(43) + .await + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // Update new block_packing + // Backfill before forward fill to ensure order is arbitrary + .backfill_block_packing() + .await + .fill_block_packing() + .await + // All packings should be present. + .assert_lowest_block_has_block_packing() + .await + .assert_highest_block_has_block_packing() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn large_chain() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + // Apply 40 blocks to the chain. + tester + .extend_chain(400) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(400) + .await + // And also backfill to the epoch boundary. + .assert_lowest_canonical_slot(384) + .await + // Backfill 2 epochs as per default config. 
+ .perform_backfill() + .await + // Insert all validators + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // Check the chain is consistent. + .assert_canonical_chain_consistent(384) + .await + // Get block rewards and proposer info. + .fill_block_rewards() + .await + .backfill_block_rewards() + .await + // Get block packings. + .fill_block_packing() + .await + .backfill_block_packing() + .await + // Should have backfilled 2 more epochs. + .assert_lowest_canonical_slot(320) + .await + .assert_highest_canonical_slot(400) + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await + // All packings should be present. + .assert_lowest_block_has_block_packing() + .await + .assert_highest_block_has_block_packing() + .await + // Skip a slot + .skip_slot() + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + .perform_backfill() + .await + // Should have backfilled 2 more epochs + .assert_lowest_canonical_slot(256) + .await + .assert_highest_canonical_slot(403) + .await + // Update validators + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // Get suboptimal attestations. + .fill_suboptimal_attestations() + .await + .backfill_suboptimal_attestations() + .await + // Get block rewards and proposer info. + .fill_block_rewards() + .await + .backfill_block_rewards() + .await + // Get block packing. + // Backfill before forward fill to ensure order is arbitrary. 
+ .backfill_block_packing() + .await + .fill_block_packing() + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await + // All packings should be present. + .assert_lowest_block_has_block_packing() + .await + .assert_highest_block_has_block_packing() + .await + // Check the chain is consistent. + .assert_canonical_chain_consistent(256) + .await + // Check every block has rewards, proposer info and packing statistics. + .assert_all_blocks_have_metadata() + .await; +} From 32f9ba04d76338f245231bd2af3423eed30376f6 Mon Sep 17 00:00:00 2001 From: Diva M Date: Tue, 4 Apr 2023 12:10:51 -0500 Subject: [PATCH 17/17] fix merge conflict --- .../network/src/beacon_processor/work_reprocessing_queue.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index 969e1eea3d..b0795d120a 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -168,7 +168,7 @@ pub struct QueuedRpcBlock { #[derive(Clone)] pub struct QueuedBackfillBatch { pub process_id: ChainSegmentProcessId, - pub blocks: Vec>>, + pub blocks: Vec>, } impl TryFrom> for QueuedBackfillBatch {