From 58bd2f76d0819c7147684cb10f994480545ee769 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 3 Oct 2022 23:09:25 +0000 Subject: [PATCH 01/21] Ensure protoc is installed for release CI (#3621) ## Issue Addressed The release CI is currently broken due to the addition of the `protoc` dependency. Here's a failure of the release flow running on my fork: https://github.com/michaelsproul/lighthouse/actions/runs/3155541478/jobs/5134317334 ## Proposed Changes - Install `protoc` on Windows and Mac so that it's available for `cargo install`. - Install an x86_64 binary in the Cross image for the aarch64 platform: we need a binary that runs on the host, _not_ on the target. - Fix `macos` local testnet CI by using the Github API key to dodge rate limiting (this issue: https://github.com/actions/runner-images/issues/602). --- .github/workflows/local-testnet.yml | 2 ++ .github/workflows/release.yml | 9 +++++++++ Cross.toml | 4 ++-- lcli/Dockerfile | 2 +- ..._64-unknown-linux-gnu.dockerfile => Dockerfile} | 0 scripts/cross/aarch64-unknown-linux-gnu.dockerfile | 14 -------------- 6 files changed, 14 insertions(+), 17 deletions(-) rename scripts/cross/{x86_64-unknown-linux-gnu.dockerfile => Dockerfile} (100%) delete mode 100644 scripts/cross/aarch64-unknown-linux-gnu.dockerfile diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index c688c0df33..170bd9e212 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -22,6 +22,8 @@ jobs: run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache run: npm install ganache@latest --global diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 158cbaa08b..6edb1f76c1 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -83,6 +83,15 @@ jobs: if: startsWith(matrix.arch, 'x86_64-windows') run: echo 
"LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV + # ============================== + # Windows & Mac dependencies + # ============================== + - name: Install Protoc + if: contains(matrix.arch, 'darwin') || contains(matrix.arch, 'windows') + uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + # ============================== # Builds # ============================== diff --git a/Cross.toml b/Cross.toml index 963e22d0e0..9c3e441cba 100644 --- a/Cross.toml +++ b/Cross.toml @@ -1,5 +1,5 @@ [target.x86_64-unknown-linux-gnu] -dockerfile = './scripts/cross/x86_64-unknown-linux-gnu.dockerfile' +dockerfile = './scripts/cross/Dockerfile' [target.aarch64-unknown-linux-gnu] -dockerfile = './scripts/cross/aarch64-unknown-linux-gnu.dockerfile' +dockerfile = './scripts/cross/Dockerfile' diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 8fd3567cdc..1129e710f4 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -2,7 +2,7 @@ # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` FROM rust:1.62.1-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . 
lighthouse ARG PORTABLE ENV PORTABLE $PORTABLE diff --git a/scripts/cross/x86_64-unknown-linux-gnu.dockerfile b/scripts/cross/Dockerfile similarity index 100% rename from scripts/cross/x86_64-unknown-linux-gnu.dockerfile rename to scripts/cross/Dockerfile diff --git a/scripts/cross/aarch64-unknown-linux-gnu.dockerfile b/scripts/cross/aarch64-unknown-linux-gnu.dockerfile deleted file mode 100644 index 691639cd41..0000000000 --- a/scripts/cross/aarch64-unknown-linux-gnu.dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -ARG CROSS_BASE_IMAGE -FROM $CROSS_BASE_IMAGE - -RUN apt-get update -y && apt-get upgrade -y - -RUN apt-get install -y unzip && \ - PB_REL="https://github.com/protocolbuffers/protobuf/releases" && \ - curl -L $PB_REL/download/v3.15.8/protoc-3.15.8-linux-aarch_64.zip -o protoc.zip && \ - unzip protoc.zip -d /usr && \ - chmod +x /usr/bin/protoc - -RUN apt-get install -y cmake clang-3.9 - -ENV PROTOC=/usr/bin/protoc From 8728c40102dabefb6a43c8335da0051a0d986f0d Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 4 Oct 2022 08:33:39 +0000 Subject: [PATCH 02/21] Remove fallback support from eth1 service (#3594) ## Issue Addressed N/A ## Proposed Changes With https://github.com/sigp/lighthouse/pull/3214 we made it such that you can either have 1 auth endpoint or multiple non auth endpoints. Now that we are post merge on all networks (testnets and mainnet), we cannot progress a chain without a dedicated auth execution layer connection so there is no point in having a non-auth eth1-endpoint for syncing deposit cache. This code removes all fallback related code in the eth1 service. We still keep the single non-auth endpoint since it's useful for testing. ## Additional Info This removes all eth1 fallback related metrics that were relevant for the monitoring service, so we might need to change the api upstream. 
--- Cargo.lock | 8 - Cargo.toml | 1 - beacon_node/beacon_chain/src/builder.rs | 2 +- beacon_node/beacon_chain/src/eth1_chain.rs | 17 +- beacon_node/client/src/builder.rs | 12 +- beacon_node/eth1/Cargo.toml | 1 - beacon_node/eth1/src/inner.rs | 10 +- beacon_node/eth1/src/metrics.rs | 20 - beacon_node/eth1/src/service.rs | 403 ++++-------------- beacon_node/eth1/tests/test.rs | 352 +++------------ .../genesis/src/eth1_genesis_service.rs | 15 +- beacon_node/genesis/tests/tests.rs | 10 +- beacon_node/http_api/tests/common.rs | 3 +- beacon_node/src/cli.rs | 6 +- beacon_node/src/config.rs | 17 +- beacon_node/src/lib.rs | 2 +- common/fallback/Cargo.toml | 10 - common/fallback/src/lib.rs | 63 --- common/monitoring_api/src/gather.rs | 20 + lcli/src/eth1_genesis.rs | 17 +- lighthouse/tests/beacon_node.rs | 23 +- testing/simulator/src/eth1_sim.rs | 18 +- 22 files changed, 228 insertions(+), 802 deletions(-) delete mode 100644 common/fallback/Cargo.toml delete mode 100644 common/fallback/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index cfefa6c116..8fb8c54929 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1661,7 +1661,6 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "execution_layer", - "fallback", "futures", "hex", "lazy_static", @@ -2117,13 +2116,6 @@ dependencies = [ "futures", ] -[[package]] -name = "fallback" -version = "0.1.0" -dependencies = [ - "itertools", -] - [[package]] name = "fallible-iterator" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index 27120e217f..02cf4d9436 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,7 +43,6 @@ members = [ "common/unused_port", "common/validator_dir", "common/warp_utils", - "common/fallback", "common/monitoring_api", "database_manager", diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 916ebd2359..051b84f816 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -897,7 +897,7 @@ where .ok_or("dummy_eth1_backend requires 
a log")?; let backend = - CachingEth1Backend::new(Eth1Config::default(), log.clone(), self.spec.clone()); + CachingEth1Backend::new(Eth1Config::default(), log.clone(), self.spec.clone())?; self.eth1_chain = Some(Eth1Chain::new_dummy(backend)); diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 8dd101b726..3d24becc84 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -431,12 +431,13 @@ impl CachingEth1Backend { /// Instantiates `self` with empty caches. /// /// Does not connect to the eth1 node or start any tasks to keep the cache updated. - pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Self { - Self { - core: HttpService::new(config, log.clone(), spec), + pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Result { + Ok(Self { + core: HttpService::new(config, log.clone(), spec) + .map_err(|e| format!("Failed to create eth1 http service: {:?}", e))?, log, _phantom: PhantomData, - } + }) } /// Starts the routine which connects to the external eth1 node and updates the caches. 
@@ -730,11 +731,9 @@ mod test { }; let log = null_logger().unwrap(); - Eth1Chain::new(CachingEth1Backend::new( - eth1_config, - log, - MainnetEthSpec::default_spec(), - )) + Eth1Chain::new( + CachingEth1Backend::new(eth1_config, log, MainnetEthSpec::default_spec()).unwrap(), + ) } fn get_deposit_log(i: u64, spec: &ChainSpec) -> DepositLog { diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 752ba3b7bc..a46d91ad1e 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -370,7 +370,7 @@ where info!( context.log(), "Waiting for eth2 genesis from eth1"; - "eth1_endpoints" => format!("{:?}", &config.eth1.endpoints), + "eth1_endpoints" => format!("{:?}", &config.eth1.endpoint), "contract_deploy_block" => config.eth1.deposit_contract_deploy_block, "deposit_contract" => &config.eth1.deposit_contract_address ); @@ -379,7 +379,7 @@ where config.eth1, context.log().clone(), context.eth2_config().spec.clone(), - ); + )?; // If the HTTP API server is enabled, start an instance of it where it only // contains a reference to the eth1 service (all non-eth1 endpoints will fail @@ -875,7 +875,7 @@ where CachingEth1Backend::from_service(eth1_service_from_genesis) } else if config.purge_cache { - CachingEth1Backend::new(config, context.log().clone(), spec) + CachingEth1Backend::new(config, context.log().clone(), spec)? } else { beacon_chain_builder .get_persisted_eth1_backend()? @@ -889,11 +889,7 @@ where .map(|chain| chain.into_backend()) }) .unwrap_or_else(|| { - Ok(CachingEth1Backend::new( - config, - context.log().clone(), - spec.clone(), - )) + CachingEth1Backend::new(config, context.log().clone(), spec.clone()) })? 
}; diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 403869cc9c..930301256c 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -31,5 +31,4 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics"} lazy_static = "1.4.0" task_executor = { path = "../../common/task_executor" } eth2 = { path = "../../common/eth2" } -fallback = { path = "../../common/fallback" } sensitive_url = { path = "../../common/sensitive_url" } diff --git a/beacon_node/eth1/src/inner.rs b/beacon_node/eth1/src/inner.rs index 9a57f450e1..b0a951bef0 100644 --- a/beacon_node/eth1/src/inner.rs +++ b/beacon_node/eth1/src/inner.rs @@ -1,14 +1,14 @@ +use crate::service::endpoint_from_config; use crate::Config; use crate::{ block_cache::{BlockCache, Eth1Block}, deposit_cache::{DepositCache, SszDepositCache}, - service::EndpointsCache, }; +use execution_layer::HttpJsonRpc; use parking_lot::RwLock; use ssz::four_byte_option_impl; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::sync::Arc; use types::ChainSpec; // Define "legacy" implementations of `Option` which use four bytes for encoding the union @@ -31,11 +31,10 @@ impl DepositUpdater { } } -#[derive(Default)] pub struct Inner { pub block_cache: RwLock, pub deposit_cache: RwLock, - pub endpoints_cache: RwLock>>, + pub endpoint: HttpJsonRpc, pub config: RwLock, pub remote_head_block: RwLock>, pub spec: ChainSpec, @@ -96,7 +95,8 @@ impl SszEth1Cache { cache: self.deposit_cache.to_deposit_cache()?, last_processed_block: self.last_processed_block, }), - endpoints_cache: RwLock::new(None), + endpoint: endpoint_from_config(&config) + .map_err(|e| format!("Failed to create endpoint: {:?}", e))?, // Set the remote head_block zero when creating a new instance. We only care about // present and future eth1 nodes. 
remote_head_block: RwLock::new(None), diff --git a/beacon_node/eth1/src/metrics.rs b/beacon_node/eth1/src/metrics.rs index f3d9483b2b..5441b40d7e 100644 --- a/beacon_node/eth1/src/metrics.rs +++ b/beacon_node/eth1/src/metrics.rs @@ -17,16 +17,6 @@ lazy_static! { pub static ref HIGHEST_PROCESSED_DEPOSIT_BLOCK: Result = try_create_int_gauge("eth1_highest_processed_deposit_block", "Number of the last block checked for deposits"); - /* - * Eth1 endpoint errors - */ - pub static ref ENDPOINT_ERRORS: Result = try_create_int_counter_vec( - "eth1_endpoint_errors", "The number of eth1 request errors for each endpoint", &["endpoint"] - ); - pub static ref ENDPOINT_REQUESTS: Result = try_create_int_counter_vec( - "eth1_endpoint_requests", "The number of eth1 requests for each endpoint", &["endpoint"] - ); - /* * Eth1 rpc connection */ @@ -35,14 +25,4 @@ lazy_static! { "sync_eth1_connected", "Set to 1 if connected to an eth1 node, otherwise set to 0" ); - pub static ref ETH1_FALLBACK_CONFIGURED: Result = try_create_int_gauge( - "sync_eth1_fallback_configured", "Number of configured eth1 fallbacks" - ); - - // Note: This metric only checks if an eth1 fallback is configured, not if it is connected and synced. - // Checking for liveness of the fallback would require moving away from lazy checking of fallbacks. 
- pub static ref ETH1_FALLBACK_CONNECTED: Result = try_create_int_gauge( - "eth1_sync_fallback_connected", "Set to 1 if an eth1 fallback is connected, otherwise set to 0" - ); - } diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index a4d4e5e254..fae6eef9c2 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -9,19 +9,16 @@ use execution_layer::http::{ deposit_methods::{BlockQuery, Eth1Id}, HttpJsonRpc, }; -use fallback::{Fallback, FallbackError}; use futures::future::TryFutureExt; use parking_lot::{RwLock, RwLockReadGuard}; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; use slog::{debug, error, info, trace, warn, Logger}; use std::fmt::Debug; -use std::future::Future; use std::ops::{Range, RangeInclusive}; use std::path::PathBuf; use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; -use tokio::sync::RwLock as TRwLock; use tokio::time::{interval_at, Duration, Instant}; use types::{ChainSpec, EthSpec, Unsigned}; @@ -53,127 +50,12 @@ const CACHE_FACTOR: u64 = 2; #[derive(Debug, PartialEq, Clone)] pub enum EndpointError { RequestFailed(String), - WrongNetworkId, WrongChainId, FarBehind, } type EndpointState = Result<(), EndpointError>; -pub struct EndpointWithState { - client: HttpJsonRpc, - state: TRwLock>, -} - -impl EndpointWithState { - pub fn new(client: HttpJsonRpc) -> Self { - Self { - client, - state: TRwLock::new(None), - } - } -} - -async fn reset_endpoint_state(endpoint: &EndpointWithState) { - *endpoint.state.write().await = None; -} - -async fn get_state(endpoint: &EndpointWithState) -> Option { - endpoint.state.read().await.clone() -} - -/// A cache structure to lazily check usability of endpoints. An endpoint is usable if it is -/// reachable and has the correct network id and chain id. Emits a `WARN` log if a checked endpoint -/// is not usable. 
-pub struct EndpointsCache { - pub fallback: Fallback, - pub config_chain_id: Eth1Id, - pub log: Logger, -} - -impl EndpointsCache { - /// Checks the usability of an endpoint. Results get cached and therefore only the first call - /// for each endpoint does the real check. - async fn state(&self, endpoint: &EndpointWithState) -> EndpointState { - if let Some(result) = endpoint.state.read().await.clone() { - return result; - } - let mut value = endpoint.state.write().await; - if let Some(result) = value.clone() { - return result; - } - crate::metrics::inc_counter_vec( - &crate::metrics::ENDPOINT_REQUESTS, - &[&endpoint.client.to_string()], - ); - let state = endpoint_state(&endpoint.client, &self.config_chain_id, &self.log).await; - *value = Some(state.clone()); - if state.is_err() { - crate::metrics::inc_counter_vec( - &crate::metrics::ENDPOINT_ERRORS, - &[&endpoint.client.to_string()], - ); - crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 0); - } else { - crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 1); - } - state - } - - /// Return the first successful result along with number of previous errors encountered - /// or all the errors encountered if every none of the fallback endpoints return required output. 
- pub async fn first_success<'a, F, O, R>( - &'a self, - func: F, - ) -> Result<(O, usize), FallbackError> - where - F: Fn(&'a HttpJsonRpc) -> R, - R: Future>, - { - let func = &func; - self.fallback - .first_success(|endpoint| async move { - match self.state(endpoint).await { - Ok(()) => { - let endpoint_str = &endpoint.client.to_string(); - crate::metrics::inc_counter_vec( - &crate::metrics::ENDPOINT_REQUESTS, - &[endpoint_str], - ); - match func(&endpoint.client).await { - Ok(t) => Ok(t), - Err(t) => { - crate::metrics::inc_counter_vec( - &crate::metrics::ENDPOINT_ERRORS, - &[endpoint_str], - ); - if let SingleEndpointError::EndpointError(e) = &t { - *endpoint.state.write().await = Some(Err(e.clone())); - } else { - // A non-`EndpointError` error occurred, so reset the state. - reset_endpoint_state(endpoint).await; - } - Err(t) - } - } - } - Err(e) => Err(SingleEndpointError::EndpointError(e)), - } - }) - .await - } - - pub async fn reset_errorred_endpoints(&self) { - for endpoint in &self.fallback.servers { - if let Some(state) = get_state(endpoint).await { - if state.is_err() { - reset_endpoint_state(endpoint).await; - } - } - } - } -} - /// Returns `Ok` if the endpoint is usable, i.e. is reachable and has a correct network id and /// chain id. Otherwise it returns `Err`. async fn endpoint_state( @@ -186,7 +68,6 @@ async fn endpoint_state( log, "Error connecting to eth1 node endpoint"; "endpoint" => %endpoint, - "action" => "trying fallbacks" ); EndpointError::RequestFailed(e) }; @@ -202,7 +83,6 @@ async fn endpoint_state( log, "Remote execution node is not synced"; "endpoint" => %endpoint, - "action" => "trying fallbacks" ); return Err(EndpointError::FarBehind); } @@ -211,7 +91,6 @@ async fn endpoint_state( log, "Invalid execution chain ID. 
Please switch to correct chain ID on endpoint"; "endpoint" => %endpoint, - "action" => "trying fallbacks", "expected" => ?config_chain_id, "received" => ?chain_id, ); @@ -240,7 +119,7 @@ async fn get_remote_head_and_new_block_ranges( Option>, Option>, ), - SingleEndpointError, + Error, > { let remote_head_block = download_eth1_block(endpoint, service.inner.clone(), None).await?; let now = SystemTime::now() @@ -253,18 +132,16 @@ async fn get_remote_head_and_new_block_ranges( "Execution endpoint is not synced"; "endpoint" => %endpoint, "last_seen_block_unix_timestamp" => remote_head_block.timestamp, - "action" => "trying fallback" ); - return Err(SingleEndpointError::EndpointError(EndpointError::FarBehind)); + return Err(Error::EndpointError(EndpointError::FarBehind)); } let handle_remote_not_synced = |e| { - if let SingleEndpointError::RemoteNotSynced { .. } = e { + if let Error::RemoteNotSynced { .. } = e { warn!( service.log, "Execution endpoint is not synced"; "endpoint" => %endpoint, - "action" => "trying fallbacks" ); } e @@ -296,16 +173,25 @@ async fn relevant_new_block_numbers_from_endpoint( endpoint: &HttpJsonRpc, service: &Service, head_type: HeadType, -) -> Result>, SingleEndpointError> { +) -> Result>, Error> { let remote_highest_block = endpoint .get_block_number(Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS)) - .map_err(SingleEndpointError::GetBlockNumberFailed) + .map_err(Error::GetBlockNumberFailed) .await?; service.relevant_new_block_numbers(remote_highest_block, None, head_type) } #[derive(Debug, PartialEq)] -pub enum SingleEndpointError { +pub enum Error { + /// There was an inconsistency when adding a block to the cache. + FailedToInsertEth1Block(BlockCacheError), + /// There was an inconsistency when adding a deposit to the cache. + FailedToInsertDeposit(DepositCacheError), + /// A log downloaded from the eth1 contract was not well formed. 
+ FailedToParseDepositLog { + block_range: Range, + error: String, + }, /// Endpoint is currently not functional. EndpointError(EndpointError), /// The remote node is less synced that we expect, it is not useful until has done more @@ -325,21 +211,6 @@ pub enum SingleEndpointError { GetDepositCountFailed(String), /// Failed to read the deposit contract root from the eth1 node. GetDepositLogsFailed(String), -} - -#[derive(Debug, PartialEq)] -pub enum Error { - /// There was an inconsistency when adding a block to the cache. - FailedToInsertEth1Block(BlockCacheError), - /// There was an inconsistency when adding a deposit to the cache. - FailedToInsertDeposit(DepositCacheError), - /// A log downloaded from the eth1 contract was not well formed. - FailedToParseDepositLog { - block_range: Range, - error: String, - }, - /// All possible endpoints returned a `SingleEndpointError`. - FallbackError(FallbackError), /// There was an unexpected internal error. Internal(String), } @@ -367,21 +238,14 @@ pub enum Eth1Endpoint { jwt_id: Option, jwt_version: Option, }, - NoAuth(Vec), + NoAuth(SensitiveUrl), } impl Eth1Endpoint { - fn len(&self) -> usize { + pub fn get_endpoint(&self) -> SensitiveUrl { match &self { - Self::Auth { .. } => 1, - Self::NoAuth(urls) => urls.len(), - } - } - - pub fn get_endpoints(&self) -> Vec { - match &self { - Self::Auth { endpoint, .. } => vec![endpoint.clone()], - Self::NoAuth(endpoints) => endpoints.clone(), + Self::Auth { endpoint, .. } => endpoint.clone(), + Self::NoAuth(endpoint) => endpoint.clone(), } } } @@ -389,7 +253,7 @@ impl Eth1Endpoint { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { /// An Eth1 node (e.g., Geth) running a HTTP JSON-RPC endpoint. - pub endpoints: Eth1Endpoint, + pub endpoint: Eth1Endpoint, /// The address the `BlockCache` and `DepositCache` should assume is the canonical deposit contract. 
pub deposit_contract_address: String, /// The eth1 chain id where the deposit contract is deployed (Goerli/Mainnet). @@ -466,8 +330,10 @@ impl Config { impl Default for Config { fn default() -> Self { Self { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(DEFAULT_ETH1_ENDPOINT) - .expect("The default Eth1 endpoint must always be a valid URL.")]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(DEFAULT_ETH1_ENDPOINT) + .expect("The default Eth1 endpoint must always be a valid URL."), + ), deposit_contract_address: "0x0000000000000000000000000000000000000000".into(), chain_id: DEFAULT_CHAIN_ID, deposit_contract_deploy_block: 1, @@ -485,6 +351,24 @@ impl Default for Config { } } +pub fn endpoint_from_config(config: &Config) -> Result { + match config.endpoint.clone() { + Eth1Endpoint::Auth { + endpoint, + jwt_path, + jwt_id, + jwt_version, + } => { + let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version) + .map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?; + HttpJsonRpc::new_with_auth(endpoint, auth) + .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) + } + Eth1Endpoint::NoAuth(endpoint) => HttpJsonRpc::new(endpoint) + .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)), + } +} + /// Provides a set of Eth1 caches and async functions to update them. /// /// Stores the following caches: @@ -499,20 +383,24 @@ pub struct Service { impl Service { /// Creates a new service. Does not attempt to connect to the eth1 node. 
- pub fn new(config: Config, log: Logger, spec: ChainSpec) -> Self { - Self { + pub fn new(config: Config, log: Logger, spec: ChainSpec) -> Result { + Ok(Self { inner: Arc::new(Inner { block_cache: <_>::default(), deposit_cache: RwLock::new(DepositUpdater::new( config.deposit_contract_deploy_block, )), - endpoints_cache: RwLock::new(None), + endpoint: endpoint_from_config(&config)?, remote_head_block: RwLock::new(None), config: RwLock::new(config), spec, }), log, - } + }) + } + + pub fn client(&self) -> &HttpJsonRpc { + &self.inner.endpoint } /// Returns the follow distance that has been shortened to accommodate for differences in the @@ -676,52 +564,6 @@ impl Service { self.inner.config.write().lowest_cached_block_number = block_number; } - /// Builds a new `EndpointsCache` with empty states. - pub fn init_endpoints(&self) -> Result, String> { - let endpoints = self.config().endpoints.clone(); - let config_chain_id = self.config().chain_id.clone(); - - let servers = match endpoints { - Eth1Endpoint::Auth { - jwt_path, - endpoint, - jwt_id, - jwt_version, - } => { - let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version) - .map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?; - vec![HttpJsonRpc::new_with_auth(endpoint, auth) - .map_err(|e| format!("Failed to build auth enabled json rpc {:?}", e))?] - } - Eth1Endpoint::NoAuth(urls) => urls - .into_iter() - .map(|url| { - HttpJsonRpc::new(url).map_err(|e| format!("Failed to build json rpc {:?}", e)) - }) - .collect::>()?, - }; - let new_cache = Arc::new(EndpointsCache { - fallback: Fallback::new(servers.into_iter().map(EndpointWithState::new).collect()), - config_chain_id, - log: self.log.clone(), - }); - - let mut endpoints_cache = self.inner.endpoints_cache.write(); - *endpoints_cache = Some(new_cache.clone()); - Ok(new_cache) - } - - /// Returns the cached `EndpointsCache` if it exists or builds a new one. 
- pub fn get_endpoints(&self) -> Result, String> { - let endpoints_cache = self.inner.endpoints_cache.read(); - if let Some(cache) = endpoints_cache.clone() { - Ok(cache) - } else { - drop(endpoints_cache); - self.init_endpoints() - } - } - /// Update the deposit and block cache, returning an error if either fail. /// /// ## Returns @@ -733,56 +575,28 @@ impl Service { pub async fn update( &self, ) -> Result<(DepositCacheUpdateOutcome, BlockCacheUpdateOutcome), String> { - let endpoints = self.get_endpoints()?; - - // Reset the state of any endpoints which have errored so their state can be redetermined. - endpoints.reset_errorred_endpoints().await; - + let client = self.client(); + let log = self.log.clone(); + let chain_id = self.config().chain_id.clone(); let node_far_behind_seconds = self.inner.config.read().node_far_behind_seconds; - let process_single_err = |e: &FallbackError| { - match e { - FallbackError::AllErrored(errors) => { - if errors - .iter() - .all(|error| matches!(error, SingleEndpointError::EndpointError(_))) - { - error!( - self.log, - "No synced execution endpoint"; - "advice" => "ensure you have an execution node configured via \ - --execution-endpoint or if pre-merge, --eth1-endpoints" - ); - } - } + match endpoint_state(client, &chain_id, &log).await { + Ok(()) => crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 1), + Err(e) => { + crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 0); + return Err(format!("Invalid endpoint state: {:?}", e)); } - endpoints.fallback.map_format_error(|s| &s.client, e) - }; - - let process_err = |e: Error| match &e { - Error::FallbackError(f) => process_single_err(f), - e => format!("{:?}", e), - }; - - let ( - (remote_head_block, new_block_numbers_deposit, new_block_numbers_block_cache), - num_errors, - ) = endpoints - .first_success(|e| async move { - get_remote_head_and_new_block_ranges(e, self, node_far_behind_seconds).await - }) - .await - .map_err(|e| format!("{:?}", process_single_err(&e)))?; - - if 
num_errors > 0 { - info!(self.log, "Fetched data from fallback"; "fallback_number" => num_errors); } + let (remote_head_block, new_block_numbers_deposit, new_block_numbers_block_cache) = + get_remote_head_and_new_block_ranges(client, self, node_far_behind_seconds) + .await + .map_err(|e| format!("Failed to get remote head and new block ranges: {:?}", e))?; *self.inner.remote_head_block.write() = Some(remote_head_block); let update_deposit_cache = async { let outcome_result = self - .update_deposit_cache(Some(new_block_numbers_deposit), &endpoints) + .update_deposit_cache(Some(new_block_numbers_deposit)) .await; // Reset the `last_procesed block` to the last valid deposit's block number. @@ -804,8 +618,8 @@ impl Service { deposit_cache.last_processed_block = deposit_cache.cache.latest_block_number(); } - let outcome = outcome_result - .map_err(|e| format!("Failed to update deposit cache: {:?}", process_err(e)))?; + let outcome = + outcome_result.map_err(|e| format!("Failed to update deposit cache: {:?}", e))?; trace!( self.log, @@ -819,14 +633,9 @@ impl Service { let update_block_cache = async { let outcome = self - .update_block_cache(Some(new_block_numbers_block_cache), &endpoints) + .update_block_cache(Some(new_block_numbers_block_cache)) .await - .map_err(|e| { - format!( - "Failed to update deposit contract block cache: {:?}", - process_err(e) - ) - })?; + .map_err(|e| format!("Failed to update deposit contract block cache: {:?}", e))?; trace!( self.log, @@ -858,7 +667,6 @@ impl Service { let mut interval = interval_at(Instant::now(), update_interval); - let num_fallbacks = self.config().endpoints.len() - 1; let update_future = async move { loop { interval.tick().await; @@ -866,15 +674,6 @@ impl Service { } }; - // Set the number of configured eth1 servers - metrics::set_gauge(&metrics::ETH1_FALLBACK_CONFIGURED, num_fallbacks as i64); - // Since we lazily update eth1 fallbacks, it's not possible to know connection status of fallback. 
- // Hence, we set it to 1 if we have atleast one configured fallback. - if num_fallbacks > 0 { - metrics::set_gauge(&metrics::ETH1_FALLBACK_CONNECTED, 1); - } else { - metrics::set_gauge(&metrics::ETH1_FALLBACK_CONNECTED, 0); - } handle.spawn(update_future, "eth1"); } @@ -904,7 +703,7 @@ impl Service { remote_highest_block_number: u64, remote_highest_block_timestamp: Option, head_type: HeadType, - ) -> Result>, SingleEndpointError> { + ) -> Result>, Error> { let follow_distance = self.cache_follow_distance(); let latest_cached_block = self.latest_cached_block(); let next_required_block = match head_type { @@ -948,8 +747,8 @@ impl Service { pub async fn update_deposit_cache( &self, new_block_numbers: Option>>, - endpoints: &EndpointsCache, ) -> Result { + let client = self.client(); let deposit_contract_address = self.config().deposit_contract_address.clone(); let blocks_per_log_query = self.config().blocks_per_log_query; @@ -961,13 +760,10 @@ impl Service { let range = { match new_block_numbers { Some(range) => range, - None => endpoints - .first_success(|e| async move { - relevant_new_block_numbers_from_endpoint(e, self, HeadType::Deposit).await - }) - .await - .map(|(res, _)| res) - .map_err(Error::FallbackError)?, + None => { + relevant_new_block_numbers_from_endpoint(client, self, HeadType::Deposit) + .await? + } } }; @@ -1001,20 +797,14 @@ impl Service { * Step 1. Download logs. 
*/ let block_range_ref = &block_range; - let logs = endpoints - .first_success(|endpoint| async move { - endpoint - .get_deposit_logs_in_range( - deposit_contract_address_ref, - block_range_ref.clone(), - Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS), - ) - .await - .map_err(SingleEndpointError::GetDepositLogsFailed) - }) + let logs = client + .get_deposit_logs_in_range( + deposit_contract_address_ref, + block_range_ref.clone(), + Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS), + ) .await - .map(|(res, _)| res) - .map_err(Error::FallbackError)?; + .map_err(Error::GetDepositLogsFailed)?; /* * Step 2. Import logs to cache. @@ -1050,7 +840,7 @@ impl Service { logs_imported += 1; } - Ok(()) + Ok::<_, Error>(()) })?; debug!( @@ -1105,8 +895,8 @@ impl Service { pub async fn update_block_cache( &self, new_block_numbers: Option>>, - endpoints: &EndpointsCache, ) -> Result { + let client = self.client(); let block_cache_truncation = self.config().block_cache_truncation; let max_blocks_per_update = self .config() @@ -1116,14 +906,10 @@ impl Service { let range = { match new_block_numbers { Some(range) => range, - None => endpoints - .first_success(|e| async move { - relevant_new_block_numbers_from_endpoint(e, self, HeadType::BlockCache) - .await - }) - .await - .map(|(res, _)| res) - .map_err(Error::FallbackError)?, + None => { + relevant_new_block_numbers_from_endpoint(client, self, HeadType::BlockCache) + .await? 
+ } } }; @@ -1183,13 +969,8 @@ impl Service { let mut blocks_imported = 0; for block_number in required_block_numbers { - let eth1_block = endpoints - .first_success(|e| async move { - download_eth1_block(e, self.inner.clone(), Some(block_number)).await - }) - .await - .map(|(res, _)| res) - .map_err(Error::FallbackError)?; + let eth1_block = + download_eth1_block(client, self.inner.clone(), Some(block_number)).await?; self.inner .block_cache @@ -1269,7 +1050,7 @@ fn relevant_block_range( cache_follow_distance: u64, latest_cached_block: Option<&Eth1Block>, spec: &ChainSpec, -) -> Result>, SingleEndpointError> { +) -> Result>, Error> { // If the latest cached block is lagging the head block by more than `cache_follow_distance` // times the expected block time then the eth1 block time is likely quite different from what we // assumed. @@ -1304,7 +1085,7 @@ fn relevant_block_range( // // We assume that the `cache_follow_distance` should be sufficient to ensure this never // happens, otherwise it is an error. 
- Err(SingleEndpointError::RemoteNotSynced { + Err(Error::RemoteNotSynced { next_required_block, remote_highest_block: remote_highest_block_number, cache_follow_distance, @@ -1325,7 +1106,7 @@ async fn download_eth1_block( endpoint: &HttpJsonRpc, cache: Arc, block_number_opt: Option, -) -> Result { +) -> Result { let deposit_root = block_number_opt.and_then(|block_number| { cache .deposit_cache @@ -1350,7 +1131,7 @@ async fn download_eth1_block( .unwrap_or_else(|| BlockQuery::Latest), Duration::from_millis(GET_BLOCK_TIMEOUT_MILLIS), ) - .map_err(SingleEndpointError::BlockDownloadFailed) + .map_err(Error::BlockDownloadFailed) .await?; Ok(Eth1Block { diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index f7f3b6e703..9f81f91e19 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -117,10 +117,9 @@ mod eth1_cache { let initial_block_number = get_block_number(&web3).await; let config = Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: initial_block_number, follow_distance, @@ -128,7 +127,8 @@ mod eth1_cache { }; let cache_follow_distance = config.cache_follow_distance(); - let service = Service::new(config, log.clone(), MainnetEthSpec::default_spec()); + let service = + Service::new(config, log.clone(), MainnetEthSpec::default_spec()).unwrap(); // Create some blocks and then consume them, performing the test `rounds` times. 
for round in 0..2 { @@ -149,19 +149,17 @@ mod eth1_cache { eth1.ganache.evm_mine().await.expect("should mine block"); } - let endpoints = service.init_endpoints().unwrap(); - service - .update_deposit_cache(None, &endpoints) + .update_deposit_cache(None) .await .expect("should update deposit cache"); service - .update_block_cache(None, &endpoints) + .update_block_cache(None) .await .expect("should update block cache"); service - .update_block_cache(None, &endpoints) + .update_block_cache(None) .await .expect("should update cache when nothing has changed"); @@ -201,10 +199,9 @@ mod eth1_cache { let service = Service::new( Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: get_block_number(&web3).await, follow_distance: 0, @@ -213,7 +210,8 @@ mod eth1_cache { }, log, MainnetEthSpec::default_spec(), - ); + ) + .unwrap(); let blocks = cache_len * 2; @@ -221,14 +219,12 @@ mod eth1_cache { eth1.ganache.evm_mine().await.expect("should mine block") } - let endpoints = service.init_endpoints().unwrap(); - service - .update_deposit_cache(None, &endpoints) + .update_deposit_cache(None) .await .expect("should update deposit cache"); service - .update_block_cache(None, &endpoints) + .update_block_cache(None) .await .expect("should update block cache"); @@ -258,10 +254,9 @@ mod eth1_cache { let service = Service::new( Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: get_block_number(&web3).await, follow_distance: 0, @@ -270,19 +265,19 @@ mod eth1_cache { }, log, MainnetEthSpec::default_spec(), - ); 
+ ) + .unwrap(); for _ in 0..4u8 { for _ in 0..cache_len / 2 { eth1.ganache.evm_mine().await.expect("should mine block") } - let endpoints = service.init_endpoints().unwrap(); service - .update_deposit_cache(None, &endpoints) + .update_deposit_cache(None) .await .expect("should update deposit cache"); service - .update_block_cache(None, &endpoints) + .update_block_cache(None) .await .expect("should update block cache"); } @@ -311,10 +306,9 @@ mod eth1_cache { let service = Service::new( Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: get_block_number(&web3).await, follow_distance: 0, @@ -322,21 +316,21 @@ mod eth1_cache { }, log, MainnetEthSpec::default_spec(), - ); + ) + .unwrap(); for _ in 0..n { eth1.ganache.evm_mine().await.expect("should mine block") } - let endpoints = service.init_endpoints().unwrap(); futures::try_join!( - service.update_deposit_cache(None, &endpoints), - service.update_deposit_cache(None, &endpoints) + service.update_deposit_cache(None), + service.update_deposit_cache(None) ) .expect("should perform two simultaneous updates of deposit cache"); futures::try_join!( - service.update_block_cache(None, &endpoints), - service.update_block_cache(None, &endpoints) + service.update_block_cache(None), + service.update_block_cache(None) ) .expect("should perform two simultaneous updates of block cache"); @@ -366,10 +360,9 @@ mod deposit_tree { let service = Service::new( Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: start_block, follow_distance: 0, @@ -377,7 
+370,8 @@ mod deposit_tree { }, log, MainnetEthSpec::default_spec(), - ); + ) + .unwrap(); for round in 0..3 { let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); @@ -389,15 +383,13 @@ mod deposit_tree { .expect("should perform a deposit"); } - let endpoints = service.init_endpoints().unwrap(); - service - .update_deposit_cache(None, &endpoints) + .update_deposit_cache(None) .await .expect("should perform update"); service - .update_deposit_cache(None, &endpoints) + .update_deposit_cache(None) .await .expect("should perform update when nothing has changed"); @@ -449,10 +441,9 @@ mod deposit_tree { let service = Service::new( Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: start_block, lowest_cached_block_number: start_block, @@ -461,7 +452,8 @@ mod deposit_tree { }, log, MainnetEthSpec::default_spec(), - ); + ) + .unwrap(); let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); @@ -472,10 +464,9 @@ mod deposit_tree { .expect("should perform a deposit"); } - let endpoints = service.init_endpoints().unwrap(); futures::try_join!( - service.update_deposit_cache(None, &endpoints), - service.update_deposit_cache(None, &endpoints) + service.update_deposit_cache(None), + service.update_deposit_cache(None) ) .expect("should perform two updates concurrently"); @@ -706,10 +697,9 @@ mod fast { let now = get_block_number(&web3).await; let service = Service::new( Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: now, lowest_cached_block_number: now, @@ 
-719,7 +709,8 @@ mod fast { }, log, MainnetEthSpec::default_spec(), - ); + ) + .unwrap(); let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap()).unwrap(); let n = 10; let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); @@ -732,9 +723,8 @@ mod fast { eth1.ganache.evm_mine().await.expect("should mine block"); } - let endpoints = service.init_endpoints().unwrap(); service - .update_deposit_cache(None, &endpoints) + .update_deposit_cache(None) .await .expect("should perform update"); @@ -787,10 +777,9 @@ mod persist { let now = get_block_number(&web3).await; let config = Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: now, lowest_cached_block_number: now, @@ -798,7 +787,8 @@ mod persist { block_cache_truncation: None, ..Config::default() }; - let service = Service::new(config.clone(), log.clone(), MainnetEthSpec::default_spec()); + let service = + Service::new(config.clone(), log.clone(), MainnetEthSpec::default_spec()).unwrap(); let n = 10; let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); for deposit in &deposits { @@ -808,9 +798,8 @@ mod persist { .expect("should perform a deposit"); } - let endpoints = service.init_endpoints().unwrap(); service - .update_deposit_cache(None, &endpoints) + .update_deposit_cache(None) .await .expect("should perform update"); @@ -822,7 +811,7 @@ mod persist { let deposit_count = service.deposit_cache_len(); service - .update_block_cache(None, &endpoints) + .update_block_cache(None) .await .expect("should perform update"); @@ -855,228 +844,3 @@ mod persist { .await; } } - -/// Tests for eth1 fallback -mod fallbacks { - use super::*; - use tokio::time::sleep; - - #[tokio::test] - async fn test_fallback_when_offline() { - async { - 
let log = null_logger(); - let endpoint2 = new_ganache_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = &endpoint2.deposit_contract; - - let initial_block_number = get_block_number(&endpoint2.web3()).await; - - // Create some blocks and then consume them, performing the test `rounds` times. - let new_blocks = 4; - - for _ in 0..new_blocks { - endpoint2 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - let endpoint1 = endpoint2 - .ganache - .fork() - .expect("should start eth1 environment"); - - //mine additional blocks on top of the original endpoint - for _ in 0..new_blocks { - endpoint2 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - let service = Service::new( - Config { - endpoints: Eth1Endpoint::NoAuth(vec![ - SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), - SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), - ]), - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: initial_block_number, - follow_distance: 0, - ..Config::default() - }, - log.clone(), - MainnetEthSpec::default_spec(), - ); - - let endpoint1_block_number = get_block_number(&endpoint1.web3).await; - //the first call will only query endpoint1 - service.update().await.expect("should update deposit cache"); - assert_eq!( - service.deposits().read().last_processed_block.unwrap(), - endpoint1_block_number - ); - - drop(endpoint1); - - let endpoint2_block_number = get_block_number(&endpoint2.web3()).await; - assert!(endpoint1_block_number < endpoint2_block_number); - //endpoint1 is offline => query will import blocks from endpoint2 - service.update().await.expect("should update deposit cache"); - assert_eq!( - service.deposits().read().last_processed_block.unwrap(), - endpoint2_block_number - ); - } - .await; - } - - #[tokio::test] - async fn test_fallback_when_wrong_chain_id() { - async { - let log = null_logger(); - let correct_chain_id: u64 = 
DEFAULT_CHAIN_ID.into(); - let wrong_chain_id = correct_chain_id + 1; - let endpoint1 = GanacheEth1Instance::new(wrong_chain_id) - .await - .expect("should start eth1 environment"); - let endpoint2 = new_ganache_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = &endpoint2.deposit_contract; - - let initial_block_number = get_block_number(&endpoint2.web3()).await; - - // Create some blocks and then consume them, performing the test `rounds` times. - let new_blocks = 4; - - for _ in 0..new_blocks { - endpoint1 - .ganache - .evm_mine() - .await - .expect("should mine block"); - endpoint2 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - //additional blocks for endpoint1 to be able to distinguish - for _ in 0..new_blocks { - endpoint1 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - let service = Service::new( - Config { - endpoints: Eth1Endpoint::NoAuth(vec![ - SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), - SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), - ]), - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: initial_block_number, - follow_distance: 0, - ..Config::default() - }, - log.clone(), - MainnetEthSpec::default_spec(), - ); - - let endpoint1_block_number = get_block_number(&endpoint1.web3()).await; - let endpoint2_block_number = get_block_number(&endpoint2.web3()).await; - assert!(endpoint2_block_number < endpoint1_block_number); - //the call will fallback to endpoint2 - service.update().await.expect("should update deposit cache"); - assert_eq!( - service.deposits().read().last_processed_block.unwrap(), - endpoint2_block_number - ); - } - .await; - } - - #[tokio::test] - async fn test_fallback_when_node_far_behind() { - async { - let log = null_logger(); - let endpoint2 = new_ganache_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = &endpoint2.deposit_contract; - - let 
initial_block_number = get_block_number(&endpoint2.web3()).await; - - // Create some blocks and then consume them, performing the test `rounds` times. - let new_blocks = 4; - - for _ in 0..new_blocks { - endpoint2 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - let endpoint1 = endpoint2 - .ganache - .fork() - .expect("should start eth1 environment"); - - let service = Service::new( - Config { - endpoints: Eth1Endpoint::NoAuth(vec![ - SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), - SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), - ]), - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: initial_block_number, - follow_distance: 0, - node_far_behind_seconds: 5, - ..Config::default() - }, - log.clone(), - MainnetEthSpec::default_spec(), - ); - - let endpoint1_block_number = get_block_number(&endpoint1.web3).await; - //the first call will only query endpoint1 - service.update().await.expect("should update deposit cache"); - assert_eq!( - service.deposits().read().last_processed_block.unwrap(), - endpoint1_block_number - ); - - sleep(Duration::from_secs(7)).await; - - //both endpoints don't have recent blocks => should return error - assert!(service.update().await.is_err()); - - //produce some new blocks on endpoint2 - for _ in 0..new_blocks { - endpoint2 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - let endpoint2_block_number = get_block_number(&endpoint2.web3()).await; - - //endpoint1 is far behind + endpoint2 not => update will import blocks from endpoint2 - service.update().await.expect("should update deposit cache"); - assert_eq!( - service.deposits().read().last_processed_block.unwrap(), - endpoint2_block_number - ); - } - .await; - } -} diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index 089f79aa11..5614e237ff 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ 
b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -43,7 +43,7 @@ impl Eth1GenesisService { /// Creates a new service. Does not attempt to connect to the Eth1 node. /// /// Modifies the given `config` to make it more suitable to the task of listening to genesis. - pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Self { + pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Result { let config = Eth1Config { // Truncating the block cache makes searching for genesis more // complicated. @@ -64,15 +64,16 @@ impl Eth1GenesisService { ..config }; - Self { - eth1_service: Eth1Service::new(config, log, spec), + Ok(Self { + eth1_service: Eth1Service::new(config, log, spec) + .map_err(|e| format!("Failed to create eth1 service: {:?}", e))?, stats: Arc::new(Statistics { highest_processed_block: AtomicU64::new(0), active_validator_count: AtomicUsize::new(0), total_deposit_count: AtomicUsize::new(0), latest_timestamp: AtomicU64::new(0), }), - } + }) } /// Returns the first eth1 block that has enough deposits that it's a (potentially invalid) @@ -112,11 +113,9 @@ impl Eth1GenesisService { "Importing eth1 deposit logs"; ); - let endpoints = eth1_service.init_endpoints()?; - loop { let update_result = eth1_service - .update_deposit_cache(None, &endpoints) + .update_deposit_cache(None) .await .map_err(|e| format!("{:?}", e)); @@ -158,7 +157,7 @@ impl Eth1GenesisService { } // Download new eth1 blocks into the cache. 
- let blocks_imported = match eth1_service.update_block_cache(None, &endpoints).await { + let blocks_imported = match eth1_service.update_block_cache(None).await { Ok(outcome) => { debug!( log, diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index 74a054fcc0..58f28702b0 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -44,10 +44,9 @@ fn basic() { let service = Eth1GenesisService::new( Eth1Config { - endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( - eth1.endpoint().as_str(), - ) - .unwrap()]), + endpoint: Eth1Endpoint::NoAuth( + SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), + ), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: now, lowest_cached_block_number: now, @@ -57,7 +56,8 @@ fn basic() { }, log, spec.clone(), - ); + ) + .unwrap(); // NOTE: this test is sensitive to the response speed of the external web3 server. If // you're experiencing failures, try increasing the update_interval. 
diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index a0dbf40b29..eaf91ce9df 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -131,7 +131,8 @@ pub async fn create_api_server_on_port( pm.inject_connection_established(&peer_id, &con_id, &connected_point, None, 0); *network_globals.sync_state.write() = SyncState::Synced; - let eth1_service = eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()); + let eth1_service = + eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap(); let context = Arc::new(Context { config: Config { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 9b5f65622a..51e8762f1c 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -372,9 +372,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long("eth1-endpoints") .value_name("HTTP-ENDPOINTS") .conflicts_with("eth1-endpoint") - .help("One or more comma-delimited server endpoints for web3 connection. \ - If multiple endpoints are given the endpoints are used as fallback in the \ - given order. Also enables the --eth1 flag. \ + .help("One http endpoint for a web3 connection to an execution node. \ + Note: This flag is now only useful for testing, use `--execution-endpoint` \ + flag to connect to an execution node on mainnet and testnets. 
Defaults to http://127.0.0.1:8545.") .takes_value(true) ) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 982cb82ed4..f1d0fb35a3 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -230,17 +230,14 @@ pub fn get_config( ); client_config.sync_eth1_chain = true; - let endpoints = vec![SensitiveUrl::parse(endpoint) - .map_err(|e| format!("eth1-endpoint was an invalid URL: {:?}", e))?]; - client_config.eth1.endpoints = Eth1Endpoint::NoAuth(endpoints); - } else if let Some(endpoints) = cli_args.value_of("eth1-endpoints") { + let endpoint = SensitiveUrl::parse(endpoint) + .map_err(|e| format!("eth1-endpoint was an invalid URL: {:?}", e))?; + client_config.eth1.endpoint = Eth1Endpoint::NoAuth(endpoint); + } else if let Some(endpoint) = cli_args.value_of("eth1-endpoints") { client_config.sync_eth1_chain = true; - let endpoints = endpoints - .split(',') - .map(SensitiveUrl::parse) - .collect::>() + let endpoint = SensitiveUrl::parse(endpoint) .map_err(|e| format!("eth1-endpoints contains an invalid URL {:?}", e))?; - client_config.eth1.endpoints = Eth1Endpoint::NoAuth(endpoints); + client_config.eth1.endpoint = Eth1Endpoint::NoAuth(endpoint); } if let Some(val) = cli_args.value_of("eth1-blocks-per-log-query") { @@ -326,7 +323,7 @@ pub fn get_config( --eth1-endpoints has been deprecated for post-merge configurations" ); } - client_config.eth1.endpoints = Eth1Endpoint::Auth { + client_config.eth1.endpoint = Eth1Endpoint::Auth { endpoint: execution_endpoint, jwt_path: secret_file, jwt_id: el_config.jwt_id.clone(), diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 690271022a..9fd6882202 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -117,7 +117,7 @@ impl ProductionBeaconNode { info!( log, "Block production enabled"; - "endpoints" => format!("{:?}", &client_config.eth1.endpoints), + "endpoint" => format!("{:?}", &client_config.eth1.endpoint), "method" => "json rpc via http" ); builder diff --git 
a/common/fallback/Cargo.toml b/common/fallback/Cargo.toml deleted file mode 100644 index 0d71bbbd27..0000000000 --- a/common/fallback/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "fallback" -version = "0.1.0" -authors = ["blacktemplar "] -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -itertools = "0.10.0" diff --git a/common/fallback/src/lib.rs b/common/fallback/src/lib.rs deleted file mode 100644 index 70f327d204..0000000000 --- a/common/fallback/src/lib.rs +++ /dev/null @@ -1,63 +0,0 @@ -use itertools::{join, zip}; -use std::fmt::{Debug, Display}; -use std::future::Future; - -#[derive(Clone)] -pub struct Fallback { - pub servers: Vec, -} - -#[derive(Debug, PartialEq)] -pub enum FallbackError { - AllErrored(Vec), -} - -impl Fallback { - pub fn new(servers: Vec) -> Self { - Self { servers } - } - - /// Return the first successful result along with number of previous errors encountered - /// or all the errors encountered if every server fails. 
- pub async fn first_success<'a, F, O, E, R>( - &'a self, - func: F, - ) -> Result<(O, usize), FallbackError> - where - F: Fn(&'a T) -> R, - R: Future>, - { - let mut errors = vec![]; - for server in &self.servers { - match func(server).await { - Ok(val) => return Ok((val, errors.len())), - Err(e) => errors.push(e), - } - } - Err(FallbackError::AllErrored(errors)) - } - - pub fn map_format_error<'a, E, F, S>(&'a self, f: F, error: &FallbackError) -> String - where - F: FnMut(&'a T) -> &'a S, - S: Display + 'a, - E: Debug, - { - match error { - FallbackError::AllErrored(v) => format!( - "All fallbacks errored: {}", - join( - zip(self.servers.iter().map(f), v.iter()) - .map(|(server, error)| format!("{} => {:?}", server, error)), - ", " - ) - ), - } - } -} - -impl Fallback { - pub fn format_error(&self, error: &FallbackError) -> String { - self.map_format_error(|s| s, error) - } -} diff --git a/common/monitoring_api/src/gather.rs b/common/monitoring_api/src/gather.rs index 8699a8cf2c..b59a6dfb89 100644 --- a/common/monitoring_api/src/gather.rs +++ b/common/monitoring_api/src/gather.rs @@ -43,6 +43,16 @@ impl JsonMetric { } } } + + /// Return a default json value given given the metric type. + fn get_typed_value_default(&self) -> serde_json::Value { + match self.ty { + JsonType::Integer => json!(0), + JsonType::Boolean => { + json!(false) + } + } + } } /// The required metrics for the beacon and validator processes. @@ -155,6 +165,16 @@ pub fn gather_metrics(metrics_map: &HashMap) -> Option( .value_of("eth1-endpoint") .map(|e| { warn!("The --eth1-endpoint flag is deprecated. 
Please use --eth1-endpoints instead"); - vec![String::from(e)] + String::from(e) }) - .or_else(|| { - matches - .value_of("eth1-endpoints") - .map(|s| s.split(',').map(String::from).collect()) - }); + .or_else(|| matches.value_of("eth1-endpoints").map(String::from)); let mut eth2_network_config = Eth2NetworkConfig::load(testnet_dir.clone())?; @@ -35,12 +31,9 @@ pub fn run( let mut config = Eth1Config::default(); if let Some(v) = endpoints.clone() { - let endpoints = v - .iter() - .map(|s| SensitiveUrl::parse(s)) - .collect::>() + let endpoint = SensitiveUrl::parse(&v) .map_err(|e| format!("Unable to parse eth1 endpoint URL: {:?}", e))?; - config.endpoints = Eth1Endpoint::NoAuth(endpoints); + config.endpoint = Eth1Endpoint::NoAuth(endpoint); } config.deposit_contract_address = format!("{:?}", spec.deposit_contract_address); config.deposit_contract_deploy_block = eth2_network_config.deposit_contract_deploy_block; @@ -49,7 +42,7 @@ pub fn run( config.node_far_behind_seconds = max(5, config.follow_distance) * spec.seconds_per_eth1_block; let genesis_service = - Eth1GenesisService::new(config, env.core_context().log().clone(), spec.clone()); + Eth1GenesisService::new(config, env.core_context().log().clone(), spec.clone())?; env.runtime().block_on(async { let _ = genesis_service diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 661bbcdb0c..288d18c1fa 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -68,7 +68,7 @@ fn staking_flag() { assert!(config.http_api.enabled); assert!(config.sync_eth1_chain); assert_eq!( - config.eth1.endpoints.get_endpoints()[0].to_string(), + config.eth1.endpoint.get_endpoint().to_string(), DEFAULT_ETH1_ENDPOINT ); }); @@ -293,28 +293,17 @@ fn eth1_flag() { #[test] fn eth1_endpoints_flag() { CommandLineTest::new() - .flag( - "eth1-endpoints", - Some("http://localhost:9545,https://infura.io/secret"), - ) + .flag("eth1-endpoints", Some("http://localhost:9545")) 
.run_with_zero_port() .with_config(|config| { assert_eq!( - config.eth1.endpoints.get_endpoints()[0].full.to_string(), + config.eth1.endpoint.get_endpoint().full.to_string(), "http://localhost:9545/" ); assert_eq!( - config.eth1.endpoints.get_endpoints()[0].to_string(), + config.eth1.endpoint.get_endpoint().to_string(), "http://localhost:9545/" ); - assert_eq!( - config.eth1.endpoints.get_endpoints()[1].full.to_string(), - "https://infura.io/secret" - ); - assert_eq!( - config.eth1.endpoints.get_endpoints()[1].to_string(), - "https://infura.io/" - ); assert!(config.sync_eth1_chain); }); } @@ -429,7 +418,7 @@ fn run_execution_endpoints_overrides_eth1_endpoints_test(eth1_flag: &str, execut // The eth1 endpoint should have been set to the --execution-endpoint value in defiance // of --eth1-endpoints. assert_eq!( - config.eth1.endpoints, + config.eth1.endpoint, Eth1Endpoint::Auth { endpoint: SensitiveUrl::parse(execution_endpoint).unwrap(), jwt_path: jwt_path.clone(), @@ -624,7 +613,7 @@ fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_fl assert_eq!(el_config.jwt_id, Some(id.to_string())); assert_eq!(el_config.jwt_version, Some(version.to_string())); assert_eq!( - config.eth1.endpoints, + config.eth1.endpoint, Eth1Endpoint::Auth { endpoint: SensitiveUrl::parse(execution_endpoint).unwrap(), jwt_path: dir.path().join(jwt_file), diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 5e346d5466..182a66b498 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -1,4 +1,4 @@ -use crate::local_network::{EXECUTION_PORT, INVALID_ADDRESS, TERMINAL_BLOCK, TERMINAL_DIFFICULTY}; +use crate::local_network::{EXECUTION_PORT, TERMINAL_BLOCK, TERMINAL_DIFFICULTY}; use crate::{checks, LocalNetwork, E}; use clap::ArgMatches; use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; @@ -138,7 +138,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let mut beacon_config = 
testing_client_config(); beacon_config.genesis = ClientGenesis::DepositContract; - beacon_config.eth1.endpoints = Eth1Endpoint::NoAuth(vec![eth1_endpoint]); + beacon_config.eth1.endpoint = Eth1Endpoint::NoAuth(eth1_endpoint); beacon_config.eth1.deposit_contract_address = deposit_contract_address; beacon_config.eth1.deposit_contract_deploy_block = 0; beacon_config.eth1.lowest_cached_block_number = 0; @@ -173,18 +173,8 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { /* * One by one, add beacon nodes to the network. */ - for i in 0..node_count - 1 { - let mut config = beacon_config.clone(); - if i % 2 == 0 { - if let Eth1Endpoint::NoAuth(endpoints) = &mut config.eth1.endpoints { - endpoints.insert( - 0, - SensitiveUrl::parse(INVALID_ADDRESS) - .expect("Unable to parse invalid address"), - ) - } - } - network.add_beacon_node(config).await?; + for _ in 0..node_count - 1 { + network.add_beacon_node(beacon_config.clone()).await?; } /* From 6a92bf70e478c627b981629c98031059f11125c7 Mon Sep 17 00:00:00 2001 From: GeemoCandama Date: Tue, 4 Oct 2022 08:33:40 +0000 Subject: [PATCH 03/21] CLI tests for logging flags (#3609) ## Issue Addressed Adding CLI tests for logging flags: log-color and disable-log-timestamp Which issue # does this PR address? #3588 ## Proposed Changes Add CLI tests for logging flags as described in #3588 Please list or describe the changes introduced by this PR. Added logger_config to client::Config as suggested. Implemented Default for LoggerConfig based on what was being done elsewhere in the repo. Created 2 tests for each flag addressed. ## Additional Info Please provide any additional information. For example, future considerations or information useful for reviewers. 
--- Cargo.lock | 2 ++ beacon_node/client/src/config.rs | 4 +++- lcli/src/main.rs | 4 ++-- lighthouse/environment/Cargo.toml | 2 ++ lighthouse/environment/src/lib.rs | 33 ++++++++++++++++++------- lighthouse/src/main.rs | 11 +++++---- lighthouse/tests/beacon_node.rs | 36 ++++++++++++++++++++++++++++ testing/simulator/src/eth1_sim.rs | 9 +++---- testing/simulator/src/no_eth1_sim.rs | 9 +++---- testing/simulator/src/sync_sim.rs | 6 ++--- 10 files changed, 85 insertions(+), 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8fb8c54929..da07bf4250 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1631,6 +1631,8 @@ dependencies = [ "exit-future", "futures", "logging", + "serde", + "serde_derive", "slog", "slog-async", "slog-json", diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index a5d5b37c7a..5e43c1eaad 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,11 +1,11 @@ use directory::DEFAULT_ROOT_DIR; +use environment::LoggerConfig; use network::NetworkConfig; use sensitive_url::SensitiveUrl; use serde_derive::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; use types::{Graffiti, PublicKeyBytes}; - /// Default directory name for the freezer database under the top-level data dir. const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db"; @@ -72,6 +72,7 @@ pub struct Config { pub http_metrics: http_metrics::Config, pub monitoring_api: Option, pub slasher: Option, + pub logger_config: LoggerConfig, } impl Default for Config { @@ -96,6 +97,7 @@ impl Default for Config { slasher: None, validator_monitor_auto: false, validator_monitor_pubkeys: vec![], + logger_config: LoggerConfig::default(), } } } diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 11a23fe0b4..8b233d847b 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -781,8 +781,8 @@ fn run( .map_err(|e| format!("should start tokio runtime: {:?}", e))? 
.initialize_logger(LoggerConfig { path: None, - debug_level: "trace", - logfile_debug_level: "trace", + debug_level: String::from("trace"), + logfile_debug_level: String::from("trace"), log_format: None, log_color: false, disable_log_timestamp: false, diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index 7dc31e06bf..1ba0bb267c 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -18,6 +18,8 @@ slog-async = "2.5.0" futures = "0.3.7" slog-json = "2.3.0" exit-future = "0.2.0" +serde = "1.0.116" +serde_derive = "1.0.116" [target.'cfg(not(target_family = "unix"))'.dependencies] ctrlc = { version = "3.1.6", features = ["termination"] } diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 46348e63ba..49163b96f4 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -12,6 +12,7 @@ use eth2_network_config::Eth2NetworkConfig; use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::{future, StreamExt}; +use serde_derive::{Deserialize, Serialize}; use slog::{error, info, o, warn, Drain, Duplicate, Level, Logger}; use sloggers::{file::FileLoggerBuilder, types::Format, types::Severity, Build}; use std::fs::create_dir_all; @@ -43,17 +44,33 @@ const MAXIMUM_SHUTDOWN_TIME: u64 = 15; /// - `path` == None, /// - `max_log_size` == 0, /// - `max_log_number` == 0, -pub struct LoggerConfig<'a> { +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct LoggerConfig { pub path: Option, - pub debug_level: &'a str, - pub logfile_debug_level: &'a str, - pub log_format: Option<&'a str>, + pub debug_level: String, + pub logfile_debug_level: String, + pub log_format: Option, pub log_color: bool, pub disable_log_timestamp: bool, pub max_log_size: u64, pub max_log_number: usize, pub compression: bool, } +impl Default for LoggerConfig { + fn default() -> Self { + LoggerConfig { + path: None, + debug_level: String::from("info"), + 
logfile_debug_level: String::from("debug"), + log_format: None, + log_color: false, + disable_log_timestamp: false, + max_log_size: 200, + max_log_number: 5, + compression: false, + } + } +} /// Builds an `Environment`. pub struct EnvironmentBuilder { @@ -135,7 +152,7 @@ impl EnvironmentBuilder { /// Note that background file logging will spawn a new thread. pub fn initialize_logger(mut self, config: LoggerConfig) -> Result { // Setting up the initial logger format and build it. - let stdout_drain = if let Some(format) = config.log_format { + let stdout_drain = if let Some(ref format) = config.log_format { match format.to_uppercase().as_str() { "JSON" => { let stdout_drain = slog_json::Json::default(std::io::stdout()).fuse(); @@ -168,7 +185,7 @@ impl EnvironmentBuilder { .build() }; - let stdout_drain = match config.debug_level { + let stdout_drain = match config.debug_level.as_str() { "info" => stdout_drain.filter_level(Level::Info), "debug" => stdout_drain.filter_level(Level::Debug), "trace" => stdout_drain.filter_level(Level::Trace), @@ -220,7 +237,7 @@ impl EnvironmentBuilder { } } - let logfile_level = match config.logfile_debug_level { + let logfile_level = match config.logfile_debug_level.as_str() { "info" => Severity::Info, "debug" => Severity::Debug, "trace" => Severity::Trace, @@ -233,7 +250,7 @@ impl EnvironmentBuilder { let file_logger = FileLoggerBuilder::new(&path) .level(logfile_level) .channel_size(LOG_CHANNEL_SIZE) - .format(match config.log_format { + .format(match config.log_format.as_deref() { Some("JSON") => Format::Json, _ => Format::default(), }) diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 341e1a91d5..9dc0902e06 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -438,9 +438,9 @@ fn run( let logger_config = LoggerConfig { path: log_path, - debug_level, - logfile_debug_level, - log_format, + debug_level: String::from(debug_level), + logfile_debug_level: String::from(logfile_debug_level), + log_format: 
log_format.map(String::from), log_color, disable_log_timestamp, max_log_size: logfile_max_size * 1_024 * 1_024, @@ -448,7 +448,7 @@ fn run( compression: logfile_compress, }; - let builder = environment_builder.initialize_logger(logger_config)?; + let builder = environment_builder.initialize_logger(logger_config.clone())?; let mut environment = builder .multi_threaded_tokio_runtime()? @@ -528,7 +528,8 @@ fn run( let context = environment.core_context(); let log = context.log().clone(); let executor = context.executor.clone(); - let config = beacon_node::get_config::(matches, &context)?; + let mut config = beacon_node::get_config::(matches, &context)?; + config.logger_config = logger_config; let shutdown_flag = matches.is_present("immediate-shutdown"); // Dump configs if `dump-config` or `dump-chain-config` flags are set clap_utils::check_dump_configs::<_, E>(matches, &config, &context.eth2_config.spec)?; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 288d18c1fa..2e76d832ce 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1454,3 +1454,39 @@ fn monitoring_endpoint() { assert_eq!(api_conf.update_period_secs, Some(30)); }); } + +// Tests for Logger flags. 
+#[test] +fn default_log_color_flag() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert!(!config.logger_config.log_color); + }); +} +#[test] +fn enabled_log_color_flag() { + CommandLineTest::new() + .flag("log-color", None) + .run_with_zero_port() + .with_config(|config| { + assert!(config.logger_config.log_color); + }); +} +#[test] +fn default_disable_log_timestamp_flag() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert!(!config.logger_config.disable_log_timestamp); + }); +} +#[test] +fn enabled_disable_log_timestamp_flag() { + CommandLineTest::new() + .flag("disable-log-timestamp", None) + .run_with_zero_port() + .with_config(|config| { + assert!(config.logger_config.disable_log_timestamp); + }); +} diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 182a66b498..3d59013f2a 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -56,15 +56,12 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { }) .collect::>(); - let log_level = "debug"; - let log_format = None; - let mut env = EnvironmentBuilder::minimal() .initialize_logger(LoggerConfig { path: None, - debug_level: log_level, - logfile_debug_level: "debug", - log_format, + debug_level: String::from("debug"), + logfile_debug_level: String::from("debug"), + log_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index 57e2e01eb6..06f9e9a4f3 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -41,15 +41,12 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { }) .collect::>(); - let log_level = "debug"; - let log_format = None; - let mut env = EnvironmentBuilder::mainnet() .initialize_logger(LoggerConfig { path: None, - debug_level: log_level, - logfile_debug_level: "debug", - 
log_format, + debug_level: String::from("debug"), + logfile_debug_level: String::from("debug"), + log_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index af5ba95e01..00e439e4c9 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -48,9 +48,9 @@ fn syncing_sim( let mut env = EnvironmentBuilder::minimal() .initialize_logger(LoggerConfig { path: None, - debug_level: log_level, - logfile_debug_level: "debug", - log_format, + debug_level: String::from(log_level), + logfile_debug_level: String::from("debug"), + log_format: log_format.map(String::from), log_color: false, disable_log_timestamp: false, max_log_size: 0, From 4926e3967fe3192fa019942e7450de6cf6ed607c Mon Sep 17 00:00:00 2001 From: Divma Date: Tue, 4 Oct 2022 10:37:48 +0000 Subject: [PATCH 04/21] [DEV FEATURE] Deterministic long lived subnets (#3453) ## Issue Addressed #2847 ## Proposed Changes Add under a feature flag the required changes to subscribe to long lived subnets in a deterministic way ## Additional Info There is an additional required change that is actually searching for peers using the prefix, but I find that it's best to make this change in the future --- Cargo.lock | 1 + beacon_node/network/Cargo.toml | 5 + beacon_node/network/src/service.rs | 10 +- .../src/subnet_service/attestation_subnets.rs | 231 +++++++++++++++++- .../network/src/subnet_service/tests/mod.rs | 94 +++++-- consensus/types/src/chain_spec.rs | 25 ++ consensus/types/src/subnet_id.rs | 42 +++- 7 files changed, 371 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index da07bf4250..d0e3622e77 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4150,6 +4150,7 @@ dependencies = [ "error-chain", "eth2_ssz", "eth2_ssz_types", + "ethereum-types 0.12.1", "exit-future", "fnv", "futures", diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 
87c7650fb5..2e7b2227b2 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -44,3 +44,8 @@ strum = "0.24.0" tokio-util = { version = "0.6.3", features = ["time"] } derivative = "2.2.0" delay_map = "0.1.1" +ethereum-types = { version = "0.12.1", optional = true } + +[features] +deterministic_long_lived_attnets = [ "ethereum-types" ] +# default = ["deterministic_long_lived_attnets"] diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index ec8573ea1f..31c42b860d 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -299,9 +299,13 @@ impl NetworkService { )?; // attestation subnet service - let attestation_service = - AttestationService::new(beacon_chain.clone(), config, &network_log); - + let attestation_service = AttestationService::new( + beacon_chain.clone(), + #[cfg(feature = "deterministic_long_lived_attnets")] + network_globals.local_enr().node_id().raw().into(), + config, + &network_log, + ); // sync committee subnet service let sync_committee_service = SyncCommitteeService::new(beacon_chain.clone(), config, &network_log); diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index ee8ba24fc3..70ba1c8170 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -3,7 +3,7 @@ //! determines whether attestations should be aggregated and/or passed to the beacon node. 
use super::SubnetServiceMessage; -#[cfg(test)] +#[cfg(any(test, feature = "deterministic_long_lived_attnets"))] use std::collections::HashSet; use std::collections::{HashMap, VecDeque}; use std::pin::Pin; @@ -15,6 +15,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use delay_map::{HashMapDelay, HashSetDelay}; use futures::prelude::*; use lighthouse_network::{NetworkConfig, Subnet, SubnetDiscovery}; +#[cfg(not(feature = "deterministic_long_lived_attnets"))] use rand::seq::SliceRandom; use slog::{debug, error, o, trace, warn}; use slot_clock::SlotClock; @@ -28,6 +29,7 @@ use crate::metrics; pub(crate) const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; /// The time (in slots) before a last seen validator is considered absent and we unsubscribe from /// the random gossip topics that we subscribed to due to the validator connection. +#[cfg(not(feature = "deterministic_long_lived_attnets"))] const LAST_SEEN_VALIDATOR_TIMEOUT_SLOTS: u32 = 150; /// The fraction of a slot that we subscribe to a subnet before the required slot. /// @@ -70,6 +72,9 @@ pub struct AttestationService { /// Subnets we are currently subscribed to as long lived subscriptions. /// /// We advertise these in our ENR. When these expire, the subnet is removed from our ENR. + #[cfg(feature = "deterministic_long_lived_attnets")] + long_lived_subscriptions: HashSet, + #[cfg(not(feature = "deterministic_long_lived_attnets"))] long_lived_subscriptions: HashMapDelay, /// Short lived subscriptions that need to be done in the future. @@ -83,6 +88,7 @@ pub struct AttestationService { /// subscribed to. As these time out, we unsubscribe for the required random subnets and update /// our ENR. /// This is a set of validator indices. + #[cfg(not(feature = "deterministic_long_lived_attnets"))] known_validators: HashSetDelay, /// The waker for the current thread. @@ -95,8 +101,17 @@ pub struct AttestationService { subscribe_all_subnets: bool, /// For how many slots we subscribe to long lived subnets. 
+ #[cfg(not(feature = "deterministic_long_lived_attnets"))] long_lived_subnet_subscription_slots: u64, + /// Our Discv5 node_id. + #[cfg(feature = "deterministic_long_lived_attnets")] + node_id: ethereum_types::U256, + + /// Future used to manage subscribing and unsubscribing from long lived subnets. + #[cfg(feature = "deterministic_long_lived_attnets")] + next_long_lived_subscription_event: Pin>, + /// The logger for the attestation service. log: slog::Logger, } @@ -104,6 +119,7 @@ pub struct AttestationService { impl AttestationService { /* Public functions */ + #[cfg(not(feature = "deterministic_long_lived_attnets"))] pub fn new( beacon_chain: Arc>, config: &NetworkConfig, @@ -145,31 +161,85 @@ impl AttestationService { } } + #[cfg(feature = "deterministic_long_lived_attnets")] + pub fn new( + beacon_chain: Arc>, + node_id: ethereum_types::U256, + config: &NetworkConfig, + log: &slog::Logger, + ) -> Self { + let log = log.new(o!("service" => "attestation_service")); + + // Calculate the random subnet duration from the spec constants. + let slot_duration = beacon_chain.slot_clock.slot_duration(); + + slog::info!(log, "Deterministic long lived subnets enabled"; "subnets_per_node" => beacon_chain.spec.subnets_per_node); + + let track_validators = !config.import_all_attestations; + let aggregate_validators_on_subnet = + track_validators.then(|| HashSetDelay::new(slot_duration)); + let mut service = AttestationService { + events: VecDeque::with_capacity(10), + beacon_chain, + short_lived_subscriptions: HashMapDelay::new(slot_duration), + long_lived_subscriptions: HashSet::default(), + scheduled_short_lived_subscriptions: HashSetDelay::default(), + aggregate_validators_on_subnet, + waker: None, + discovery_disabled: config.disable_discovery, + subscribe_all_subnets: config.subscribe_all_subnets, + node_id, + next_long_lived_subscription_event: { + // Set a dummy sleep. 
Calculating the current subnet subscriptions will update this + // value with a smarter timing + Box::pin(tokio::time::sleep(Duration::from_secs(1))) + }, + log, + }; + service.recompute_long_lived_subnets(); + service + } + /// Return count of all currently subscribed subnets (long-lived **and** short-lived). #[cfg(test)] pub fn subscription_count(&self) -> usize { if self.subscribe_all_subnets { self.beacon_chain.spec.attestation_subnet_count as usize } else { - self.short_lived_subscriptions + #[cfg(feature = "deterministic_long_lived_attnets")] + let count = self + .short_lived_subscriptions + .keys() + .chain(self.long_lived_subscriptions.iter()) + .collect::>() + .len(); + #[cfg(not(feature = "deterministic_long_lived_attnets"))] + let count = self + .short_lived_subscriptions .keys() .chain(self.long_lived_subscriptions.keys()) .collect::>() - .len() + .len(); + count } } - /// Give access to the current subscriptions for testing purposes. + /// Returns whether we are subscribed to a subnet for testing purposes. #[cfg(test)] - pub(crate) fn subscriptions( + pub(crate) fn is_subscribed( &self, + subnet_id: &SubnetId, subscription_kind: SubscriptionKind, - ) -> &HashMapDelay { + ) -> bool { match subscription_kind { - SubscriptionKind::LongLived => &self.long_lived_subscriptions, - SubscriptionKind::ShortLived => &self.short_lived_subscriptions, + #[cfg(feature = "deterministic_long_lived_attnets")] + SubscriptionKind::LongLived => self.long_lived_subscriptions.contains(subnet_id), + #[cfg(not(feature = "deterministic_long_lived_attnets"))] + SubscriptionKind::LongLived => self.long_lived_subscriptions.contains_key(subnet_id), + SubscriptionKind::ShortLived => self.short_lived_subscriptions.contains_key(subnet_id), } } + /// Processes a list of validator subscriptions. 
/// /// This will: @@ -197,6 +267,7 @@ impl AttestationService { "Validator subscription"; "subscription" => ?subscription, ); + #[cfg(not(feature = "deterministic_long_lived_attnets"))] self.add_known_validator(subscription.validator_index); let subnet_id = match SubnetId::compute_subnet::( @@ -267,6 +338,111 @@ impl AttestationService { Ok(()) } + #[cfg(feature = "deterministic_long_lived_attnets")] + fn recompute_long_lived_subnets(&mut self) { + // Ensure the next computation is scheduled even if assigning subnets fails. + let next_subscription_event = self + .recompute_long_lived_subnets_inner() + .unwrap_or_else(|_| self.beacon_chain.slot_clock.slot_duration()); + + debug!(self.log, "Recomputing deterministic long lived attnets"); + self.next_long_lived_subscription_event = + Box::pin(tokio::time::sleep(next_subscription_event)); + + if let Some(waker) = self.waker.as_ref() { + waker.wake_by_ref(); + } + } + + /// Gets the long lived subnets the node should be subscribed to during the current epoch and + /// the remaining duration for which they remain valid. 
+ #[cfg(feature = "deterministic_long_lived_attnets")] + fn recompute_long_lived_subnets_inner(&mut self) -> Result { + let current_epoch = self.beacon_chain.epoch().map_err( + |e| error!(self.log, "Failed to get the current epoch from clock"; "err" => ?e), + )?; + + let (subnets, next_subscription_epoch) = SubnetId::compute_subnets_for_epoch::( + self.node_id, + current_epoch, + &self.beacon_chain.spec, + ) + .map_err(|e| error!(self.log, "Could not compute subnets for current epoch"; "err" => e))?; + + let next_subscription_slot = + next_subscription_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let next_subscription_event = self + .beacon_chain + .slot_clock + .duration_to_slot(next_subscription_slot) + .ok_or_else(|| { + error!( + self.log, + "Failed to compute duration to next to long lived subscription event" + ) + })?; + + self.update_long_lived_subnets(subnets.collect()); + + Ok(next_subscription_event) + } + + #[cfg(all(test, feature = "deterministic_long_lived_attnets"))] + pub fn update_long_lived_subnets_testing(&mut self, subnets: HashSet) { + self.update_long_lived_subnets(subnets) + } + + /// Updates the long lived subnets. + /// + /// New subnets are registered as subscribed, removed subnets as unsubscribed and the Enr + /// updated accordingly. + #[cfg(feature = "deterministic_long_lived_attnets")] + fn update_long_lived_subnets(&mut self, mut subnets: HashSet) { + for subnet in &subnets { + // Add the events for those subnets that are new as long lived subscriptions. + if !self.long_lived_subscriptions.contains(subnet) { + // Check if this subnet is new and send the subscription event if needed. 
+ if !self.short_lived_subscriptions.contains_key(subnet) { + debug!(self.log, "Subscribing to subnet"; + "subnet" => ?subnet, + "subscription_kind" => ?SubscriptionKind::LongLived, + ); + self.queue_event(SubnetServiceMessage::Subscribe(Subnet::Attestation( + *subnet, + ))); + } + self.queue_event(SubnetServiceMessage::EnrAdd(Subnet::Attestation(*subnet))); + if !self.discovery_disabled { + self.queue_event(SubnetServiceMessage::DiscoverPeers(vec![SubnetDiscovery { + subnet: Subnet::Attestation(*subnet), + min_ttl: None, + }])) + } + } + } + + // Check for subnets that are being removed + std::mem::swap(&mut self.long_lived_subscriptions, &mut subnets); + for subnet in subnets { + if !self.long_lived_subscriptions.contains(&subnet) { + if !self.short_lived_subscriptions.contains_key(&subnet) { + debug!(self.log, "Unsubscribing from subnet"; "subnet" => ?subnet, "subscription_kind" => ?SubscriptionKind::LongLived); + self.queue_event(SubnetServiceMessage::Unsubscribe(Subnet::Attestation( + subnet, + ))); + } + + self.queue_event(SubnetServiceMessage::EnrRemove(Subnet::Attestation(subnet))); + } + } + } + + /// Overwrites the long lived subscriptions for testing. + #[cfg(all(test, feature = "deterministic_long_lived_attnets"))] + pub fn set_long_lived_subscriptions(&mut self, subnets: HashSet) { + self.long_lived_subscriptions = subnets + } + /// Checks if we have subscribed aggregate validators for the subnet. If not, checks the gossip /// verification, re-propagates and returns false. pub fn should_process_attestation( @@ -377,6 +553,7 @@ impl AttestationService { // This is a current or past slot, we subscribe immediately. self.subscribe_to_subnet_immediately( subnet_id, + #[cfg(not(feature = "deterministic_long_lived_attnets"))] SubscriptionKind::ShortLived, slot + 1, )?; @@ -391,6 +568,7 @@ impl AttestationService { } /// Updates the `known_validators` mapping and subscribes to long lived subnets if required. 
+#[cfg(not(feature = "deterministic_long_lived_attnets"))] fn add_known_validator(&mut self, validator_index: u64) { let previously_known = self.known_validators.contains_key(&validator_index); // Add the new validator or update the current timeout for a known validator. @@ -405,6 +583,7 @@ impl AttestationService { /// Subscribe to long-lived random subnets and update the local ENR bitfield. /// The number of subnets to subscribe depends on the number of active validators and number of /// current subscriptions. + #[cfg(not(feature = "deterministic_long_lived_attnets"))] fn subscribe_to_random_subnets(&mut self) { if self.subscribe_all_subnets { // This case is not handled by this service. @@ -468,9 +647,12 @@ impl AttestationService { /// Checks that the time in which the subscription would end is not in the past. If we are /// already subscribed, extends the timeout if necessary. If this is a new subscription, we send /// out the appropriate events. + /// + /// On deterministic long lived subnets, this is only used for short lived subscriptions. fn subscribe_to_subnet_immediately( &mut self, subnet_id: SubnetId, + #[cfg(not(feature = "deterministic_long_lived_attnets"))] subscription_kind: SubscriptionKind, end_slot: Slot, ) -> Result<(), &'static str> { @@ -490,9 +672,13 @@ impl AttestationService { return Err("Time when subscription would end has already passed."); } + #[cfg(feature = "deterministic_long_lived_attnets")] + let subscription_kind = SubscriptionKind::ShortLived; + // We need to check and add a subscription for the right kind, regardless of the presence // of the subnet as a subscription of the other kind. This is mainly since long lived // subscriptions can be removed at any time when a validator goes offline. 
+ #[cfg(not(feature = "deterministic_long_lived_attnets"))] let (subscriptions, already_subscribed_as_other_kind) = match subscription_kind { SubscriptionKind::ShortLived => ( &mut self.short_lived_subscriptions, @@ -504,6 +690,12 @@ impl AttestationService { ), }; + #[cfg(feature = "deterministic_long_lived_attnets")] + let (subscriptions, already_subscribed_as_other_kind) = ( + &mut self.short_lived_subscriptions, + self.long_lived_subscriptions.contains(&subnet_id), + ); + match subscriptions.get(&subnet_id) { Some(current_end_slot) => { // We are already subscribed. Check if we need to extend the subscription. @@ -535,6 +727,7 @@ impl AttestationService { } // If this is a new long lived subscription, send out the appropriate events. + #[cfg(not(feature = "deterministic_long_lived_attnets"))] if SubscriptionKind::LongLived == subscription_kind { let subnet = Subnet::Attestation(subnet_id); // Advertise this subnet in our ENR. @@ -564,6 +757,7 @@ impl AttestationService { /// /// This function selects a new subnet to join, or extends the expiry if there are no more /// available subnets to choose from. + #[cfg(not(feature = "deterministic_long_lived_attnets"))] fn handle_random_subnet_expiry(&mut self, subnet_id: SubnetId) { self.handle_removed_subnet(subnet_id, SubscriptionKind::LongLived); @@ -576,12 +770,15 @@ impl AttestationService { // subscription of the other kind. For long lived subscriptions, it also removes the // advertisement from our ENR. 
fn handle_removed_subnet(&mut self, subnet_id: SubnetId, subscription_kind: SubscriptionKind) { - let other_subscriptions = match subscription_kind { - SubscriptionKind::LongLived => &self.short_lived_subscriptions, - SubscriptionKind::ShortLived => &self.long_lived_subscriptions, + let exists_in_other_subscriptions = match subscription_kind { + SubscriptionKind::LongLived => self.short_lived_subscriptions.contains_key(&subnet_id), + #[cfg(feature = "deterministic_long_lived_attnets")] + SubscriptionKind::ShortLived => self.long_lived_subscriptions.contains(&subnet_id), + #[cfg(not(feature = "deterministic_long_lived_attnets"))] + SubscriptionKind::ShortLived => self.long_lived_subscriptions.contains_key(&subnet_id), }; - if !other_subscriptions.contains_key(&subnet_id) { + if !exists_in_other_subscriptions { // Subscription no longer exists as short lived or long lived. debug!(self.log, "Unsubscribing from subnet"; "subnet" => ?subnet_id, "subscription_kind" => ?subscription_kind); self.queue_event(SubnetServiceMessage::Unsubscribe(Subnet::Attestation( @@ -603,6 +800,7 @@ impl AttestationService { /// We don't keep track of a specific validator to random subnet, rather the ratio of active /// validators to random subnets. So when a validator goes offline, we can simply remove the /// allocated amount of random subnets. + #[cfg(not(feature = "deterministic_long_lived_attnets"))] fn handle_known_validator_expiry(&mut self) { // Calculate how many subnets should we remove. let extra_subnet_count = { @@ -659,6 +857,7 @@ impl Stream for AttestationService { // Process first any known validator expiries, since these affect how many long lived // subnets we need. 
+ #[cfg(not(feature = "deterministic_long_lived_attnets"))] match self.known_validators.poll_next_unpin(cx) { Poll::Ready(Some(Ok(_validator_index))) => { self.handle_known_validator_expiry(); @@ -669,12 +868,19 @@ impl Stream for AttestationService { Poll::Ready(None) | Poll::Pending => {} } + #[cfg(feature = "deterministic_long_lived_attnets")] + match self.next_long_lived_subscription_event.as_mut().poll(cx) { + Poll::Ready(_) => self.recompute_long_lived_subnets(), + Poll::Pending => {} + } + // Process scheduled subscriptions that might be ready, since those can extend a soon to // expire subscription. match self.scheduled_short_lived_subscriptions.poll_next_unpin(cx) { Poll::Ready(Some(Ok(ExactSubnet { subnet_id, slot }))) => { if let Err(e) = self.subscribe_to_subnet_immediately( subnet_id, + #[cfg(not(feature = "deterministic_long_lived_attnets"))] SubscriptionKind::ShortLived, slot + 1, ) { @@ -699,6 +905,7 @@ impl Stream for AttestationService { } // Process any random subnet expiries. 
+ #[cfg(not(feature = "deterministic_long_lived_attnets"))] match self.long_lived_subscriptions.poll_next_unpin(cx) { Poll::Ready(Some(Ok((subnet_id, _end_slot)))) => { self.handle_random_subnet_expiry(subnet_id) diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 65ca9f2194..30f030eba7 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -123,7 +123,15 @@ fn get_attestation_service( let beacon_chain = CHAIN.chain.clone(); - AttestationService::new(beacon_chain, &config, &log) + AttestationService::new( + beacon_chain, + #[cfg(feature = "deterministic_long_lived_attnets")] + lighthouse_network::discv5::enr::NodeId::random() + .raw() + .into(), + &config, + &log, + ) } fn get_sync_committee_service() -> SyncCommitteeService { @@ -170,6 +178,9 @@ async fn get_events + Unpin>( mod attestation_service { + #[cfg(feature = "deterministic_long_lived_attnets")] + use std::collections::HashSet; + use crate::subnet_service::attestation_subnets::MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD; use super::*; @@ -190,6 +201,7 @@ mod attestation_service { } } + #[cfg(not(feature = "deterministic_long_lived_attnets"))] fn get_subscriptions( validator_count: u64, slot: Slot, @@ -268,8 +280,7 @@ mod attestation_service { // If the long lived and short lived subnets are the same, there should be no more events // as we don't resubscribe already subscribed subnets. if !attestation_service - .subscriptions(attestation_subnets::SubscriptionKind::LongLived) - .contains_key(&subnet_id) + .is_subscribed(&subnet_id, attestation_subnets::SubscriptionKind::LongLived) { assert_eq!(expected[..], events[3..]); } @@ -352,11 +363,12 @@ mod attestation_service { let expected = SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1)); - // Should be still subscribed to 1 long lived and 1 short lived subnet if both are different. 
- if !attestation_service - .subscriptions(attestation_subnets::SubscriptionKind::LongLived) - .contains_key(&subnet_id1) - { + // Should be still subscribed to 1 long lived and 1 short lived subnet if both are + // different. + if !attestation_service.is_subscribed( + &subnet_id1, + attestation_subnets::SubscriptionKind::LongLived, + ) { assert_eq!(expected, events[3]); assert_eq!(attestation_service.subscription_count(), 2); } else { @@ -366,11 +378,12 @@ mod attestation_service { // Get event for 1 more slot duration, we should get the unsubscribe event now. let unsubscribe_event = get_events(&mut attestation_service, None, 1).await; - // If the long lived and short lived subnets are different, we should get an unsubscription event. - if !attestation_service - .subscriptions(attestation_subnets::SubscriptionKind::LongLived) - .contains_key(&subnet_id1) - { + // If the long lived and short lived subnets are different, we should get an unsubscription + // event. + if !attestation_service.is_subscribed( + &subnet_id1, + attestation_subnets::SubscriptionKind::LongLived, + ) { assert_eq!( [SubnetServiceMessage::Unsubscribe(Subnet::Attestation( subnet_id1 @@ -383,6 +396,7 @@ mod attestation_service { assert_eq!(attestation_service.subscription_count(), 1); } + #[cfg(not(feature = "deterministic_long_lived_attnets"))] #[tokio::test] async fn subscribe_all_random_subnets() { let attestation_subnet_count = MainnetEthSpec::default_spec().attestation_subnet_count; @@ -440,6 +454,7 @@ mod attestation_service { // test completed successfully } + #[cfg(not(feature = "deterministic_long_lived_attnets"))] #[tokio::test] async fn subscribe_all_random_subnets_plus_one() { let attestation_subnet_count = MainnetEthSpec::default_spec().attestation_subnet_count; @@ -573,10 +588,10 @@ mod attestation_service { let expected_unsubscription = SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id1)); - if !attestation_service - 
.subscriptions(attestation_subnets::SubscriptionKind::LongLived) - .contains_key(&subnet_id1) - { + if !attestation_service.is_subscribed( + &subnet_id1, + attestation_subnets::SubscriptionKind::LongLived, + ) { assert_eq!(expected_subscription, events[3]); // fourth is a discovery event assert_eq!(expected_unsubscription, events[5]); @@ -600,10 +615,10 @@ mod attestation_service { let second_subscribe_event = get_events(&mut attestation_service, None, 2).await; // If the long lived and short lived subnets are different, we should get an unsubscription event. - if !attestation_service - .subscriptions(attestation_subnets::SubscriptionKind::LongLived) - .contains_key(&subnet_id1) - { + if !attestation_service.is_subscribed( + &subnet_id1, + attestation_subnets::SubscriptionKind::LongLived, + ) { assert_eq!( [SubnetServiceMessage::Subscribe(Subnet::Attestation( subnet_id1 @@ -612,6 +627,43 @@ mod attestation_service { ); } } + + #[tokio::test] + #[cfg(feature = "deterministic_long_lived_attnets")] + async fn test_update_deterministic_long_lived_subnets() { + let mut attestation_service = get_attestation_service(None); + let new_subnet = SubnetId::new(1); + let maintained_subnet = SubnetId::new(2); + let removed_subnet = SubnetId::new(3); + + attestation_service + .set_long_lived_subscriptions(HashSet::from([removed_subnet, maintained_subnet])); + // clear initial events + let _events = get_events(&mut attestation_service, None, 1).await; + + attestation_service + .update_long_lived_subnets_testing(HashSet::from([maintained_subnet, new_subnet])); + + let events = get_events(&mut attestation_service, None, 1).await; + let new_subnet = Subnet::Attestation(new_subnet); + let removed_subnet = Subnet::Attestation(removed_subnet); + assert_eq!( + events, + [ + // events for the new subnet + SubnetServiceMessage::Subscribe(new_subnet), + SubnetServiceMessage::EnrAdd(new_subnet), + SubnetServiceMessage::DiscoverPeers(vec![SubnetDiscovery { + subnet: new_subnet, + min_ttl: 
None + }]), + // events for the removed subnet + SubnetServiceMessage::Unsubscribe(removed_subnet), + SubnetServiceMessage::EnrRemove(removed_subnet), + ] + ); + println!("{events:?}") + } } mod sync_committee_service { diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index b2ba24ac3e..f68e65d7d5 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -161,6 +161,9 @@ pub struct ChainSpec { pub attestation_subnet_count: u64, pub random_subnets_per_validator: u64, pub epochs_per_random_subnet_subscription: u64, + pub subnets_per_node: u8, + pub epochs_per_subnet_subscription: u64, + attestation_subnet_extra_bits: u8, /* * Application params @@ -427,6 +430,22 @@ impl ChainSpec { Hash256::from(domain) } + #[allow(clippy::integer_arithmetic)] + pub const fn attestation_subnet_prefix_bits(&self) -> u32 { + // maybe use log2 when stable https://github.com/rust-lang/rust/issues/70887 + + // NOTE: this line is here simply to guarantee that if self.attestation_subnet_count type + // is changed, a compiler warning will be raised. This code depends on the type being u64. + let attestation_subnet_count: u64 = self.attestation_subnet_count; + let attestation_subnet_count_bits = if attestation_subnet_count == 0 { + 0 + } else { + 63 - attestation_subnet_count.leading_zeros() + }; + + self.attestation_subnet_extra_bits as u32 + attestation_subnet_count_bits + } + /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. 
pub fn mainnet() -> Self { Self { @@ -576,9 +595,12 @@ impl ChainSpec { attestation_propagation_slot_range: 32, attestation_subnet_count: 64, random_subnets_per_validator: 1, + subnets_per_node: 1, maximum_gossip_clock_disparity_millis: 500, target_aggregators_per_committee: 16, epochs_per_random_subnet_subscription: 256, + epochs_per_subnet_subscription: 256, + attestation_subnet_extra_bits: 6, /* * Application specific @@ -786,9 +808,12 @@ impl ChainSpec { attestation_propagation_slot_range: 32, attestation_subnet_count: 64, random_subnets_per_validator: 1, + subnets_per_node: 1, maximum_gossip_clock_disparity_millis: 500, target_aggregators_per_committee: 16, epochs_per_random_subnet_subscription: 256, + epochs_per_subnet_subscription: 256, + attestation_subnet_extra_bits: 6, /* * Application specific diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 046ed8f33e..e1de277615 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -1,8 +1,9 @@ //! Identifies each shard by an integer identifier. -use crate::{AttestationData, ChainSpec, CommitteeIndex, EthSpec, Slot}; +use crate::{AttestationData, ChainSpec, CommitteeIndex, Epoch, EthSpec, Slot}; use safe_arith::{ArithError, SafeArith}; use serde_derive::{Deserialize, Serialize}; use std::ops::{Deref, DerefMut}; +use swap_or_not_shuffle::compute_shuffled_index; const MAX_SUBNET_ID: usize = 64; @@ -71,6 +72,45 @@ impl SubnetId { .safe_rem(spec.attestation_subnet_count)? .into()) } + + #[allow(clippy::integer_arithmetic)] + /// Computes the set of subnets the node should be subscribed to during the current epoch, + /// along with the first epoch in which these subscriptions are no longer valid. 
+ pub fn compute_subnets_for_epoch( + node_id: ethereum_types::U256, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result<(impl Iterator, Epoch), &'static str> { + let node_id_prefix = + (node_id >> (256 - spec.attestation_subnet_prefix_bits() as usize)).as_usize(); + + let subscription_event_idx = epoch.as_u64() / spec.epochs_per_subnet_subscription; + let permutation_seed = + eth2_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx)); + + let num_subnets = 1 << spec.attestation_subnet_prefix_bits(); + + let permutated_prefix = compute_shuffled_index( + node_id_prefix, + num_subnets, + &permutation_seed, + spec.shuffle_round_count, + ) + .ok_or("Unable to shuffle")? as u64; + + // Get the constants we need to avoid holding a reference to the spec + let &ChainSpec { + subnets_per_node, + attestation_subnet_count, + .. + } = spec; + + let subnet_set_generator = (0..subnets_per_node).map(move |idx| { + SubnetId::new((permutated_prefix + idx as u64) % attestation_subnet_count) + }); + let valid_until_epoch = (subscription_event_idx + 1) * spec.epochs_per_subnet_subscription; + Ok((subnet_set_generator, valid_until_epoch.into())) + } } impl Deref for SubnetId { From 242ae21e5df875f04c8b9182d108aedfa1f6ae9f Mon Sep 17 00:00:00 2001 From: mariuspod <14898268+mariuspod@users.noreply.github.com> Date: Tue, 4 Oct 2022 12:41:03 +0000 Subject: [PATCH 05/21] Pass EL JWT secret key via cli flag (#3568) ## Proposed Changes In this change I've added a new beacon_node cli flag `--execution-jwt-secret-key` for passing the JWT secret directly as string. Without this flag, it was non-trivial to pass a secrets file containing a JWT secret key without compromising its contents into some management repo or fiddling around with manual file mounts for cloud-based deployments. When used in combination with environment variables, the secret can be injected into container-based systems like docker & friends quite easily. 
It's both possible to either specify the file_path to the JWT secret or pass the JWT secret directly. I've modified the docs and attached a test as well. ## Additional Info The logic has been adapted a bit so that either one of `--execution-jwt` or `--execution-jwt-secret-key` must be set when specifying `--execution-endpoint` so that it's still compatible with the semantics before this change and there's at least one secret provided. --- beacon_node/src/cli.rs | 12 ++++++++++- beacon_node/src/config.rs | 35 +++++++++++++++++++++++++++------ book/src/merge-migration.md | 4 ++++ lighthouse/tests/beacon_node.rs | 23 +++++++++++++++++++++- 4 files changed, 66 insertions(+), 8 deletions(-) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 51e8762f1c..1e51849876 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -440,7 +440,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { JSON-RPC connection. Uses the same endpoint to populate the \ deposit cache.") .takes_value(true) - .requires("execution-jwt") ) .arg( Arg::with_name("execution-jwt") @@ -452,6 +451,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .requires("execution-endpoint") .takes_value(true) ) + .arg( + Arg::with_name("execution-jwt-secret-key") + .long("execution-jwt-secret-key") + .value_name("EXECUTION-JWT-SECRET-KEY") + .alias("jwt-secret-key") + .help("Hex-encoded JWT secret for the \ + execution endpoint provided in the --execution-endpoint flag.") + .requires("execution-endpoint") + .conflicts_with("execution-jwt") + .takes_value(true) + ) .arg( Arg::with_name("execution-jwt-id") .long("execution-jwt-id") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index f1d0fb35a3..ecd4d736a6 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -3,6 +3,7 @@ use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; use client::{ClientConfig, ClientGenesis}; use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR}; use 
environment::RuntimeContext; +use execution_layer::DEFAULT_JWT_FILE; use genesis::Eth1Endpoint; use http_api::TlsConfig; use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; @@ -288,12 +289,34 @@ pub fn get_config( let execution_endpoint = parse_only_one_value(endpoints, SensitiveUrl::parse, "--execution-endpoint", log)?; - // Parse a single JWT secret, logging warnings if multiple are supplied. - // - // JWTs are required if `--execution-endpoint` is supplied. - let secret_files: String = clap_utils::parse_required(cli_args, "execution-jwt")?; - let secret_file = - parse_only_one_value(&secret_files, PathBuf::from_str, "--execution-jwt", log)?; + // JWTs are required if `--execution-endpoint` is supplied. They can be either passed via + // file_path or directly as string. + + let secret_file: PathBuf; + // Parse a single JWT secret from a given file_path, logging warnings if multiple are supplied. + if let Some(secret_files) = cli_args.value_of("execution-jwt") { + secret_file = + parse_only_one_value(secret_files, PathBuf::from_str, "--execution-jwt", log)?; + + // Check if the JWT secret key is passed directly via cli flag and persist it to the default + // file location. + } else if let Some(jwt_secret_key) = cli_args.value_of("execution-jwt-secret-key") { + use std::fs::File; + use std::io::Write; + secret_file = client_config.data_dir.join(DEFAULT_JWT_FILE); + let mut jwt_secret_key_file = File::create(secret_file.clone()) + .map_err(|e| format!("Error while creating jwt_secret_key file: {:?}", e))?; + jwt_secret_key_file + .write_all(jwt_secret_key.as_bytes()) + .map_err(|e| { + format!( + "Error occurred while writing to jwt_secret_key file: {:?}", + e + ) + })?; + } else { + return Err("Error! Please set either --execution-jwt file_path or --execution-jwt-secret-key directly via cli when using --execution-endpoint".to_string()); + } // Parse and set the payload builder, if any.
if let Some(endpoint) = cli_args.value_of("builder") { diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index 104a7ead6d..780be5836d 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -48,6 +48,10 @@ If you set up an execution engine with `--execution-endpoint` then you *must* pr using `--execution-jwt`. This is a mandatory form of authentication that ensures that Lighthouse has authority to control the execution engine. +> Tip: the --execution-jwt-secret-key flag can be used instead of --execution-jwt . +> This is useful, for example, for users who wish to inject the value into a Docker container without +> needing to pass a jwt secret file. + The execution engine connection must be **exclusive**, i.e. you must have one execution node per beacon node. The reason for this is that the beacon node _controls_ the execution node. Please see the [FAQ](#faq) for further information about why many:1 and 1:many configurations are not diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 2e76d832ce..a00fd7a822 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -4,7 +4,7 @@ use crate::exec::{CommandLineTestExec, CompletedTest}; use eth1::Eth1Endpoint; use lighthouse_network::PeerId; use std::fs::File; -use std::io::Write; +use std::io::{Read, Write}; use std::net::IpAddr; use std::path::PathBuf; use std::process::Command; @@ -386,6 +386,27 @@ fn run_merge_execution_endpoints_flag_test(flag: &str) { }); } #[test] +fn run_execution_jwt_secret_key_is_persisted() { + let jwt_secret_key = "0x3cbc11b0d8fa16f3344eacfd6ff6430b9d30734450e8adcf5400f88d327dcb33"; + CommandLineTest::new() + .flag("execution-endpoint", Some("http://localhost:8551/")) + .flag("execution-jwt-secret-key", Some(jwt_secret_key)) + .run_with_zero_port() + .with_config(|config| { + let config = config.execution_layer.as_ref().unwrap(); + assert_eq!( + config.execution_endpoints[0].full.to_string(), 
+ "http://localhost:8551/" + ); + let mut file_jwt_secret_key = String::new(); + File::open(config.secret_files[0].clone()) + .expect("could not open jwt_secret_key file") + .read_to_string(&mut file_jwt_secret_key) + .expect("could not read from file"); + assert_eq!(file_jwt_secret_key, jwt_secret_key); + }); +} +#[test] fn merge_execution_endpoints_flag() { run_merge_execution_endpoints_flag_test("execution-endpoints") } From 9f242137b0fe1968f8b57993efb0d806e8265d1d Mon Sep 17 00:00:00 2001 From: will Date: Wed, 12 Oct 2022 23:40:42 +0000 Subject: [PATCH 06/21] Add a new bls test (#3235) ## Issue Addressed Which issue # does this PR address? #2629 ## Proposed Changes Please list or describe the changes introduced by this PR. 1. CI will download the bls test cases from https://github.com/ethereum/bls12-381-tests/ 2. all the bls test cases (except the eth ones) will use cases in the archive from step one 3. The bls test cases from https://github.com/ethereum/consensus-spec-tests will stay there but are no longer used. In the future, these bls test cases will be removed, as suggested in https://github.com/ethereum/consensus-spec-tests/issues/25 . So this does no harm and stays compatible with future cases. ## Additional Info Please provide any additional information. For example, future considerations or information useful for reviewers. Question: I am not sure if I should implement tests about `deserialization_G1`, `deserialization_G2` and `hash_to_G2` for the issue.
--- crypto/bls/src/lib.rs | 8 +++ testing/ef_tests/.gitignore | 1 + testing/ef_tests/Makefile | 21 +++++- testing/ef_tests/check_all_files_accessed.py | 8 ++- testing/ef_tests/src/cases.rs | 2 + .../ef_tests/src/cases/bls_aggregate_sigs.rs | 8 +-- .../src/cases/bls_aggregate_verify.rs | 8 +-- .../ef_tests/src/cases/bls_batch_verify.rs | 67 +++++++++++++++++ .../src/cases/bls_eth_aggregate_pubkeys.rs | 4 +- .../cases/bls_eth_fast_aggregate_verify.rs | 4 +- .../src/cases/bls_fast_aggregate_verify.rs | 8 +-- testing/ef_tests/src/cases/bls_sign_msg.rs | 8 +-- testing/ef_tests/src/cases/bls_verify_msg.rs | 8 +-- testing/ef_tests/src/cases/common.rs | 34 +++++---- testing/ef_tests/src/handler.rs | 71 +++++++++++++++++-- testing/ef_tests/tests/tests.rs | 6 ++ 16 files changed, 211 insertions(+), 55 deletions(-) create mode 100644 testing/ef_tests/src/cases/bls_batch_verify.rs diff --git a/crypto/bls/src/lib.rs b/crypto/bls/src/lib.rs index eacbc2b268..750e1bd5b8 100644 --- a/crypto/bls/src/lib.rs +++ b/crypto/bls/src/lib.rs @@ -90,6 +90,7 @@ pub mod generics { pub use crate::generic_secret_key::GenericSecretKey; pub use crate::generic_signature::GenericSignature; pub use crate::generic_signature_bytes::GenericSignatureBytes; + pub use crate::generic_signature_set::WrappedSignature; } /// Defines all the fundamental BLS points which should be exported by this crate by making @@ -109,6 +110,13 @@ macro_rules! 
define_mod { pub type AggregatePublicKey = GenericAggregatePublicKey; pub type Signature = GenericSignature; + pub type BlsWrappedSignature<'a> = WrappedSignature< + 'a, + bls_variant::PublicKey, + bls_variant::AggregatePublicKey, + bls_variant::Signature, + bls_variant::AggregateSignature, + >; pub type AggregateSignature = GenericAggregateSignature< bls_variant::PublicKey, bls_variant::AggregatePublicKey, diff --git a/testing/ef_tests/.gitignore b/testing/ef_tests/.gitignore index f3638b7bff..6a2ca1fe75 100644 --- a/testing/ef_tests/.gitignore +++ b/testing/ef_tests/.gitignore @@ -1,2 +1,3 @@ /consensus-spec-tests .accessed_file_log.txt +/bls12-381-tests diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index dc89cb5d5f..fac1ab905a 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -4,23 +4,38 @@ TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) REPO_NAME := consensus-spec-tests OUTPUT_DIR := ./$(REPO_NAME) - BASE_URL := https://github.com/ethereum/$(REPO_NAME)/releases/download/$(TESTS_TAG) +BLS_TEST_REPO_NAME := bls12-381-tests +BLS_TEST_TAG := v0.1.1 +BLS_TEST = bls_tests_yaml +BLS_TARBALL = $(patsubst %,%-$(BLS_TEST_TAG).tar.gz,$(BLS_TEST)) +BLS_OUTPUT_DIR := $(OUTPUT_DIR)/$(BLS_TEST_REPO_NAME) +BLS_BASE_URL := https://github.com/ethereum/$(BLS_TEST_REPO_NAME)/releases/download/$(BLS_TEST_TAG) + +all: + make $(OUTPUT_DIR) + make $(BLS_OUTPUT_DIR) + $(OUTPUT_DIR): $(TARBALLS) mkdir $(OUTPUT_DIR) for test_tarball in $^; do \ tar -xzf $$test_tarball -C $(OUTPUT_DIR);\ done +$(BLS_OUTPUT_DIR): + mkdir $(BLS_OUTPUT_DIR) + wget $(BLS_BASE_URL)/$(BLS_TEST).tar.gz -O $(BLS_TARBALL) + tar -xzf $(BLS_TARBALL) -C $(BLS_OUTPUT_DIR) + %-$(TESTS_TAG).tar.gz: wget $(BASE_URL)/$*.tar.gz -O $@ clean-test-files: - rm -rf $(OUTPUT_DIR) + rm -rf $(OUTPUT_DIR) $(BLS_OUTPUT_DIR) clean-archives: - rm -f $(TARBALLS) + rm -f $(TARBALLS) $(BLS_TARBALL) clean: clean-test-files clean-archives diff --git 
a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index a10ccf1e6f..158e875810 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -46,7 +46,13 @@ excluded_paths = [ # One of the EF researchers likes to pack the tarballs on a Mac ".*\.DS_Store.*", # More Mac weirdness. - "tests/mainnet/bellatrix/operations/deposit/pyspec_tests/deposit_with_previous_fork_version__valid_ineffective/._meta.yaml" + "tests/mainnet/bellatrix/operations/deposit/pyspec_tests/deposit_with_previous_fork_version__valid_ineffective/._meta.yaml", + # bls tests are moved to bls12-381-tests directory + "tests/general/phase0/bls", + # some bls tests are not included now + "bls12-381-tests/deserialization_G1", + "bls12-381-tests/deserialization_G2", + "bls12-381-tests/hash_to_G2" ] def normalize_path(path): diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index 64f4aa7538..ae70f1e07e 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -6,6 +6,7 @@ use types::ForkName; mod bls_aggregate_sigs; mod bls_aggregate_verify; +mod bls_batch_verify; mod bls_eth_aggregate_pubkeys; mod bls_eth_fast_aggregate_verify; mod bls_fast_aggregate_verify; @@ -29,6 +30,7 @@ mod transition; pub use self::fork_choice::*; pub use bls_aggregate_sigs::*; pub use bls_aggregate_verify::*; +pub use bls_batch_verify::*; pub use bls_eth_aggregate_pubkeys::*; pub use bls_eth_fast_aggregate_verify::*; pub use bls_fast_aggregate_verify::*; diff --git a/testing/ef_tests/src/cases/bls_aggregate_sigs.rs b/testing/ef_tests/src/cases/bls_aggregate_sigs.rs index e0d0dd76ae..81e186a66b 100644 --- a/testing/ef_tests/src/cases/bls_aggregate_sigs.rs +++ b/testing/ef_tests/src/cases/bls_aggregate_sigs.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_result; -use crate::cases::common::BlsCase; +use crate::impl_bls_load_case; use bls::{AggregateSignature, Signature}; use 
serde_derive::Deserialize; @@ -10,13 +10,9 @@ pub struct BlsAggregateSigs { pub output: String, } -impl BlsCase for BlsAggregateSigs {} +impl_bls_load_case!(BlsAggregateSigs); impl Case for BlsAggregateSigs { - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name == ForkName::Base - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let mut aggregate_signature = AggregateSignature::infinity(); diff --git a/testing/ef_tests/src/cases/bls_aggregate_verify.rs b/testing/ef_tests/src/cases/bls_aggregate_verify.rs index ea7a7664fc..e9539dc15e 100644 --- a/testing/ef_tests/src/cases/bls_aggregate_verify.rs +++ b/testing/ef_tests/src/cases/bls_aggregate_verify.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_result; -use crate::cases::common::BlsCase; +use crate::impl_bls_load_case; use bls::{AggregateSignature, PublicKeyBytes}; use serde_derive::Deserialize; use types::Hash256; @@ -18,13 +18,9 @@ pub struct BlsAggregateVerify { pub output: bool, } -impl BlsCase for BlsAggregateVerify {} +impl_bls_load_case!(BlsAggregateVerify); impl Case for BlsAggregateVerify { - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name == ForkName::Base - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let messages = self .input diff --git a/testing/ef_tests/src/cases/bls_batch_verify.rs b/testing/ef_tests/src/cases/bls_batch_verify.rs new file mode 100644 index 0000000000..de8721d67d --- /dev/null +++ b/testing/ef_tests/src/cases/bls_batch_verify.rs @@ -0,0 +1,67 @@ +use super::*; +use crate::case_result::compare_result; +use crate::impl_bls_load_case; +use bls::{verify_signature_sets, BlsWrappedSignature, PublicKeyBytes, Signature, SignatureSet}; +use serde_derive::Deserialize; +use std::borrow::Cow; +use std::str::FromStr; +use types::Hash256; + +#[derive(Debug, Clone, Deserialize)] +pub struct BlsBatchVerifyInput { + pubkeys: Vec, + messages: Vec, + signatures: Vec, +} + 
+#[derive(Debug, Clone, Deserialize)] +pub struct BlsBatchVerify { + pub input: BlsBatchVerifyInput, + pub output: bool, +} + +impl_bls_load_case!(BlsBatchVerify); + +impl Case for BlsBatchVerify { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let messages = self + .input + .messages + .iter() + .map(|s| Hash256::from_str(s).map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))) + .collect::, _>>()?; + + let pubkeys = self + .input + .pubkeys + .iter() + .map(|pkb| { + pkb.decompress() + .map_err(|_| Error::FailedToParseTest("pubkeys parse error".to_string())) + }) + .collect::, _>>()?; + + let signatures = self + .input + .signatures + .iter() + .map(|s| { + Signature::from_str(s).map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) + }) + .collect::, _>>()?; + + let signature_set = messages + .iter() + .zip(pubkeys.iter()) + .zip(signatures.iter()) + .map(|((&message, pubkey), signature)| { + let wraped_signature = BlsWrappedSignature::from(signature); + SignatureSet::single_pubkey(wraped_signature, Cow::Borrowed(pubkey), message) + }) + .collect::>(); + + let signature_valid = verify_signature_sets(signature_set.iter()); + + compare_result::(&Ok(signature_valid), &Some(self.output)) + } +} diff --git a/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs b/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs index 2ecc3b603d..c41fbca393 100644 --- a/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs +++ b/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_result; -use crate::cases::common::BlsCase; +use crate::impl_bls_load_case; use bls::{AggregatePublicKey, PublicKeyBytes}; use serde_derive::Deserialize; @@ -10,7 +10,7 @@ pub struct BlsEthAggregatePubkeys { pub output: Option, } -impl BlsCase for BlsEthAggregatePubkeys {} +impl_bls_load_case!(BlsEthAggregatePubkeys, "data.yaml"); impl Case for BlsEthAggregatePubkeys { fn 
is_enabled_for_fork(fork_name: ForkName) -> bool { diff --git a/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs b/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs index 62f9eb30c3..80e018459b 100644 --- a/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs +++ b/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_result; -use crate::cases::common::BlsCase; +use crate::impl_bls_load_case; use bls::{AggregateSignature, PublicKeyBytes}; use serde_derive::Deserialize; use std::convert::TryInto; @@ -20,7 +20,7 @@ pub struct BlsEthFastAggregateVerify { pub output: bool, } -impl BlsCase for BlsEthFastAggregateVerify {} +impl_bls_load_case!(BlsEthFastAggregateVerify, "data.yaml"); impl Case for BlsEthFastAggregateVerify { fn is_enabled_for_fork(fork_name: ForkName) -> bool { diff --git a/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs b/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs index 9722c05dc8..608995db9d 100644 --- a/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs +++ b/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_result; -use crate::cases::common::BlsCase; +use crate::impl_bls_load_case; use bls::{AggregateSignature, PublicKeyBytes}; use serde_derive::Deserialize; use std::convert::TryInto; @@ -20,13 +20,9 @@ pub struct BlsFastAggregateVerify { pub output: bool, } -impl BlsCase for BlsFastAggregateVerify {} +impl_bls_load_case!(BlsFastAggregateVerify); impl Case for BlsFastAggregateVerify { - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name == ForkName::Base - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let message = Hash256::from_slice( &hex::decode(&self.input.message[2..]) diff --git a/testing/ef_tests/src/cases/bls_sign_msg.rs b/testing/ef_tests/src/cases/bls_sign_msg.rs index ad6b40cb77..53c13b569a 100644 
--- a/testing/ef_tests/src/cases/bls_sign_msg.rs +++ b/testing/ef_tests/src/cases/bls_sign_msg.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_result; -use crate::cases::common::BlsCase; +use crate::impl_bls_load_case; use bls::SecretKey; use serde_derive::Deserialize; use types::Hash256; @@ -17,13 +17,9 @@ pub struct BlsSign { pub output: Option, } -impl BlsCase for BlsSign {} +impl_bls_load_case!(BlsSign); impl Case for BlsSign { - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name == ForkName::Base - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { // Convert private_key and message to required types let sk = hex::decode(&self.input.privkey[2..]) diff --git a/testing/ef_tests/src/cases/bls_verify_msg.rs b/testing/ef_tests/src/cases/bls_verify_msg.rs index 190c09d52f..779b3cf75f 100644 --- a/testing/ef_tests/src/cases/bls_verify_msg.rs +++ b/testing/ef_tests/src/cases/bls_verify_msg.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_result; -use crate::cases::common::BlsCase; +use crate::impl_bls_load_case; use bls::{PublicKeyBytes, Signature, SignatureBytes}; use serde_derive::Deserialize; use std::convert::TryInto; @@ -19,13 +19,9 @@ pub struct BlsVerify { pub output: bool, } -impl BlsCase for BlsVerify {} +impl_bls_load_case!(BlsVerify); impl Case for BlsVerify { - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name == ForkName::Base - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let message = hex::decode(&self.input.message[2..]) .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index e77e561939..e15a2e2ca3 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -1,24 +1,11 @@ -use crate::cases::LoadCase; -use crate::decode::yaml_decode_file; -use crate::error::Error; use 
serde_derive::Deserialize; use ssz::Encode; use ssz_derive::{Decode, Encode}; use std::convert::TryFrom; use std::fmt::Debug; -use std::path::Path; use tree_hash::TreeHash; use types::ForkName; -/// Trait for all BLS cases to eliminate some boilerplate. -pub trait BlsCase: serde::de::DeserializeOwned {} - -impl LoadCase for T { - fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { - yaml_decode_file(&path.join("data.yaml")) - } -} - /// Macro to wrap U128 and U256 so they deserialize correctly. macro_rules! uint_wrapper { ($wrapper_name:ident, $wrapped_type:ty) => { @@ -80,3 +67,24 @@ pub fn previous_fork(fork_name: ForkName) -> ForkName { ForkName::Merge => ForkName::Altair, // TODO: Check this when tests are released.. } } + +#[macro_export] +macro_rules! impl_bls_load_case { + ($case_name:ident) => { + use $crate::decode::yaml_decode_file; + impl LoadCase for $case_name { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { + yaml_decode_file(&path) + } + } + }; + + ($case_name:ident, $sub_path_name:expr) => { + use $crate::decode::yaml_decode_file; + impl LoadCase for $case_name { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { + yaml_decode_file(&path.join($sub_path_name)) + } + } + }; +} diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 13c0a8c54a..92d5db7fde 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -49,8 +49,9 @@ pub trait Handler { let as_directory = |entry: Result| -> Option { entry .ok() - .filter(|e| e.file_type().map(|ty| ty.is_dir()).unwrap_or(false)) + .filter(|e| e.file_type().map(|ty| ty.is_dir()).unwrap()) }; + let test_cases = fs::read_dir(&handler_path) .unwrap_or_else(|e| panic!("handler dir {} exists: {:?}", handler_path.display(), e)) .filter_map(as_directory) @@ -58,6 +59,7 @@ pub trait Handler { .filter_map(as_directory) .map(|test_case_dir| { let path = test_case_dir.path(); + let case = Self::Case::load_from_dir(&path, 
fork_name).expect("test should load"); (path, case) }) @@ -75,7 +77,7 @@ pub trait Handler { } } -macro_rules! bls_handler { +macro_rules! bls_eth_handler { ($runner_name: ident, $case_name:ident, $handler_name:expr) => { #[derive(Derivative)] #[derivative(Default(bound = ""))] @@ -95,8 +97,69 @@ macro_rules! bls_handler { }; } +macro_rules! bls_handler { + ($runner_name: ident, $case_name:ident, $handler_name:expr) => { + #[derive(Derivative)] + #[derivative(Default(bound = ""))] + pub struct $runner_name; + + impl Handler for $runner_name { + type Case = cases::$case_name; + + fn runner_name() -> &'static str { + "bls" + } + + fn config_name() -> &'static str { + "bls12-381-tests" + } + + fn handler_name(&self) -> String { + $handler_name.into() + } + + fn run(&self) { + let fork_name = ForkName::Base; + let fork_name_str = fork_name.to_string(); + let handler_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("consensus-spec-tests") + .join(Self::config_name()) + .join(self.handler_name()); + + let as_file = |entry: Result| -> Option { + entry + .ok() + .filter(|e| e.file_type().map(|ty| ty.is_file()).unwrap_or(false)) + }; + let test_cases: Vec<(PathBuf, Self::Case)> = fs::read_dir(&handler_path) + .expect("handler dir exists") + .filter_map(as_file) + .map(|test_case_path| { + let path = test_case_path.path(); + let case = + Self::Case::load_from_dir(&path, fork_name).expect("test should load"); + + (path, case) + }) + .collect(); + + let results = Cases { test_cases }.test_results(fork_name, Self::use_rayon()); + + let name = format!( + "{}/{}/{}", + fork_name_str, + Self::runner_name(), + self.handler_name() + ); + crate::results::assert_tests_pass(&name, &handler_path, &results); + } + } + }; +} + bls_handler!(BlsAggregateSigsHandler, BlsAggregateSigs, "aggregate"); bls_handler!(BlsSignMsgHandler, BlsSign, "sign"); +bls_handler!(BlsBatchVerifyHandler, BlsBatchVerify, "batch_verify"); bls_handler!(BlsVerifyMsgHandler, BlsVerify, "verify"); 
bls_handler!( BlsAggregateVerifyHandler, @@ -108,12 +171,12 @@ bls_handler!( BlsFastAggregateVerify, "fast_aggregate_verify" ); -bls_handler!( +bls_eth_handler!( BlsEthAggregatePubkeysHandler, BlsEthAggregatePubkeys, "eth_aggregate_pubkeys" ); -bls_handler!( +bls_eth_handler!( BlsEthFastAggregateVerifyHandler, BlsEthFastAggregateVerify, "eth_fast_aggregate_verify" diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 31abbd1591..2c8b9d223b 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -118,6 +118,12 @@ fn bls_verify() { BlsVerifyMsgHandler::default().run(); } +#[test] +#[cfg(not(feature = "fake_crypto"))] +fn bls_batch_verify() { + BlsBatchVerifyHandler::default().run(); +} + #[test] #[cfg(not(feature = "fake_crypto"))] fn bls_aggregate_verify() { From ca9dc8e0947a0ec83f31830aaabc1ffbd3c14c9c Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sat, 15 Oct 2022 22:25:51 +0000 Subject: [PATCH 07/21] Optimise HTTP validator lookups (#3559) ## Issue Addressed While digging around in some logs I noticed that queries for validators by pubkey were taking 10ms+, which seemed too long. This was due to a loop through the entire validator registry for each lookup. ## Proposed Changes Rather than using a loop through the register, this PR utilises the pubkey cache which is usually initialised at the head*. In case the cache isn't built, we fall back to the previous loop logic. In the vast majority of cases I expect the cache will be built, as the validator client queries at the `head` where all caches should be built. ## Additional Info *I had to modify the cache build that runs after fork choice to build the pubkey cache. I think it had been optimised out, perhaps accidentally. I think it's preferable to have the exit cache and the pubkey cache built on the head state, as they are required for verifying deposits and exits respectively, and we may as well build them off the hot path of block processing. 
Previously they'd get built the first time a deposit or exit needed to be verified. I've deleted the unused `map_state` function which was obsoleted by `map_state_and_execution_optimistic`. --- .../beacon_chain/src/canonical_head.rs | 6 ++--- beacon_node/http_api/src/lib.rs | 21 ++++++++++++++-- beacon_node/http_api/src/state_id.rs | 25 ++----------------- .../per_block_processing/verify_deposit.rs | 4 +-- consensus/types/src/beacon_state.rs | 15 +++++++++++ 5 files changed, 40 insertions(+), 31 deletions(-) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index c9bd6db0e6..53e0fbaac9 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -654,11 +654,11 @@ impl BeaconChain { }) }) .and_then(|mut snapshot| { - // Regardless of where we got the state from, attempt to build the committee - // caches. + // Regardless of where we got the state from, attempt to build all the + // caches except the tree hash cache. 
snapshot .beacon_state - .build_all_committee_caches(&self.spec) + .build_all_caches(&self.spec) .map_err(Into::into) .map(|()| snapshot) })?; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5b4fa5816d..51e97c893d 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -668,9 +668,10 @@ pub fn serve( "Invalid validator ID".to_string(), )) })) + .and(log_filter.clone()) .and(warp::path::end()) .and_then( - |state_id: StateId, chain: Arc>, validator_id: ValidatorId| { + |state_id: StateId, chain: Arc>, validator_id: ValidatorId, log| { blocking_json_task(move || { let (data, execution_optimistic) = state_id .map_state_and_execution_optimistic( @@ -678,7 +679,23 @@ pub fn serve( |state, execution_optimistic| { let index_opt = match &validator_id { ValidatorId::PublicKey(pubkey) => { - state.validators().iter().position(|v| v.pubkey == *pubkey) + // Fast path: use the pubkey cache which is probably + // initialised at the head. + match state.get_validator_index_read_only(pubkey) { + Ok(result) => result, + Err(e) => { + // Slow path, fall back to iteration. + debug!( + log, + "Validator look-up cache miss"; + "reason" => ?e, + ); + state + .validators() + .iter() + .position(|v| v.pubkey == *pubkey) + } + } } ValidatorId::Index(index) => Some(*index as usize), }; diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 051789c953..44354217bc 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -155,33 +155,12 @@ impl StateId { Ok((state, execution_optimistic)) } - /* /// Map a function across the `BeaconState` identified by `self`. /// + /// The optimistic status of the requested state is also provided to the `func` closure. + /// /// This function will avoid instantiating/copying a new state when `self` points to the head /// of the chain. 
- #[allow(dead_code)] - pub fn map_state( - &self, - chain: &BeaconChain, - func: F, - ) -> Result - where - F: Fn(&BeaconState) -> Result, - { - match &self.0 { - CoreStateId::Head => chain - .with_head(|snapshot| Ok(func(&snapshot.beacon_state))) - .map_err(warp_utils::reject::beacon_chain_error)?, - _ => func(&self.state(chain)?), - } - } - */ - - /// Functions the same as `map_state` but additionally computes the value of - /// `execution_optimistic` of the state identified by `self`. - /// - /// This is to avoid re-instantiating `state` unnecessarily. pub fn map_state_and_execution_optimistic( &self, chain: &BeaconChain, diff --git a/consensus/state_processing/src/per_block_processing/verify_deposit.rs b/consensus/state_processing/src/per_block_processing/verify_deposit.rs index 3b43a8b41b..181b27ca1a 100644 --- a/consensus/state_processing/src/per_block_processing/verify_deposit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_deposit.rs @@ -29,9 +29,7 @@ pub fn verify_deposit_signature(deposit_data: &DepositData, spec: &ChainSpec) -> /// Returns a `Some(validator index)` if a pubkey already exists in the `validators`, /// otherwise returns `None`. /// -/// ## Errors -/// -/// Errors if the state's `pubkey_cache` is not current. +/// Builds the pubkey cache if it is not already built. pub fn get_existing_validator_index( state: &mut BeaconState, pub_key: &PublicKeyBytes, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index a5d00cdf2d..46a431d073 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -447,6 +447,21 @@ impl BeaconState { Ok(self.pubkey_cache().get(pubkey)) } + /// Immutable variant of `get_validator_index` which errors if the cache is not up to date. 
+ pub fn get_validator_index_read_only( + &self, + pubkey: &PublicKeyBytes, + ) -> Result, Error> { + let pubkey_cache = self.pubkey_cache(); + if pubkey_cache.len() != self.validators().len() { + return Err(Error::PubkeyCacheIncomplete { + cache_len: pubkey_cache.len(), + registry_len: self.validators().len(), + }); + } + Ok(pubkey_cache.get(pubkey)) + } + /// The epoch corresponding to `self.slot()`. pub fn current_epoch(&self) -> Epoch { self.slot().epoch(T::slots_per_epoch()) From e4cbdc1c77d94e935ab838b2c2b1d5c4d7bf4018 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sat, 15 Oct 2022 22:25:52 +0000 Subject: [PATCH 08/21] Optimistic sync spec tests (v1.2.0) (#3564) ## Issue Addressed Implements new optimistic sync test format from https://github.com/ethereum/consensus-specs/pull/2982. ## Proposed Changes - Add parsing and runner support for the new test format. - Extend the mock EL with a set of canned responses keyed by block hash. Although this doubles up on some of the existing functionality I think it's really nice to use compared to the `preloaded_responses` or static responses. I think we could write novel new opt sync tests using these primtives much more easily than the previous ones. Forks are natively supported, and different responses to `forkchoiceUpdated` and `newPayload` are also straight-forward. ## Additional Info Blocked on merge of the spec PR and release of new test vectors. 
--- Cargo.lock | 1 + .../src/engine_api/json_structures.rs | 4 +- .../src/test_utils/handle_rpc.rs | 14 ++++ .../execution_layer/src/test_utils/mod.rs | 46 +++++++++++++ testing/ef_tests/Cargo.toml | 1 + testing/ef_tests/Makefile | 2 +- .../ef_tests/src/cases/bls_aggregate_sigs.rs | 17 +++-- testing/ef_tests/src/cases/fork_choice.rs | 69 ++++++++++++++++--- testing/ef_tests/src/cases/operations.rs | 5 ++ testing/ef_tests/src/handler.rs | 31 +++++++++ testing/ef_tests/tests/tests.rs | 6 ++ 11 files changed, 177 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d0e3622e77..34c932307d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1500,6 +1500,7 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "ethereum-types 0.12.1", + "execution_layer", "fork_choice", "fs2", "hex", diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 31aa79f055..2b0c3a4c98 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,5 +1,6 @@ use super::*; use serde::{Deserialize, Serialize}; +use strum::EnumString; use types::{EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList}; #[derive(Debug, PartialEq, Serialize, Deserialize)] @@ -311,8 +312,9 @@ impl From for ForkChoiceState { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, EnumString)] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] +#[strum(serialize_all = "SCREAMING_SNAKE_CASE")] pub enum JsonPayloadStatusV1Status { Valid, Invalid, diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 975f09fa5e..ac677bf331 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -77,6 +77,11 @@ pub 
async fn handle_rpc( ENGINE_NEW_PAYLOAD_V1 => { let request: JsonExecutionPayloadV1 = get_param(params, 0)?; + // Canned responses set by block hash take priority. + if let Some(status) = ctx.get_new_payload_status(&request.block_hash) { + return Ok(serde_json::to_value(JsonPayloadStatusV1::from(status)).unwrap()); + } + let (static_response, should_import) = if let Some(mut response) = ctx.static_new_payload_response.lock().clone() { if response.status.status == PayloadStatusV1Status::Valid { @@ -120,6 +125,15 @@ pub async fn handle_rpc( let head_block_hash = forkchoice_state.head_block_hash; + // Canned responses set by block hash take priority. + if let Some(status) = ctx.get_fcu_payload_status(&head_block_hash) { + let response = JsonForkchoiceUpdatedV1Response { + payload_status: JsonPayloadStatusV1::from(status), + payload_id: None, + }; + return Ok(serde_json::to_value(response).unwrap()); + } + let mut response = ctx .execution_block_generator .write() diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index aaeea8aa5a..f5066879a7 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -12,6 +12,7 @@ use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use serde::{Deserialize, Serialize}; use serde_json::json; use slog::{info, Logger}; +use std::collections::HashMap; use std::convert::Infallible; use std::future::Future; use std::marker::PhantomData; @@ -98,6 +99,8 @@ impl MockServer { static_new_payload_response: <_>::default(), static_forkchoice_updated_response: <_>::default(), static_get_block_by_hash_response: <_>::default(), + new_payload_statuses: <_>::default(), + fcu_payload_statuses: <_>::default(), _phantom: PhantomData, }); @@ -370,6 +373,25 @@ impl MockServer { pub fn drop_all_blocks(&self) { self.ctx.execution_block_generator.write().drop_all_blocks() } + + pub fn set_payload_statuses(&self, block_hash: 
ExecutionBlockHash, status: PayloadStatusV1) { + self.set_new_payload_status(block_hash, status.clone()); + self.set_fcu_payload_status(block_hash, status); + } + + pub fn set_new_payload_status(&self, block_hash: ExecutionBlockHash, status: PayloadStatusV1) { + self.ctx + .new_payload_statuses + .lock() + .insert(block_hash, status); + } + + pub fn set_fcu_payload_status(&self, block_hash: ExecutionBlockHash, status: PayloadStatusV1) { + self.ctx + .fcu_payload_statuses + .lock() + .insert(block_hash, status); + } } #[derive(Debug)] @@ -419,9 +441,33 @@ pub struct Context { pub static_new_payload_response: Arc>>, pub static_forkchoice_updated_response: Arc>>, pub static_get_block_by_hash_response: Arc>>>, + + // Canned responses by block hash. + // + // This is a more flexible and less stateful alternative to `static_new_payload_response` + // and `preloaded_responses`. + pub new_payload_statuses: Arc>>, + pub fcu_payload_statuses: Arc>>, + pub _phantom: PhantomData, } +impl Context { + pub fn get_new_payload_status( + &self, + block_hash: &ExecutionBlockHash, + ) -> Option { + self.new_payload_statuses.lock().get(block_hash).cloned() + } + + pub fn get_fcu_payload_status( + &self, + block_hash: &ExecutionBlockHash, + ) -> Option { + self.fcu_payload_statuses.lock().get(block_hash).cloned() + } +} + /// Configuration for the HTTP server. 
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] pub struct Config { diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index e04d671396..04a222c7af 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -35,3 +35,4 @@ fs2 = "0.4.3" beacon_chain = { path = "../../beacon_node/beacon_chain" } store = { path = "../../beacon_node/store" } fork_choice = { path = "../../consensus/fork_choice" } +execution_layer = { path = "../../beacon_node/execution_layer" } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index fac1ab905a..e05ef0b06b 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.2.0-rc.3 +TESTS_TAG := v1.2.0 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/src/cases/bls_aggregate_sigs.rs b/testing/ef_tests/src/cases/bls_aggregate_sigs.rs index 81e186a66b..53387ee4d7 100644 --- a/testing/ef_tests/src/cases/bls_aggregate_sigs.rs +++ b/testing/ef_tests/src/cases/bls_aggregate_sigs.rs @@ -7,7 +7,7 @@ use serde_derive::Deserialize; #[derive(Debug, Clone, Deserialize)] pub struct BlsAggregateSigs { pub input: Vec, - pub output: String, + pub output: Option, } impl_bls_load_case!(BlsAggregateSigs); @@ -25,14 +25,13 @@ impl Case for BlsAggregateSigs { aggregate_signature.add_assign(&sig); } - // Check for YAML null value, indicating invalid input. This is a bit of a hack, - // as our mutating `aggregate_signature.add` API doesn't play nicely with aggregating 0 - // inputs. - let output_bytes = if self.output == "~" { - AggregateSignature::infinity().serialize().to_vec() - } else { - hex::decode(&self.output[2..]) - .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))? + let output_bytes = match self.output.as_deref() { + // Check for YAML null value, indicating invalid input. 
This is a bit of a hack, + // as our mutating `aggregate_signature.add` API doesn't play nicely with aggregating 0 + // inputs. + Some("~") | None => AggregateSignature::infinity().serialize().to_vec(), + Some(output) => hex::decode(&output[2..]) + .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?, }; let aggregate_signature = Ok(aggregate_signature.serialize().to_vec()); diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 0e1bb2aced..8faf4db821 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -9,7 +9,8 @@ use beacon_chain::{ test_utils::{BeaconChainHarness, EphemeralHarnessType}, BeaconChainTypes, CachedHead, CountUnrealized, }; -use serde_derive::Deserialize; +use execution_layer::{json_structures::JsonPayloadStatusV1Status, PayloadStatusV1}; +use serde::Deserialize; use ssz_derive::Decode; use state_processing::state_advance::complete_state_advance; use std::future::Future; @@ -50,16 +51,53 @@ pub struct Checks { proposer_boost_root: Option, } +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct PayloadStatus { + status: JsonPayloadStatusV1Status, + latest_valid_hash: Option, + validation_error: Option, +} + +impl From for PayloadStatusV1 { + fn from(status: PayloadStatus) -> Self { + PayloadStatusV1 { + status: status.status.into(), + latest_valid_hash: status.latest_valid_hash, + validation_error: status.validation_error, + } + } +} + #[derive(Debug, Clone, Deserialize)] #[serde(untagged, deny_unknown_fields)] pub enum Step { - Tick { tick: u64 }, - ValidBlock { block: B }, - MaybeValidBlock { block: B, valid: bool }, - Attestation { attestation: A }, - AttesterSlashing { attester_slashing: AS }, - PowBlock { pow_block: P }, - Checks { checks: Box }, + Tick { + tick: u64, + }, + ValidBlock { + block: B, + }, + MaybeValidBlock { + block: B, + valid: bool, + }, + Attestation { + attestation: A, + }, + 
AttesterSlashing { + attester_slashing: AS, + }, + PowBlock { + pow_block: P, + }, + OnPayloadInfo { + block_hash: ExecutionBlockHash, + payload_status: PayloadStatus, + }, + Checks { + checks: Box, + }, } #[derive(Debug, Clone, Deserialize)] @@ -119,6 +157,13 @@ impl LoadCase for ForkChoiceTest { ssz_decode_file(&path.join(format!("{}.ssz_snappy", pow_block))) .map(|pow_block| Step::PowBlock { pow_block }) } + Step::OnPayloadInfo { + block_hash, + payload_status, + } => Ok(Step::OnPayloadInfo { + block_hash, + payload_status, + }), Step::Checks { checks } => Ok(Step::Checks { checks }), }) .collect::>()?; @@ -168,6 +213,14 @@ impl Case for ForkChoiceTest { tester.process_attester_slashing(attester_slashing) } Step::PowBlock { pow_block } => tester.process_pow_block(pow_block), + Step::OnPayloadInfo { + block_hash, + payload_status, + } => { + let el = tester.harness.mock_execution_layer.as_ref().unwrap(); + el.server + .set_payload_statuses(*block_hash, payload_status.clone().into()); + } Step::Checks { checks } => { let Checks { head, diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 798dae083b..54195cc236 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -117,6 +117,11 @@ impl Operation for Deposit { ssz_decode_file(path) } + fn is_enabled_for_fork(_: ForkName) -> bool { + // Some deposit tests require signature verification but are not marked as such. 
+ cfg!(not(feature = "fake_crypto")) + } + fn apply_to( &self, state: &mut BeaconState, diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 92d5db7fde..dd5ed82da7 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -546,6 +546,37 @@ impl Handler for ForkChoiceHandler { } } +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct OptimisticSyncHandler(PhantomData); + +impl Handler for OptimisticSyncHandler { + type Case = cases::ForkChoiceTest; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "sync" + } + + fn handler_name(&self) -> String { + "optimistic".into() + } + + fn use_rayon() -> bool { + // The opt sync tests use `block_on` which can cause panics with rayon. + false + } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + fork_name != ForkName::Base + && fork_name != ForkName::Altair + && cfg!(not(feature = "fake_crypto")) + } +} + #[derive(Derivative)] #[derivative(Default(bound = ""))] pub struct GenesisValidityHandler(PhantomData); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 2c8b9d223b..28c57028cf 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -448,6 +448,12 @@ fn fork_choice_ex_ante() { ForkChoiceHandler::::new("ex_ante").run(); } +#[test] +fn optimistic_sync() { + OptimisticSyncHandler::::default().run(); + OptimisticSyncHandler::::default().run(); +} + #[test] fn genesis_initialization() { GenesisInitializationHandler::::default().run(); From 59ec6b71b8094f3673f9ca3471d5d93927f7097e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sat, 15 Oct 2022 22:25:54 +0000 Subject: [PATCH 09/21] Consensus context with proposer index caching (#3604) ## Issue Addressed Closes https://github.com/sigp/lighthouse/issues/2371 ## Proposed Changes Backport some changes from `tree-states` that remove duplicated calculations of the `proposer_index`. 
With this change the proposer index should be calculated only once for each block, and then plumbed through to every place it is required. ## Additional Info In future I hope to add more data to the consensus context that is cached on a per-epoch basis, like the effective balances of validators and the base rewards. There are some other changes to remove indexing in tests that were also useful for `tree-states` (the `tree-states` types don't implement `Index`). --- beacon_node/beacon_chain/src/beacon_chain.rs | 7 +- .../beacon_chain/src/block_verification.rs | 53 ++++++++--- beacon_node/beacon_chain/src/fork_revert.rs | 7 +- .../beacon_chain/tests/block_verification.rs | 8 +- beacon_node/store/src/reconstruct.rs | 9 +- .../state_processing/src/block_replayer.rs | 9 +- .../src/common/slash_validator.rs | 15 ++- .../state_processing/src/consensus_context.rs | 92 +++++++++++++++++++ consensus/state_processing/src/lib.rs | 2 + .../src/per_block_processing.rs | 38 ++++++-- .../block_signature_verifier.rs | 15 ++- .../src/per_block_processing/errors.rs | 24 ++++- .../process_operations.rs | 29 ++++-- .../per_block_processing/signature_sets.rs | 16 +++- .../src/per_block_processing/tests.rs | 61 +++++++++--- lcli/src/skip_slots.rs | 2 +- lcli/src/transition_blocks.rs | 8 +- testing/ef_tests/src/cases/operations.rs | 50 +++++++--- testing/ef_tests/src/cases/sanity_blocks.rs | 12 ++- testing/ef_tests/src/cases/transition.rs | 5 +- testing/state_transition_vectors/src/exit.rs | 26 ++++-- 21 files changed, 388 insertions(+), 100 deletions(-) create mode 100644 consensus/state_processing/src/consensus_context.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3eecc9a0dc..4429abc4c9 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -80,7 +80,7 @@ use state_processing::{ }, per_slot_processing, state_advance::{complete_state_advance, 
partial_state_advance}, - BlockSignatureStrategy, SigVerifiedOp, VerifyBlockRoot, VerifyOperation, + BlockSignatureStrategy, ConsensusContext, SigVerifiedOp, VerifyBlockRoot, VerifyOperation, }; use std::cmp::Ordering; use std::collections::HashMap; @@ -3498,7 +3498,6 @@ impl BeaconChain { } let slot = state.slot(); - let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64; let sync_aggregate = if matches!(&state, BeaconState::Base(_)) { None @@ -3645,12 +3644,14 @@ impl BeaconChain { ProduceBlockVerification::VerifyRandao => BlockSignatureStrategy::VerifyRandao, ProduceBlockVerification::NoVerification => BlockSignatureStrategy::NoVerification, }; + // Use a context without block root or proposer index so that both are checked. + let mut ctxt = ConsensusContext::new(block.slot()); per_block_processing( &mut state, &block, - None, signature_strategy, VerifyBlockRoot::True, + &mut ctxt, &self.spec, )?; drop(process_timer); diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index f83bc535d9..7f59f1cfec 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -71,7 +71,8 @@ use state_processing::{ block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError}, per_block_processing, per_slot_processing, state_advance::partial_state_advance, - BlockProcessingError, BlockSignatureStrategy, SlotProcessingError, VerifyBlockRoot, + BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, + VerifyBlockRoot, }; use std::borrow::Cow; use std::fs; @@ -549,7 +550,7 @@ pub fn signature_verify_chain_segment( let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); for (block_root, block) in &chain_segment { - signature_verifier.include_all_signatures(block, Some(*block_root))?; + signature_verifier.include_all_signatures(block, 
Some(*block_root), None)?; } if signature_verifier.verify().is_err() { @@ -560,10 +561,17 @@ pub fn signature_verify_chain_segment( let mut signature_verified_blocks = chain_segment .into_iter() - .map(|(block_root, block)| SignatureVerifiedBlock { - block, - block_root, - parent: None, + .map(|(block_root, block)| { + // Proposer index has already been verified above during signature verification. + let consensus_context = ConsensusContext::new(block.slot()) + .set_current_block_root(block_root) + .set_proposer_index(block.message().proposer_index()); + SignatureVerifiedBlock { + block, + block_root, + parent: None, + consensus_context, + } }) .collect::>(); @@ -582,6 +590,7 @@ pub struct GossipVerifiedBlock { pub block: Arc>, pub block_root: Hash256, parent: Option>, + consensus_context: ConsensusContext, } /// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit @@ -590,6 +599,7 @@ pub struct SignatureVerifiedBlock { block: Arc>, block_root: Hash256, parent: Option>, + consensus_context: ConsensusContext, } /// Used to await the result of executing payload with a remote EE. @@ -863,10 +873,16 @@ impl GossipVerifiedBlock { // Validate the block's execution_payload (if any). validate_execution_payload_for_gossip(&parent_block, block.message(), chain)?; + // Having checked the proposer index and the block root we can cache them. 
+ let consensus_context = ConsensusContext::new(block.slot()) + .set_current_block_root(block_root) + .set_proposer_index(block.message().proposer_index()); + Ok(Self { block, block_root, parent, + consensus_context, }) } @@ -926,10 +942,13 @@ impl SignatureVerifiedBlock { let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); - signature_verifier.include_all_signatures(&block, Some(block_root))?; + signature_verifier.include_all_signatures(&block, Some(block_root), None)?; if signature_verifier.verify().is_ok() { Ok(Self { + consensus_context: ConsensusContext::new(block.slot()) + .set_current_block_root(block_root) + .set_proposer_index(block.message().proposer_index()), block, block_root, parent: Some(parent), @@ -972,13 +991,18 @@ impl SignatureVerifiedBlock { let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); - signature_verifier.include_all_signatures_except_proposal(&block)?; + // Gossip verification has already checked the proposer index. Use it to check the RANDAO + // signature. + let verified_proposer_index = Some(block.message().proposer_index()); + signature_verifier + .include_all_signatures_except_proposal(&block, verified_proposer_index)?; if signature_verifier.verify().is_ok() { Ok(Self { block, block_root: from.block_root, parent: Some(parent), + consensus_context: from.consensus_context, }) } else { Err(BlockError::InvalidSignature) @@ -1015,8 +1039,14 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc .map_err(|e| BlockSlashInfo::SignatureValid(header.clone(), e))? 
}; - ExecutionPendingBlock::from_signature_verified_components(block, block_root, parent, chain) - .map_err(|e| BlockSlashInfo::SignatureValid(header, e)) + ExecutionPendingBlock::from_signature_verified_components( + block, + block_root, + parent, + self.consensus_context, + chain, + ) + .map_err(|e| BlockSlashInfo::SignatureValid(header, e)) } fn block(&self) -> &SignedBeaconBlock { @@ -1057,6 +1087,7 @@ impl ExecutionPendingBlock { block: Arc>, block_root: Hash256, parent: PreProcessingSnapshot, + mut consensus_context: ConsensusContext, chain: &Arc>, ) -> Result> { if let Some(parent) = chain @@ -1340,10 +1371,10 @@ impl ExecutionPendingBlock { if let Err(err) = per_block_processing( &mut state, &block, - Some(block_root), // Signatures were verified earlier in this function. BlockSignatureStrategy::NoVerification, VerifyBlockRoot::True, + &mut consensus_context, &chain.spec, ) { match err { diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index 654b2713b1..3d48dfd8f6 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -5,7 +5,8 @@ use proto_array::CountUnrealizedFull; use slog::{info, warn, Logger}; use state_processing::state_advance::complete_state_advance; use state_processing::{ - per_block_processing, per_block_processing::BlockSignatureStrategy, VerifyBlockRoot, + per_block_processing, per_block_processing::BlockSignatureStrategy, ConsensusContext, + VerifyBlockRoot, }; use std::sync::Arc; use std::time::Duration; @@ -172,12 +173,14 @@ pub fn reset_fork_choice_to_finalization, Cold: It complete_state_advance(&mut state, None, block.slot(), spec) .map_err(|e| format!("State advance failed: {:?}", e))?; + let mut ctxt = ConsensusContext::new(block.slot()) + .set_proposer_index(block.message().proposer_index()); per_block_processing( &mut state, &block, - None, BlockSignatureStrategy::NoVerification, VerifyBlockRoot::True, + &mut ctxt, spec, ) 
.map_err(|e| format!("Error replaying block: {:?}", e))?; diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 0ff4e57a8a..998f22f770 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -11,7 +11,7 @@ use slasher::{Config as SlasherConfig, Slasher}; use state_processing::{ common::get_indexed_attestation, per_block_processing::{per_block_processing, BlockSignatureStrategy}, - per_slot_processing, BlockProcessingError, VerifyBlockRoot, + per_slot_processing, BlockProcessingError, ConsensusContext, VerifyBlockRoot, }; use std::marker::PhantomData; use std::sync::Arc; @@ -1139,14 +1139,15 @@ async fn add_base_block_to_altair_chain() { // Ensure that it would be impossible to apply this block to `per_block_processing`. { let mut state = state; + let mut ctxt = ConsensusContext::new(base_block.slot()); per_slot_processing(&mut state, None, &harness.chain.spec).unwrap(); assert!(matches!( per_block_processing( &mut state, &base_block, - None, BlockSignatureStrategy::NoVerification, VerifyBlockRoot::True, + &mut ctxt, &harness.chain.spec, ), Err(BlockProcessingError::InconsistentBlockFork( @@ -1271,14 +1272,15 @@ async fn add_altair_block_to_base_chain() { // Ensure that it would be impossible to apply this block to `per_block_processing`. 
{ let mut state = state; + let mut ctxt = ConsensusContext::new(altair_block.slot()); per_slot_processing(&mut state, None, &harness.chain.spec).unwrap(); assert!(matches!( per_block_processing( &mut state, &altair_block, - None, BlockSignatureStrategy::NoVerification, VerifyBlockRoot::True, + &mut ctxt, &harness.chain.spec, ), Err(BlockProcessingError::InconsistentBlockFork( diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index 7db2652f2c..c939fd3f51 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -4,7 +4,8 @@ use crate::{Error, ItemStore, KeyValueStore}; use itertools::{process_results, Itertools}; use slog::info; use state_processing::{ - per_block_processing, per_slot_processing, BlockSignatureStrategy, VerifyBlockRoot, + per_block_processing, per_slot_processing, BlockSignatureStrategy, ConsensusContext, + VerifyBlockRoot, }; use std::sync::Arc; use types::{EthSpec, Hash256}; @@ -87,12 +88,16 @@ where // Apply block. 
if let Some(block) = block { + let mut ctxt = ConsensusContext::new(block.slot()) + .set_current_block_root(block_root) + .set_proposer_index(block.message().proposer_index()); + per_block_processing( &mut state, &block, - Some(block_root), BlockSignatureStrategy::NoVerification, VerifyBlockRoot::True, + &mut ctxt, &self.spec, ) .map_err(HotColdDBError::BlockReplayBlockError)?; diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index d4b4b067e3..cc7bd17c50 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -1,6 +1,7 @@ use crate::{ per_block_processing, per_epoch_processing::EpochProcessingSummary, per_slot_processing, - BlockProcessingError, BlockSignatureStrategy, SlotProcessingError, VerifyBlockRoot, + BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, + VerifyBlockRoot, }; use std::marker::PhantomData; use types::{BeaconState, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot}; @@ -254,12 +255,16 @@ where VerifyBlockRoot::False } }); + // Proposer index was already checked when this block was originally processed, we + // can omit recomputing it during replay. 
+ let mut ctxt = ConsensusContext::new(block.slot()) + .set_proposer_index(block.message().proposer_index()); per_block_processing( &mut self.state, block, - None, self.block_sig_strategy, verify_block_root, + &mut ctxt, self.spec, ) .map_err(BlockReplayError::from)?; diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index e9d94a1062..ac2dba875e 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -1,9 +1,13 @@ -use crate::common::{decrease_balance, increase_balance, initiate_validator_exit}; +use crate::{ + common::{decrease_balance, increase_balance, initiate_validator_exit}, + per_block_processing::errors::BlockProcessingError, + ConsensusContext, +}; use safe_arith::SafeArith; use std::cmp; use types::{ consts::altair::{PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, - BeaconStateError as Error, *, + *, }; /// Slash the validator with index `slashed_index`. @@ -11,8 +15,9 @@ pub fn slash_validator( state: &mut BeaconState, slashed_index: usize, opt_whistleblower_index: Option, + ctxt: &mut ConsensusContext, spec: &ChainSpec, -) -> Result<(), Error> { +) -> Result<(), BlockProcessingError> { let epoch = state.current_epoch(); initiate_validator_exit(state, slashed_index, spec)?; @@ -39,7 +44,7 @@ pub fn slash_validator( )?; // Apply proposer and whistleblower rewards - let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)?; + let proposer_index = ctxt.get_proposer_index(state, spec)? as usize; let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index); let whistleblower_reward = validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?; @@ -52,7 +57,7 @@ pub fn slash_validator( // Ensure the whistleblower index is in the validator registry. 
if state.validators().get(whistleblower_index).is_none() { - return Err(BeaconStateError::UnknownValidator(whistleblower_index)); + return Err(BeaconStateError::UnknownValidator(whistleblower_index).into()); } increase_balance(state, proposer_index, proposer_reward)?; diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs new file mode 100644 index 0000000000..fdd3f95a65 --- /dev/null +++ b/consensus/state_processing/src/consensus_context.rs @@ -0,0 +1,92 @@ +use std::marker::PhantomData; +use tree_hash::TreeHash; +use types::{ + BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, Hash256, SignedBeaconBlock, + Slot, +}; + +#[derive(Debug)] +pub struct ConsensusContext { + /// Slot to act as an identifier/safeguard + slot: Slot, + /// Proposer index of the block at `slot`. + proposer_index: Option, + /// Block root of the block at `slot`. + current_block_root: Option, + _phantom: PhantomData, +} + +#[derive(Debug, PartialEq, Clone)] +pub enum ContextError { + BeaconState(BeaconStateError), + SlotMismatch { slot: Slot, expected: Slot }, +} + +impl From for ContextError { + fn from(e: BeaconStateError) -> Self { + Self::BeaconState(e) + } +} + +impl ConsensusContext { + pub fn new(slot: Slot) -> Self { + Self { + slot, + proposer_index: None, + current_block_root: None, + _phantom: PhantomData, + } + } + + pub fn set_proposer_index(mut self, proposer_index: u64) -> Self { + self.proposer_index = Some(proposer_index); + self + } + + pub fn get_proposer_index( + &mut self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result { + self.check_slot(state.slot())?; + + if let Some(proposer_index) = self.proposer_index { + return Ok(proposer_index); + } + + let proposer_index = state.get_beacon_proposer_index(self.slot, spec)? 
as u64; + self.proposer_index = Some(proposer_index); + Ok(proposer_index) + } + + pub fn set_current_block_root(mut self, block_root: Hash256) -> Self { + self.current_block_root = Some(block_root); + self + } + + pub fn get_current_block_root>( + &mut self, + block: &SignedBeaconBlock, + ) -> Result { + self.check_slot(block.slot())?; + + if let Some(current_block_root) = self.current_block_root { + return Ok(current_block_root); + } + + let current_block_root = block.message().tree_hash_root(); + self.current_block_root = Some(current_block_root); + Ok(current_block_root) + } + + fn check_slot(&self, slot: Slot) -> Result<(), ContextError> { + if slot == self.slot { + Ok(()) + } else { + Err(ContextError::SlotMismatch { + slot, + expected: self.slot, + }) + } + } +} diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index cf541d4572..e4f36bedd8 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -18,6 +18,7 @@ mod metrics; pub mod block_replayer; pub mod common; +pub mod consensus_context; pub mod genesis; pub mod per_block_processing; pub mod per_epoch_processing; @@ -27,6 +28,7 @@ pub mod upgrade; pub mod verify_operation; pub use block_replayer::{BlockReplayError, BlockReplayer, StateRootStrategy}; +pub use consensus_context::{ConsensusContext, ContextError}; pub use genesis::{ eth2_genesis_time, initialize_beacon_state_from_eth1, is_valid_genesis_state, process_activations, diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index e409372ddd..cccc8eacd9 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -1,3 +1,4 @@ +use crate::consensus_context::ConsensusContext; use errors::{BlockOperationError, BlockProcessingError, HeaderInvalid}; use rayon::prelude::*; use safe_arith::{ArithError, SafeArith}; @@ -90,9 +91,9 @@ pub 
enum VerifyBlockRoot { pub fn per_block_processing>( state: &mut BeaconState, signed_block: &SignedBeaconBlock, - block_root: Option, block_signature_strategy: BlockSignatureStrategy, verify_block_root: VerifyBlockRoot, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { let block = signed_block.message(); @@ -110,6 +111,8 @@ pub fn per_block_processing>( let verify_signatures = match block_signature_strategy { BlockSignatureStrategy::VerifyBulk => { // Verify all signatures in the block at once. + let block_root = Some(ctxt.get_current_block_root(signed_block)?); + let proposer_index = Some(ctxt.get_proposer_index(state, spec)?); block_verify!( BlockSignatureVerifier::verify_entire_block( state, @@ -117,6 +120,7 @@ pub fn per_block_processing>( |pk_bytes| pk_bytes.decompress().ok().map(Cow::Owned), signed_block, block_root, + proposer_index, spec ) .is_ok(), @@ -133,11 +137,12 @@ pub fn per_block_processing>( state, block.temporary_block_header(), verify_block_root, + ctxt, spec, )?; if verify_signatures.is_true() { - verify_block_signature(state, signed_block, block_root, spec)?; + verify_block_signature(state, signed_block, ctxt, spec)?; } let verify_randao = if let BlockSignatureStrategy::VerifyRandao = block_signature_strategy { @@ -157,9 +162,9 @@ pub fn per_block_processing>( process_execution_payload(state, payload, spec)?; } - process_randao(state, block, verify_randao, spec)?; + process_randao(state, block, verify_randao, ctxt, spec)?; process_eth1_data(state, block.body().eth1_data())?; - process_operations(state, block.body(), proposer_index, verify_signatures, spec)?; + process_operations(state, block.body(), verify_signatures, ctxt, spec)?; if let Ok(sync_aggregate) = block.body().sync_aggregate() { process_sync_aggregate( @@ -179,6 +184,7 @@ pub fn process_block_header( state: &mut BeaconState, block_header: BeaconBlockHeader, verify_block_root: VerifyBlockRoot, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) 
-> Result> { // Verify that the slots match @@ -197,8 +203,8 @@ pub fn process_block_header( ); // Verify that proposer index is the correct index - let proposer_index = block_header.proposer_index as usize; - let state_proposer_index = state.get_beacon_proposer_index(block_header.slot, spec)?; + let proposer_index = block_header.proposer_index; + let state_proposer_index = ctxt.get_proposer_index(state, spec)?; verify!( proposer_index == state_proposer_index, HeaderInvalid::ProposerIndexMismatch { @@ -222,11 +228,11 @@ pub fn process_block_header( // Verify proposer is not slashed verify!( - !state.get_validator(proposer_index)?.slashed, + !state.get_validator(proposer_index as usize)?.slashed, HeaderInvalid::ProposerSlashed(proposer_index) ); - Ok(proposer_index as u64) + Ok(proposer_index) } /// Verifies the signature of a block. @@ -235,15 +241,18 @@ pub fn process_block_header( pub fn verify_block_signature>( state: &BeaconState, block: &SignedBeaconBlock, - block_root: Option, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockOperationError> { + let block_root = Some(ctxt.get_current_block_root(block)?); + let proposer_index = Some(ctxt.get_proposer_index(state, spec)?); verify!( block_proposal_signature_set( state, |i| get_pubkey_from_state(state, i), block, block_root, + proposer_index, spec )? .verify(), @@ -259,12 +268,21 @@ pub fn process_randao>( state: &mut BeaconState, block: BeaconBlockRef<'_, T, Payload>, verify_signatures: VerifySignatures, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { if verify_signatures.is_true() { // Verify RANDAO reveal signature. + let proposer_index = ctxt.get_proposer_index(state, spec)?; block_verify!( - randao_signature_set(state, |i| get_pubkey_from_state(state, i), block, spec)?.verify(), + randao_signature_set( + state, + |i| get_pubkey_from_state(state, i), + block, + Some(proposer_index), + spec + )? 
+ .verify(), BlockProcessingError::RandaoSignatureInvalid ); } diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 78205ca92c..7584df14ec 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -123,10 +123,11 @@ where decompressor: D, block: &'a SignedBeaconBlock, block_root: Option, + verified_proposer_index: Option, spec: &'a ChainSpec, ) -> Result<()> { let mut verifier = Self::new(state, get_pubkey, decompressor, spec); - verifier.include_all_signatures(block, block_root)?; + verifier.include_all_signatures(block, block_root, verified_proposer_index)?; verifier.verify() } @@ -135,9 +136,10 @@ where &mut self, block: &'a SignedBeaconBlock, block_root: Option, + verified_proposer_index: Option, ) -> Result<()> { - self.include_block_proposal(block, block_root)?; - self.include_all_signatures_except_proposal(block)?; + self.include_block_proposal(block, block_root, verified_proposer_index)?; + self.include_all_signatures_except_proposal(block, verified_proposer_index)?; Ok(()) } @@ -147,8 +149,9 @@ where pub fn include_all_signatures_except_proposal>( &mut self, block: &'a SignedBeaconBlock, + verified_proposer_index: Option, ) -> Result<()> { - self.include_randao_reveal(block)?; + self.include_randao_reveal(block, verified_proposer_index)?; self.include_proposer_slashings(block)?; self.include_attester_slashings(block)?; self.include_attestations(block)?; @@ -164,12 +167,14 @@ where &mut self, block: &'a SignedBeaconBlock, block_root: Option, + verified_proposer_index: Option, ) -> Result<()> { let set = block_proposal_signature_set( self.state, self.get_pubkey.clone(), block, block_root, + verified_proposer_index, self.spec, )?; self.sets.push(set); @@ -180,11 +185,13 @@ where pub fn include_randao_reveal>( 
&mut self, block: &'a SignedBeaconBlock, + verified_proposer_index: Option, ) -> Result<()> { let set = randao_signature_set( self.state, self.get_pubkey.clone(), block.message(), + verified_proposer_index, self.spec, )?; self.sets.push(set); diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index e214b6e63d..71bd55f883 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -1,4 +1,5 @@ use super::signature_sets::Error as SignatureSetError; +use crate::ContextError; use merkle_proof::MerkleTreeError; use safe_arith::ArithError; use types::*; @@ -70,6 +71,7 @@ pub enum BlockProcessingError { found: u64, }, ExecutionInvalid, + ConsensusContext(ContextError), } impl From for BlockProcessingError { @@ -102,6 +104,12 @@ impl From for BlockProcessingError { } } +impl From for BlockProcessingError { + fn from(e: ContextError) -> Self { + BlockProcessingError::ConsensusContext(e) + } +} + impl From> for BlockProcessingError { fn from(e: BlockOperationError) -> BlockProcessingError { match e { @@ -109,6 +117,7 @@ impl From> for BlockProcessingError { BlockOperationError::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e), BlockOperationError::SignatureSetError(e) => BlockProcessingError::SignatureSetError(e), BlockOperationError::SszTypesError(e) => BlockProcessingError::SszTypesError(e), + BlockOperationError::ConsensusContext(e) => BlockProcessingError::ConsensusContext(e), BlockOperationError::ArithError(e) => BlockProcessingError::ArithError(e), } } @@ -136,6 +145,7 @@ macro_rules! 
impl_into_block_processing_error_with_index { BlockOperationError::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e), BlockOperationError::SignatureSetError(e) => BlockProcessingError::SignatureSetError(e), BlockOperationError::SszTypesError(e) => BlockProcessingError::SszTypesError(e), + BlockOperationError::ConsensusContext(e) => BlockProcessingError::ConsensusContext(e), BlockOperationError::ArithError(e) => BlockProcessingError::ArithError(e), } } @@ -167,6 +177,7 @@ pub enum BlockOperationError { BeaconStateError(BeaconStateError), SignatureSetError(SignatureSetError), SszTypesError(ssz_types::Error), + ConsensusContext(ContextError), ArithError(ArithError), } @@ -199,6 +210,12 @@ impl From for BlockOperationError { } } +impl From for BlockOperationError { + fn from(e: ContextError) -> Self { + BlockOperationError::ConsensusContext(e) + } +} + #[derive(Debug, PartialEq, Clone)] pub enum HeaderInvalid { ProposalSignatureInvalid, @@ -208,14 +225,14 @@ pub enum HeaderInvalid { block_slot: Slot, }, ProposerIndexMismatch { - block_proposer_index: usize, - state_proposer_index: usize, + block_proposer_index: u64, + state_proposer_index: u64, }, ParentBlockRootMismatch { state: Hash256, block: Hash256, }, - ProposerSlashed(usize), + ProposerSlashed(u64), } #[derive(Debug, PartialEq, Clone)] @@ -310,6 +327,7 @@ impl From> BlockOperationError::BeaconStateError(e) => BlockOperationError::BeaconStateError(e), BlockOperationError::SignatureSetError(e) => BlockOperationError::SignatureSetError(e), BlockOperationError::SszTypesError(e) => BlockOperationError::SszTypesError(e), + BlockOperationError::ConsensusContext(e) => BlockOperationError::ConsensusContext(e), BlockOperationError::ArithError(e) => BlockOperationError::ArithError(e), } } diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 31a4ac1fb4..1000586e66 100644 --- 
a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -12,23 +12,25 @@ use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_ pub fn process_operations<'a, T: EthSpec, Payload: ExecPayload>( state: &mut BeaconState, block_body: BeaconBlockBodyRef<'a, T, Payload>, - proposer_index: u64, verify_signatures: VerifySignatures, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { process_proposer_slashings( state, block_body.proposer_slashings(), verify_signatures, + ctxt, spec, )?; process_attester_slashings( state, block_body.attester_slashings(), verify_signatures, + ctxt, spec, )?; - process_attestations(state, block_body, proposer_index, verify_signatures, spec)?; + process_attestations(state, block_body, verify_signatures, ctxt, spec)?; process_deposits(state, block_body.deposits(), spec)?; process_exits(state, block_body.voluntary_exits(), verify_signatures, spec)?; Ok(()) @@ -45,12 +47,13 @@ pub mod base { state: &mut BeaconState, attestations: &[Attestation], verify_signatures: VerifySignatures, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { // Ensure the previous epoch cache exists. state.build_committee_cache(RelativeEpoch::Previous, spec)?; - let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)? as u64; + let proposer_index = ctxt.get_proposer_index(state, spec)?; // Verify and apply each attestation. 
for (i, attestation) in attestations.iter().enumerate() { @@ -87,10 +90,11 @@ pub mod altair { pub fn process_attestations( state: &mut BeaconState, attestations: &[Attestation], - proposer_index: u64, verify_signatures: VerifySignatures, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { + let proposer_index = ctxt.get_proposer_index(state, spec)?; attestations .iter() .enumerate() @@ -170,6 +174,7 @@ pub fn process_proposer_slashings( state: &mut BeaconState, proposer_slashings: &[ProposerSlashing], verify_signatures: VerifySignatures, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { // Verify and apply proposer slashings in series. @@ -186,6 +191,7 @@ pub fn process_proposer_slashings( state, proposer_slashing.signed_header_1.message.proposer_index as usize, None, + ctxt, spec, )?; @@ -201,6 +207,7 @@ pub fn process_attester_slashings( state: &mut BeaconState, attester_slashings: &[AttesterSlashing], verify_signatures: VerifySignatures, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { for (i, attester_slashing) in attester_slashings.iter().enumerate() { @@ -211,7 +218,7 @@ pub fn process_attester_slashings( get_slashable_indices(state, attester_slashing).map_err(|e| e.into_with_index(i))?; for i in slashable_indices { - slash_validator(state, i as usize, None, spec)?; + slash_validator(state, i as usize, None, ctxt, spec)?; } } @@ -222,20 +229,26 @@ pub fn process_attester_slashings( pub fn process_attestations<'a, T: EthSpec, Payload: ExecPayload>( state: &mut BeaconState, block_body: BeaconBlockBodyRef<'a, T, Payload>, - proposer_index: u64, verify_signatures: VerifySignatures, + ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { match block_body { BeaconBlockBodyRef::Base(_) => { - base::process_attestations(state, block_body.attestations(), verify_signatures, spec)?; + base::process_attestations( + state, + 
block_body.attestations(), + verify_signatures, + ctxt, + spec, + )?; } BeaconBlockBodyRef::Altair(_) | BeaconBlockBodyRef::Merge(_) => { altair::process_attestations( state, block_body.attestations(), - proposer_index, verify_signatures, + ctxt, spec, )?; } diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 5ce1bfddd5..90bbdd56fe 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -76,6 +76,7 @@ pub fn block_proposal_signature_set<'a, T, F, Payload: ExecPayload>( get_pubkey: F, signed_block: &'a SignedBeaconBlock, block_root: Option, + verified_proposer_index: Option, spec: &'a ChainSpec, ) -> Result> where @@ -83,8 +84,12 @@ where F: Fn(usize) -> Option>, { let block = signed_block.message(); - let proposer_index = state.get_beacon_proposer_index(block.slot(), spec)? as u64; + let proposer_index = if let Some(proposer_index) = verified_proposer_index { + proposer_index + } else { + state.get_beacon_proposer_index(block.slot(), spec)? as u64 + }; if proposer_index != block.proposer_index() { return Err(Error::IncorrectBlockProposer { block: block.proposer_index(), @@ -156,13 +161,18 @@ pub fn randao_signature_set<'a, T, F, Payload: ExecPayload>( state: &'a BeaconState, get_pubkey: F, block: BeaconBlockRef<'a, T, Payload>, + verified_proposer_index: Option, spec: &'a ChainSpec, ) -> Result> where T: EthSpec, F: Fn(usize) -> Option>, { - let proposer_index = state.get_beacon_proposer_index(block.slot(), spec)?; + let proposer_index = if let Some(proposer_index) = verified_proposer_index { + proposer_index + } else { + state.get_beacon_proposer_index(block.slot(), spec)? 
as u64 + }; let domain = spec.get_domain( block.slot().epoch(T::slots_per_epoch()), @@ -178,7 +188,7 @@ where Ok(SignatureSet::single_pubkey( block.body().randao_reveal(), - get_pubkey(proposer_index).ok_or(Error::ValidatorUnknown(proposer_index as u64))?, + get_pubkey(proposer_index as usize).ok_or(Error::ValidatorUnknown(proposer_index))?, message, )) } diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index e244e02c2d..b7d28832db 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -8,7 +8,7 @@ use crate::per_block_processing::errors::{ }; use crate::{ per_block_processing::{process_operations, verify_exit::verify_exit}, - BlockSignatureStrategy, VerifyBlockRoot, VerifySignatures, + BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures, }; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use lazy_static::lazy_static; @@ -67,12 +67,13 @@ async fn valid_block_ok() { .make_block_return_pre_state(state, slot + Slot::new(1)) .await; + let mut ctxt = ConsensusContext::new(block.slot()); let result = per_block_processing( &mut state, &block, - None, BlockSignatureStrategy::VerifyIndividual, VerifyBlockRoot::True, + &mut ctxt, &spec, ); @@ -91,12 +92,13 @@ async fn invalid_block_header_state_slot() { let (mut block, signature) = signed_block.deconstruct(); *block.slot_mut() = slot + Slot::new(1); + let mut ctxt = ConsensusContext::new(block.slot()); let result = per_block_processing( &mut state, &SignedBeaconBlock::from_block(block, signature), - None, BlockSignatureStrategy::VerifyIndividual, VerifyBlockRoot::True, + &mut ctxt, &spec, ); @@ -122,12 +124,13 @@ async fn invalid_parent_block_root() { let (mut block, signature) = signed_block.deconstruct(); *block.parent_root_mut() = Hash256::from([0xAA; 32]); + let mut ctxt = 
ConsensusContext::new(block.slot()); let result = per_block_processing( &mut state, &SignedBeaconBlock::from_block(block, signature), - None, BlockSignatureStrategy::VerifyIndividual, VerifyBlockRoot::True, + &mut ctxt, &spec, ); @@ -154,12 +157,13 @@ async fn invalid_block_signature() { .await; let (block, _) = signed_block.deconstruct(); + let mut ctxt = ConsensusContext::new(block.slot()); let result = per_block_processing( &mut state, &SignedBeaconBlock::from_block(block, Signature::empty()), - None, BlockSignatureStrategy::VerifyIndividual, VerifyBlockRoot::True, + &mut ctxt, &spec, ); @@ -186,12 +190,13 @@ async fn invalid_randao_reveal_signature() { }) .await; + let mut ctxt = ConsensusContext::new(signed_block.slot()); let result = per_block_processing( &mut state, &signed_block, - None, BlockSignatureStrategy::VerifyIndividual, VerifyBlockRoot::True, + &mut ctxt, &spec, ); @@ -386,11 +391,12 @@ async fn invalid_attestation_no_committee_for_index() { head_block.to_mut().body_mut().attestations_mut()[0] .data .index += 1; + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attestations( &mut state, head_block.body(), - head_block.proposer_index(), VerifySignatures::True, + &mut ctxt, &spec, ); @@ -424,11 +430,12 @@ async fn invalid_attestation_wrong_justified_checkpoint() { .data .source = new_justified_checkpoint; + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attestations( &mut state, head_block.body(), - head_block.proposer_index(), VerifySignatures::True, + &mut ctxt, &spec, ); @@ -463,11 +470,12 @@ async fn invalid_attestation_bad_aggregation_bitfield_len() { head_block.to_mut().body_mut().attestations_mut()[0].aggregation_bits = Bitfield::with_capacity(spec.target_committee_size).unwrap(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attestations( &mut state, head_block.body(), - head_block.proposer_index(), 
VerifySignatures::True, + &mut ctxt, &spec, ); @@ -495,11 +503,12 @@ async fn invalid_attestation_bad_signature() { .0; head_block.to_mut().body_mut().attestations_mut()[0].signature = AggregateSignature::empty(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attestations( &mut state, head_block.body(), - head_block.proposer_index(), VerifySignatures::True, + &mut ctxt, &spec, ); // Expecting BadSignature because we're signing with invalid secret_keys @@ -533,11 +542,12 @@ async fn invalid_attestation_included_too_early() { .data .slot = new_attesation_slot; + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attestations( &mut state, head_block.body(), - head_block.proposer_index(), VerifySignatures::True, + &mut ctxt, &spec, ); @@ -575,11 +585,12 @@ async fn invalid_attestation_included_too_late() { .data .slot = new_attesation_slot; + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attestations( &mut state, head_block.body(), - head_block.proposer_index(), VerifySignatures::True, + &mut ctxt, &spec, ); assert_eq!( @@ -613,11 +624,12 @@ async fn invalid_attestation_target_epoch_slot_mismatch() { .target .epoch += Epoch::new(1); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attestations( &mut state, head_block.body(), - head_block.proposer_index(), VerifySignatures::True, + &mut ctxt, &spec, ); assert_eq!( @@ -640,10 +652,12 @@ async fn valid_insert_attester_slashing() { let attester_slashing = harness.make_attester_slashing(vec![1, 2]); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attester_slashings( &mut state, &[attester_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); @@ -660,10 +674,12 @@ async fn invalid_attester_slashing_not_slashable() { attester_slashing.attestation_1 = 
attester_slashing.attestation_2.clone(); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attester_slashings( &mut state, &[attester_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); @@ -686,10 +702,12 @@ async fn invalid_attester_slashing_1_invalid() { attester_slashing.attestation_1.attesting_indices = VariableList::from(vec![2, 1]); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attester_slashings( &mut state, &[attester_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); @@ -715,10 +733,12 @@ async fn invalid_attester_slashing_2_invalid() { attester_slashing.attestation_2.attesting_indices = VariableList::from(vec![2, 1]); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attester_slashings( &mut state, &[attester_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); @@ -741,10 +761,12 @@ async fn valid_insert_proposer_slashing() { let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let proposer_slashing = harness.make_proposer_slashing(1); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_proposer_slashings( &mut state, &[proposer_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); // Expecting Ok(_) because we inserted a valid proposer slashing @@ -760,10 +782,12 @@ async fn invalid_proposer_slashing_proposals_identical() { proposer_slashing.signed_header_1.message = proposer_slashing.signed_header_2.message.clone(); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_proposer_slashings( &mut state, &[proposer_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); @@ -787,10 
+811,12 @@ async fn invalid_proposer_slashing_proposer_unknown() { proposer_slashing.signed_header_2.message.proposer_index = 3_141_592; let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_proposer_slashings( &mut state, &[proposer_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); @@ -811,10 +837,12 @@ async fn invalid_proposer_slashing_duplicate_slashing() { let proposer_slashing = harness.make_proposer_slashing(1); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result_1 = process_operations::process_proposer_slashings( &mut state, &[proposer_slashing.clone()], VerifySignatures::False, + &mut ctxt, &spec, ); assert!(result_1.is_ok()); @@ -823,6 +851,7 @@ async fn invalid_proposer_slashing_duplicate_slashing() { &mut state, &[proposer_slashing], VerifySignatures::False, + &mut ctxt, &spec, ); // Expecting ProposerNotSlashable because we've already slashed the validator @@ -842,10 +871,12 @@ async fn invalid_bad_proposal_1_signature() { let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.signature = Signature::empty(); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_proposer_slashings( &mut state, &[proposer_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); @@ -866,10 +897,12 @@ async fn invalid_bad_proposal_2_signature() { let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_2.signature = Signature::empty(); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_proposer_slashings( &mut state, &[proposer_slashing], VerifySignatures::True, + &mut ctxt, &spec, ); @@ -891,10 +924,12 @@ async fn invalid_proposer_slashing_proposal_epoch_mismatch() { 
proposer_slashing.signed_header_1.message.slot = Slot::new(0); proposer_slashing.signed_header_2.message.slot = Slot::new(128); let mut state = harness.get_current_state(); + let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_proposer_slashings( &mut state, &[proposer_slashing], VerifySignatures::False, + &mut ctxt, &spec, ); diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index 28310f7683..8bd9af99ad 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -121,7 +121,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< }; for i in 0..runs { - let mut state = state.clone_with(CloneConfig::committee_caches_only()); + let mut state = state.clone_with(CloneConfig::all()); let start = Instant::now(); diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index dc825d2c02..b25cec81b5 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -74,7 +74,7 @@ use eth2::{ use ssz::Encode; use state_processing::{ block_signature_verifier::BlockSignatureVerifier, per_block_processing, per_slot_processing, - BlockSignatureStrategy, VerifyBlockRoot, + BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, }; use std::borrow::Cow; use std::fs::File; @@ -360,6 +360,7 @@ fn do_transition( decompressor, &block, Some(block_root), + Some(block.message().proposer_index()), spec, ) .map_err(|e| format!("Invalid block signature: {:?}", e))?; @@ -367,12 +368,15 @@ fn do_transition( } let t = Instant::now(); + let mut ctxt = ConsensusContext::new(pre_state.slot()) + .set_current_block_root(block_root) + .set_proposer_index(block.message().proposer_index()); per_block_processing( &mut pre_state, &block, - None, BlockSignatureStrategy::NoVerification, VerifyBlockRoot::True, + &mut ctxt, spec, ) .map_err(|e| format!("State transition failed: {:?}", e))?; diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 
54195cc236..a351a597c0 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -5,14 +5,17 @@ use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yam use crate::testing_spec; use crate::type_name::TypeName; use serde_derive::Deserialize; -use state_processing::per_block_processing::{ - errors::BlockProcessingError, - process_block_header, process_execution_payload, - process_operations::{ - altair, base, process_attester_slashings, process_deposits, process_exits, - process_proposer_slashings, +use state_processing::{ + per_block_processing::{ + errors::BlockProcessingError, + process_block_header, process_execution_payload, + process_operations::{ + altair, base, process_attester_slashings, process_deposits, process_exits, + process_proposer_slashings, + }, + process_sync_aggregate, VerifyBlockRoot, VerifySignatures, }, - process_sync_aggregate, VerifyBlockRoot, VerifySignatures, + ConsensusContext, }; use std::fmt::Debug; use std::path::Path; @@ -76,11 +79,16 @@ impl Operation for Attestation { spec: &ChainSpec, _: &Operations, ) -> Result<(), BlockProcessingError> { - let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)? 
as u64; + let mut ctxt = ConsensusContext::new(state.slot()); + let proposer_index = ctxt.get_proposer_index(state, spec)?; match state { - BeaconState::Base(_) => { - base::process_attestations(state, &[self.clone()], VerifySignatures::True, spec) - } + BeaconState::Base(_) => base::process_attestations( + state, + &[self.clone()], + VerifySignatures::True, + &mut ctxt, + spec, + ), BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_attestation( state, self, @@ -108,7 +116,14 @@ impl Operation for AttesterSlashing { spec: &ChainSpec, _: &Operations, ) -> Result<(), BlockProcessingError> { - process_attester_slashings(state, &[self.clone()], VerifySignatures::True, spec) + let mut ctxt = ConsensusContext::new(state.slot()); + process_attester_slashings( + state, + &[self.clone()], + VerifySignatures::True, + &mut ctxt, + spec, + ) } } @@ -147,7 +162,14 @@ impl Operation for ProposerSlashing { spec: &ChainSpec, _: &Operations, ) -> Result<(), BlockProcessingError> { - process_proposer_slashings(state, &[self.clone()], VerifySignatures::True, spec) + let mut ctxt = ConsensusContext::new(state.slot()); + process_proposer_slashings( + state, + &[self.clone()], + VerifySignatures::True, + &mut ctxt, + spec, + ) } } @@ -189,10 +211,12 @@ impl Operation for BeaconBlock { spec: &ChainSpec, _: &Operations, ) -> Result<(), BlockProcessingError> { + let mut ctxt = ConsensusContext::new(state.slot()); process_block_header( state, self.to_ref().temporary_block_header(), VerifyBlockRoot::True, + &mut ctxt, spec, )?; Ok(()) diff --git a/testing/ef_tests/src/cases/sanity_blocks.rs b/testing/ef_tests/src/cases/sanity_blocks.rs index 5f0db25ded..8a75789724 100644 --- a/testing/ef_tests/src/cases/sanity_blocks.rs +++ b/testing/ef_tests/src/cases/sanity_blocks.rs @@ -5,7 +5,7 @@ use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::{ per_block_processing, per_slot_processing, 
BlockProcessingError, BlockSignatureStrategy, - VerifyBlockRoot, + ConsensusContext, VerifyBlockRoot, }; use types::{BeaconState, EthSpec, ForkName, RelativeEpoch, SignedBeaconBlock}; @@ -91,26 +91,28 @@ impl Case for SanityBlocks { .build_committee_cache(RelativeEpoch::Current, spec) .unwrap(); + let mut ctxt = ConsensusContext::new(indiv_state.slot()); per_block_processing( &mut indiv_state, signed_block, - None, BlockSignatureStrategy::VerifyIndividual, VerifyBlockRoot::True, + &mut ctxt, spec, )?; + let mut ctxt = ConsensusContext::new(indiv_state.slot()); per_block_processing( &mut bulk_state, signed_block, - None, BlockSignatureStrategy::VerifyBulk, VerifyBlockRoot::True, + &mut ctxt, spec, )?; - if block.state_root() == bulk_state.canonical_root() - && block.state_root() == indiv_state.canonical_root() + if block.state_root() == bulk_state.update_tree_hash_cache().unwrap() + && block.state_root() == indiv_state.update_tree_hash_cache().unwrap() { Ok(()) } else { diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index d2b1bb2c62..2c9134aba5 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -4,7 +4,7 @@ use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::{ per_block_processing, state_advance::complete_state_advance, BlockSignatureStrategy, - VerifyBlockRoot, + ConsensusContext, VerifyBlockRoot, }; use std::str::FromStr; use types::{BeaconState, Epoch, ForkName, SignedBeaconBlock}; @@ -91,12 +91,13 @@ impl Case for TransitionTest { .map_err(|e| format!("Failed to advance: {:?}", e))?; // Apply block. 
+ let mut ctxt = ConsensusContext::new(state.slot()); per_block_processing( &mut state, block, - None, BlockSignatureStrategy::VerifyBulk, VerifyBlockRoot::True, + &mut ctxt, spec, ) .map_err(|e| format!("Block processing failed: {:?}", e))?; diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index f485e1a268..d581eba965 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -2,7 +2,7 @@ use super::*; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use state_processing::{ per_block_processing, per_block_processing::errors::ExitInvalid, BlockProcessingError, - BlockSignatureStrategy, VerifyBlockRoot, + BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, }; use types::{BeaconBlock, BeaconState, Epoch, EthSpec, SignedBeaconBlock}; @@ -64,12 +64,13 @@ impl ExitTest { block: &SignedBeaconBlock, state: &mut BeaconState, ) -> Result<(), BlockProcessingError> { + let mut ctxt = ConsensusContext::new(block.slot()); per_block_processing( state, block, - None, BlockSignatureStrategy::VerifyIndividual, VerifyBlockRoot::True, + &mut ctxt, &E::default_spec(), ) } @@ -125,7 +126,7 @@ vectors_and_tests!( ExitTest { block_modifier: Box::new(|_, block| { // Duplicate the exit - let exit = block.body().voluntary_exits()[0].clone(); + let exit = block.body().voluntary_exits().get(0).unwrap().clone(); block.body_mut().voluntary_exits_mut().push(exit).unwrap(); }), expected: Err(BlockProcessingError::ExitInvalid { @@ -144,7 +145,11 @@ vectors_and_tests!( invalid_validator_unknown, ExitTest { block_modifier: Box::new(|_, block| { - block.body_mut().voluntary_exits_mut()[0] + block + .body_mut() + .voluntary_exits_mut() + .get_mut(0) + .unwrap() .message .validator_index = VALIDATOR_COUNT as u64; }), @@ -165,7 +170,7 @@ vectors_and_tests!( invalid_exit_already_initiated, ExitTest { state_modifier: Box::new(|state| { - 
state.validators_mut()[0].exit_epoch = STATE_EPOCH + 1; + state.validators_mut().get_mut(0).unwrap().exit_epoch = STATE_EPOCH + 1; }), expected: Err(BlockProcessingError::ExitInvalid { index: 0, @@ -184,7 +189,8 @@ vectors_and_tests!( invalid_not_active_before_activation_epoch, ExitTest { state_modifier: Box::new(|state| { - state.validators_mut()[0].activation_epoch = E::default_spec().far_future_epoch; + state.validators_mut().get_mut(0).unwrap().activation_epoch = + E::default_spec().far_future_epoch; }), expected: Err(BlockProcessingError::ExitInvalid { index: 0, @@ -203,7 +209,7 @@ vectors_and_tests!( invalid_not_active_after_exit_epoch, ExitTest { state_modifier: Box::new(|state| { - state.validators_mut()[0].exit_epoch = STATE_EPOCH; + state.validators_mut().get_mut(0).unwrap().exit_epoch = STATE_EPOCH; }), expected: Err(BlockProcessingError::ExitInvalid { index: 0, @@ -303,7 +309,11 @@ vectors_and_tests!( block_modifier: Box::new(|_, block| { // Shift the validator index by 1 so that it's mismatched from the key that was // used to sign. - block.body_mut().voluntary_exits_mut()[0] + block + .body_mut() + .voluntary_exits_mut() + .get_mut(0) + .unwrap() .message .validator_index = VALIDATOR_INDEX + 1; }), From edf23bb40ee4f6e4b6024b66ca2a95caaf1811f0 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 18 Oct 2022 04:02:06 +0000 Subject: [PATCH 10/21] Fix attestation shuffling filter (#3629) ## Issue Addressed Fix a bug in block production that results in blocks with 0 attestations during the first slot of an epoch. The bug is marked by debug logs of the form: > DEBG Discarding attestation because of missing ancestor, block_root: 0x3cc00d9c9e0883b2d0db8606278f2b8423d4902f9a1ee619258b5b60590e64f8, pivot_slot: 4042591 It occurs when trying to look up the shuffling decision root for an attestation from a slot which is prior to fork choice's finalized block. 
This happens frequently when proposing in the first slot of the epoch where we have: - `current_epoch == n` - `attestation.data.target.epoch == n - 1` - attestation shuffling epoch `== n - 3` (decision block being the last block of `n - 3`) - `state.finalized_checkpoint.epoch == n - 2` (first block of `n - 2` is finalized) Hence the shuffling decision slot is out of range of the fork choice backwards iterator _by a single slot_. Unfortunately this bug was hidden when we weren't pruning fork choice, and then reintroduced in v2.5.1 when we fixed the pruning (https://github.com/sigp/lighthouse/releases/tag/v2.5.1). There's no way to turn that off or disable the filtering in our current release, so we need a new release to fix this issue. Fortunately, it also does not occur on every epoch boundary because of the gradual pruning of fork choice every 256 blocks (~8 epochs): https://github.com/sigp/lighthouse/blob/01e84b71f524968f5b940fbd2fa31d29408b6581/consensus/proto_array/src/proto_array_fork_choice.rs#L16 https://github.com/sigp/lighthouse/blob/01e84b71f524968f5b940fbd2fa31d29408b6581/consensus/proto_array/src/proto_array.rs#L713-L716 So the probability of proposing a 0-attestation block given a proposal assignment is approximately `1/32 * 1/8 = 0.39%`. ## Proposed Changes - Load the block's shuffling ID from fork choice and verify it against the expected shuffling ID of the head state. This code was initially written before we had settled on a representation of shuffling IDs, so I think it's a nice simplification to make use of them here rather than more ad-hoc logic that fundamentally does the same thing. ## Additional Info Thanks to @moshe-blox for noticing this issue and bringing it to our attention. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 115 +++++++------- beacon_node/beacon_chain/tests/store_tests.rs | 140 +++++++++--------- 2 files changed, 139 insertions(+), 116 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4429abc4c9..86b43a1a39 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1991,60 +1991,75 @@ impl BeaconChain { target_epoch: Epoch, state: &BeaconState, ) -> bool { - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - let shuffling_lookahead = 1 + self.spec.min_seed_lookahead.as_u64(); - - // Shuffling can't have changed if we're in the first few epochs - if state.current_epoch() < shuffling_lookahead { - return true; - } - - // Otherwise the shuffling is determined by the block at the end of the target epoch - // minus the shuffling lookahead (usually 2). We call this the "pivot". - let pivot_slot = - if target_epoch == state.previous_epoch() || target_epoch == state.current_epoch() { - (target_epoch - shuffling_lookahead).end_slot(slots_per_epoch) - } else { - return false; - }; - - let state_pivot_block_root = match state.get_block_root(pivot_slot) { - Ok(root) => *root, - Err(e) => { - warn!( - &self.log, - "Missing pivot block root for attestation"; - "slot" => pivot_slot, - "error" => ?e, - ); - return false; - } - }; - - // Use fork choice's view of the block DAG to quickly evaluate whether the attestation's - // pivot block is the same as the current state's pivot block. If it is, then the - // attestation's shuffling is the same as the current state's. - // To account for skipped slots, find the first block at *or before* the pivot slot. 
- let fork_choice_lock = self.canonical_head.fork_choice_read_lock(); - let pivot_block_root = fork_choice_lock - .proto_array() - .core_proto_array() - .iter_block_roots(block_root) - .find(|(_, slot)| *slot <= pivot_slot) - .map(|(block_root, _)| block_root); - drop(fork_choice_lock); - - match pivot_block_root { - Some(root) => root == state_pivot_block_root, - None => { + self.shuffling_is_compatible_result(block_root, target_epoch, state) + .unwrap_or_else(|e| { debug!( - &self.log, - "Discarding attestation because of missing ancestor"; - "pivot_slot" => pivot_slot.as_u64(), + self.log, + "Skipping attestation with incompatible shuffling"; "block_root" => ?block_root, + "target_epoch" => target_epoch, + "reason" => ?e, ); false + }) + } + + fn shuffling_is_compatible_result( + &self, + block_root: &Hash256, + target_epoch: Epoch, + state: &BeaconState, + ) -> Result { + // Compute the shuffling ID for the head state in the `target_epoch`. + let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), target_epoch) + .map_err(|e| Error::BeaconStateError(e.into()))?; + let head_shuffling_id = + AttestationShufflingId::new(self.genesis_block_root, state, relative_epoch)?; + + // Load the block's shuffling ID from fork choice. We use the variant of `get_block` that + // checks descent from the finalized block, so there's one case where we'll spuriously + // return `false`: where an attestation for the previous epoch nominates the pivot block + // which is the parent block of the finalized block. Such attestations are not useful, so + // this doesn't matter. 
+ let fork_choice_lock = self.canonical_head.fork_choice_read_lock(); + let block = fork_choice_lock + .get_block(block_root) + .ok_or(Error::AttestationHeadNotInForkChoice(*block_root))?; + drop(fork_choice_lock); + + let block_shuffling_id = if target_epoch == block.current_epoch_shuffling_id.shuffling_epoch + { + block.current_epoch_shuffling_id + } else if target_epoch == block.next_epoch_shuffling_id.shuffling_epoch { + block.next_epoch_shuffling_id + } else if target_epoch > block.next_epoch_shuffling_id.shuffling_epoch { + AttestationShufflingId { + shuffling_epoch: target_epoch, + shuffling_decision_block: *block_root, } + } else { + debug!( + self.log, + "Skipping attestation with incompatible shuffling"; + "block_root" => ?block_root, + "target_epoch" => target_epoch, + "reason" => "target epoch less than block epoch" + ); + return Ok(false); + }; + + if head_shuffling_id == block_shuffling_id { + Ok(true) + } else { + debug!( + self.log, + "Skipping attestation with incompatible shuffling"; + "block_root" => ?block_root, + "target_epoch" => target_epoch, + "head_shuffling_id" => ?head_shuffling_id, + "block_shuffling_id" => ?block_shuffling_id, + ); + Ok(false) } } @@ -4460,7 +4475,7 @@ impl BeaconChain { /// /// If the committee for `(head_block_root, shuffling_epoch)` isn't found in the /// `shuffling_cache`, we will read a state from disk and then update the `shuffling_cache`. 
- pub(crate) fn with_committee_cache( + pub fn with_committee_cache( &self, head_block_root: Hash256, shuffling_epoch: Epoch, diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 883b871b1c..b1907bc96e 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -811,7 +811,6 @@ async fn shuffling_compatible_linear_chain() { let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - // Skip the block at the end of the first epoch. let head_block_root = harness .extend_chain( 4 * E::slots_per_epoch() as usize, @@ -824,10 +823,6 @@ async fn shuffling_compatible_linear_chain() { &harness, &get_state_for_block(&harness, head_block_root), head_block_root, - true, - true, - None, - None, ); } @@ -859,10 +854,6 @@ async fn shuffling_compatible_missing_pivot_block() { &harness, &get_state_for_block(&harness, head_block_root), head_block_root, - true, - true, - Some(E::slots_per_epoch() - 2), - Some(E::slots_per_epoch() - 2), ); } @@ -880,10 +871,10 @@ async fn shuffling_compatible_simple_fork() { let head1_state = get_state_for_block(&harness, head1); let head2_state = get_state_for_block(&harness, head2); - check_shuffling_compatible(&harness, &head1_state, head1, true, true, None, None); - check_shuffling_compatible(&harness, &head1_state, head2, false, false, None, None); - check_shuffling_compatible(&harness, &head2_state, head1, false, false, None, None); - check_shuffling_compatible(&harness, &head2_state, head2, true, true, None, None); + check_shuffling_compatible(&harness, &head1_state, head1); + check_shuffling_compatible(&harness, &head1_state, head2); + check_shuffling_compatible(&harness, &head2_state, head1); + check_shuffling_compatible(&harness, &head2_state, head2); drop(db_path); } @@ -902,21 +893,10 @@ async fn shuffling_compatible_short_fork() { let head1_state = get_state_for_block(&harness, head1); let 
head2_state = get_state_for_block(&harness, head2); - check_shuffling_compatible(&harness, &head1_state, head1, true, true, None, None); - check_shuffling_compatible(&harness, &head1_state, head2, false, true, None, None); - // NOTE: don't check this case, as block 14 from the first chain appears valid on the second - // chain due to it matching the second chain's block 15. - // check_shuffling_compatible(&harness, &head2_state, head1, false, true, None, None); - check_shuffling_compatible( - &harness, - &head2_state, - head2, - true, - true, - // Required because of the skipped slot. - Some(2 * E::slots_per_epoch() - 2), - None, - ); + check_shuffling_compatible(&harness, &head1_state, head1); + check_shuffling_compatible(&harness, &head1_state, head2); + check_shuffling_compatible(&harness, &head2_state, head1); + check_shuffling_compatible(&harness, &head2_state, head2); drop(db_path); } @@ -940,54 +920,82 @@ fn check_shuffling_compatible( harness: &TestHarness, head_state: &BeaconState, head_block_root: Hash256, - current_epoch_valid: bool, - previous_epoch_valid: bool, - current_epoch_cutoff_slot: Option, - previous_epoch_cutoff_slot: Option, ) { - let shuffling_lookahead = harness.chain.spec.min_seed_lookahead.as_u64() + 1; - let current_pivot_slot = - (head_state.current_epoch() - shuffling_lookahead).end_slot(E::slots_per_epoch()); - let previous_pivot_slot = - (head_state.previous_epoch() - shuffling_lookahead).end_slot(E::slots_per_epoch()); - for maybe_tuple in harness .chain .rev_iter_block_roots_from(head_block_root) .unwrap() { let (block_root, slot) = maybe_tuple.unwrap(); - // Shuffling is compatible targeting the current epoch, - // if slot is greater than or equal to the current epoch pivot block. - assert_eq!( - harness.chain.shuffling_is_compatible( - &block_root, + + // Would an attestation to `block_root` at the current epoch be compatible with the head + // state's shuffling? 
+ let current_epoch_shuffling_is_compatible = harness.chain.shuffling_is_compatible( + &block_root, + head_state.current_epoch(), + &head_state, + ); + + // Check for consistency with the more expensive shuffling lookup. + harness + .chain + .with_committee_cache( + block_root, head_state.current_epoch(), - &head_state - ), - current_epoch_valid - && slot >= current_epoch_cutoff_slot.unwrap_or(current_pivot_slot.as_u64()) - ); + |committee_cache, _| { + let state_cache = head_state.committee_cache(RelativeEpoch::Current).unwrap(); + if current_epoch_shuffling_is_compatible { + assert_eq!(committee_cache, state_cache, "block at slot {slot}"); + } else { + assert_ne!(committee_cache, state_cache, "block at slot {slot}"); + } + Ok(()) + }, + ) + .unwrap_or_else(|e| { + // If the lookup fails then the shuffling must be invalid in some way, e.g. the + // block with `block_root` is from a later epoch than `previous_epoch`. + assert!( + !current_epoch_shuffling_is_compatible, + "block at slot {slot} has compatible shuffling at epoch {} \ + but should be incompatible due to error: {e:?}", + head_state.current_epoch() + ); + }); + // Similarly for the previous epoch - assert_eq!( - harness.chain.shuffling_is_compatible( - &block_root, + let previous_epoch_shuffling_is_compatible = harness.chain.shuffling_is_compatible( + &block_root, + head_state.previous_epoch(), + &head_state, + ); + harness + .chain + .with_committee_cache( + block_root, head_state.previous_epoch(), - &head_state - ), - previous_epoch_valid - && slot >= previous_epoch_cutoff_slot.unwrap_or(previous_pivot_slot.as_u64()) - ); - // Targeting the next epoch should always return false - assert_eq!( - harness.chain.shuffling_is_compatible( - &block_root, - head_state.current_epoch() + 1, - &head_state - ), - false - ); - // Targeting two epochs before the current epoch should also always return false + |committee_cache, _| { + let state_cache = head_state.committee_cache(RelativeEpoch::Previous).unwrap(); + if 
previous_epoch_shuffling_is_compatible { + assert_eq!(committee_cache, state_cache); + } else { + assert_ne!(committee_cache, state_cache); + } + Ok(()) + }, + ) + .unwrap_or_else(|e| { + // If the lookup fails then the shuffling must be invalid in some way, e.g. the + // block with `block_root` is from a later epoch than `previous_epoch`. + assert!( + !previous_epoch_shuffling_is_compatible, + "block at slot {slot} has compatible shuffling at epoch {} \ + but should be incompatible due to error: {e:?}", + head_state.previous_epoch() + ); + }); + + // Targeting two epochs before the current epoch should always return false if head_state.current_epoch() >= 2 { assert_eq!( harness.chain.shuffling_is_compatible( From c5cd0d9b3f49f3fef76dc3be75a2d773b75bf9b2 Mon Sep 17 00:00:00 2001 From: GeemoCandama Date: Tue, 18 Oct 2022 04:02:07 +0000 Subject: [PATCH 11/21] add execution-timeout-multiplier flag to optionally increase timeouts (#3631) ## Issue Addressed Add flag to lengthen execution layer timeouts Which issue # does this PR address? #3607 ## Proposed Changes Added execution-timeout-multiplier flag and a cli test to ensure the execution layer config has the multiplier set correctly. Please list or describe the changes introduced by this PR. Add execution_timeout_multiplier to the execution layer config as Option and pass the u32 to HttpJsonRpc. ## Additional Info Not certain that this is the best way to implement it so I'd appreciate any feedback. Please provide any additional information. For example, future considerations or information useful for reviewers. 
--- beacon_node/eth1/src/service.rs | 10 ++- beacon_node/eth1/tests/test.rs | 9 ++- .../execution_layer/src/engine_api/http.rs | 63 ++++++++++++++----- beacon_node/execution_layer/src/lib.rs | 5 +- beacon_node/src/cli.rs | 9 ++- beacon_node/src/config.rs | 3 + lighthouse/tests/beacon_node.rs | 16 +++++ 7 files changed, 91 insertions(+), 24 deletions(-) diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index fae6eef9c2..c6b87e88e3 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -290,6 +290,7 @@ pub struct Config { pub max_blocks_per_update: Option, /// If set to true, the eth1 caches are wiped clean when the eth1 service starts. pub purge_cache: bool, + pub execution_timeout_multiplier: u32, } impl Config { @@ -347,6 +348,7 @@ impl Default for Config { max_log_requests_per_update: Some(5_000), max_blocks_per_update: Some(8_192), purge_cache: false, + execution_timeout_multiplier: 1, } } } @@ -361,11 +363,13 @@ pub fn endpoint_from_config(config: &Config) -> Result { } => { let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version) .map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?; - HttpJsonRpc::new_with_auth(endpoint, auth) + HttpJsonRpc::new_with_auth(endpoint, auth, Some(config.execution_timeout_multiplier)) + .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) + } + Eth1Endpoint::NoAuth(endpoint) => { + HttpJsonRpc::new(endpoint, Some(config.execution_timeout_multiplier)) .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) } - Eth1Endpoint::NoAuth(endpoint) => HttpJsonRpc::new(endpoint) - .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)), } } diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index 9f81f91e19..7e58f07e24 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -493,7 +493,8 @@ mod deposit_tree { let mut deposit_roots = vec![]; let mut deposit_counts 
= vec![]; - let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap()).unwrap(); + let client = + HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); // Perform deposits to the smart contract, recording it's state along the way. for deposit in &deposits { @@ -597,7 +598,8 @@ mod http { .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; let web3 = eth1.web3(); - let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap()).unwrap(); + let client = + HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); let block_number = get_block_number(&web3).await; let logs = blocking_deposit_logs(&client, ð1, 0..block_number).await; @@ -711,7 +713,8 @@ mod fast { MainnetEthSpec::default_spec(), ) .unwrap(); - let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap()).unwrap(); + let client = + HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); let n = 10; let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); for deposit in &deposits { diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 0f848a7716..be68c37b06 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -518,22 +518,32 @@ pub mod deposit_methods { pub struct HttpJsonRpc { pub client: Client, pub url: SensitiveUrl, + pub execution_timeout_multiplier: u32, auth: Option, } impl HttpJsonRpc { - pub fn new(url: SensitiveUrl) -> Result { + pub fn new( + url: SensitiveUrl, + execution_timeout_multiplier: Option, + ) -> Result { Ok(Self { client: Client::builder().build()?, url, + execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), auth: None, }) } - pub fn new_with_auth(url: SensitiveUrl, auth: Auth) -> Result { + pub fn new_with_auth( + url: SensitiveUrl, + auth: Auth, + 
execution_timeout_multiplier: Option, + ) -> Result { Ok(Self { client: Client::builder().build()?, url, + execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), auth: Some(auth), }) } @@ -590,7 +600,11 @@ impl std::fmt::Display for HttpJsonRpc { impl HttpJsonRpc { pub async fn upcheck(&self) -> Result<(), Error> { let result: serde_json::Value = self - .rpc_request(ETH_SYNCING, json!([]), ETH_SYNCING_TIMEOUT) + .rpc_request( + ETH_SYNCING, + json!([]), + ETH_SYNCING_TIMEOUT * self.execution_timeout_multiplier, + ) .await?; /* @@ -614,7 +628,7 @@ impl HttpJsonRpc { self.rpc_request( ETH_GET_BLOCK_BY_NUMBER, params, - ETH_GET_BLOCK_BY_NUMBER_TIMEOUT, + ETH_GET_BLOCK_BY_NUMBER_TIMEOUT * self.execution_timeout_multiplier, ) .await } @@ -625,8 +639,12 @@ impl HttpJsonRpc { ) -> Result, Error> { let params = json!([block_hash, RETURN_FULL_TRANSACTION_OBJECTS]); - self.rpc_request(ETH_GET_BLOCK_BY_HASH, params, ETH_GET_BLOCK_BY_HASH_TIMEOUT) - .await + self.rpc_request( + ETH_GET_BLOCK_BY_HASH, + params, + ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, + ) + .await } pub async fn get_block_by_hash_with_txns( @@ -634,8 +652,12 @@ impl HttpJsonRpc { block_hash: ExecutionBlockHash, ) -> Result>, Error> { let params = json!([block_hash, true]); - self.rpc_request(ETH_GET_BLOCK_BY_HASH, params, ETH_GET_BLOCK_BY_HASH_TIMEOUT) - .await + self.rpc_request( + ETH_GET_BLOCK_BY_HASH, + params, + ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, + ) + .await } pub async fn new_payload_v1( @@ -645,7 +667,11 @@ impl HttpJsonRpc { let params = json!([JsonExecutionPayloadV1::from(execution_payload)]); let response: JsonPayloadStatusV1 = self - .rpc_request(ENGINE_NEW_PAYLOAD_V1, params, ENGINE_NEW_PAYLOAD_TIMEOUT) + .rpc_request( + ENGINE_NEW_PAYLOAD_V1, + params, + ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) .await?; Ok(response.into()) @@ -658,7 +684,11 @@ impl HttpJsonRpc { let params = 
json!([JsonPayloadIdRequest::from(payload_id)]); let response: JsonExecutionPayloadV1 = self - .rpc_request(ENGINE_GET_PAYLOAD_V1, params, ENGINE_GET_PAYLOAD_TIMEOUT) + .rpc_request( + ENGINE_GET_PAYLOAD_V1, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) .await?; Ok(response.into()) @@ -678,7 +708,7 @@ impl HttpJsonRpc { .rpc_request( ENGINE_FORKCHOICE_UPDATED_V1, params, - ENGINE_FORKCHOICE_UPDATED_TIMEOUT, + ENGINE_FORKCHOICE_UPDATED_TIMEOUT * self.execution_timeout_multiplier, ) .await?; @@ -695,7 +725,8 @@ impl HttpJsonRpc { .rpc_request( ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, params, - ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT, + ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT + * self.execution_timeout_multiplier, ) .await?; @@ -732,13 +763,13 @@ mod test { let echo_auth = Auth::new(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), None, None); ( - Arc::new(HttpJsonRpc::new_with_auth(rpc_url, rpc_auth).unwrap()), - Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth).unwrap()), + Arc::new(HttpJsonRpc::new_with_auth(rpc_url, rpc_auth, None).unwrap()), + Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth, None).unwrap()), ) } else { ( - Arc::new(HttpJsonRpc::new(rpc_url).unwrap()), - Arc::new(HttpJsonRpc::new(echo_url).unwrap()), + Arc::new(HttpJsonRpc::new(rpc_url, None).unwrap()), + Arc::new(HttpJsonRpc::new(echo_url, None).unwrap()), ) }; diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 68071ee9b1..f222f28c33 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -159,6 +159,7 @@ pub struct Config { pub default_datadir: PathBuf, /// The minimum value of an external payload for it to be considered in a proposal. 
pub builder_profit_threshold: u128, + pub execution_timeout_multiplier: Option, } /// Provides access to one execution engine and provides a neat interface for consumption by the @@ -180,6 +181,7 @@ impl ExecutionLayer { jwt_version, default_datadir, builder_profit_threshold, + execution_timeout_multiplier, } = config; if urls.len() > 1 { @@ -224,7 +226,8 @@ impl ExecutionLayer { let engine: Engine = { let auth = Auth::new(jwt_key, jwt_id, jwt_version); debug!(log, "Loaded execution endpoint"; "endpoint" => %execution_url, "jwt_path" => ?secret_file.as_path()); - let api = HttpJsonRpc::new_with_auth(execution_url, auth).map_err(Error::ApiError)?; + let api = HttpJsonRpc::new_with_auth(execution_url, auth, execution_timeout_multiplier) + .map_err(Error::ApiError)?; Engine::new(api, executor.clone(), &log) }; diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 1e51849876..0b7518b957 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -503,7 +503,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .requires("execution-endpoint") .takes_value(true) ) - + .arg( + Arg::with_name("execution-timeout-multiplier") + .long("execution-timeout-multiplier") + .value_name("NUM") + .help("Unsigned integer to multiply the default execution timeouts by.") + .default_value("1") + .takes_value(true) + ) /* * Database purging and compaction. 
*/ diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index ecd4d736a6..7666134b41 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -335,6 +335,9 @@ pub fn get_config( el_config.default_datadir = client_config.data_dir.clone(); el_config.builder_profit_threshold = clap_utils::parse_required(cli_args, "builder-profit-threshold")?; + let execution_timeout_multiplier = + clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?; + el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier); // If `--execution-endpoint` is provided, we should ignore any `--eth1-endpoints` values and // use `--execution-endpoint` instead. Also, log a deprecation warning. diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index a00fd7a822..34041a82c8 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -407,6 +407,22 @@ fn run_execution_jwt_secret_key_is_persisted() { }); } #[test] +fn execution_timeout_multiplier_flag() { + let dir = TempDir::new().expect("Unable to create temporary directory"); + CommandLineTest::new() + .flag("execution-endpoint", Some("http://meow.cats")) + .flag( + "execution-jwt", + dir.path().join("jwt-file").as_os_str().to_str(), + ) + .flag("execution-timeout-multiplier", Some("3")) + .run_with_zero_port() + .with_config(|config| { + let config = config.execution_layer.as_ref().unwrap(); + assert_eq!(config.execution_timeout_multiplier, Some(3)); + }); +} +#[test] fn merge_execution_endpoints_flag() { run_merge_execution_endpoints_flag_test("execution-endpoints") } From d0efb6b18af9a274e4059b6bc953f470f00c18b1 Mon Sep 17 00:00:00 2001 From: pinkiebell <40266861+pinkiebell@users.noreply.github.com> Date: Wed, 19 Oct 2022 22:55:49 +0000 Subject: [PATCH 12/21] beacon_node: add --disable-deposit-contract-sync flag (#3597) Overrides any previous option that enables the eth1 service. Useful for operating a `light` beacon node. 
Co-authored-by: Michael Sproul --- beacon_node/client/src/builder.rs | 4 +++- beacon_node/src/cli.rs | 8 ++++++++ beacon_node/src/config.rs | 5 +++++ lighthouse/tests/beacon_node.rs | 34 +++++++++++++++++++++++++++++++ 4 files changed, 50 insertions(+), 1 deletion(-) diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index a46d91ad1e..efd91cfdf6 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -457,7 +457,9 @@ where ClientGenesis::FromStore => builder.resume_from_db().map(|v| (v, None))?, }; - self.eth1_service = eth1_service_option; + if config.sync_eth1_chain { + self.eth1_service = eth1_service_option; + } self.beacon_chain_builder = Some(beacon_chain_builder); Ok(self) } diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 0b7518b957..81a7c6bbeb 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -852,4 +852,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { failure caused by the execution layer.") .takes_value(false) ) + .arg( + Arg::with_name("disable-deposit-contract-sync") + .long("disable-deposit-contract-sync") + .help("Explictly disables syncing of deposit logs from the execution node. \ + This overrides any previous option that depends on it. \ + Useful if you intend to run a non-validating beacon node.") + .takes_value(false) + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 7666134b41..3b94c31290 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -668,6 +668,11 @@ pub fn get_config( client_config.chain.enable_lock_timeouts = false; } + // Note: This overrides any previous flags that enable this option. + if cli_args.is_present("disable-deposit-contract-sync") { + client_config.sync_eth1_chain = false; + } + if let Some(timeout) = clap_utils::parse_optional(cli_args, "fork-choice-before-proposal-timeout")? 
{ diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 34041a82c8..b1498f109d 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1527,3 +1527,37 @@ fn enabled_disable_log_timestamp_flag() { assert!(config.logger_config.disable_log_timestamp); }); } + +#[test] +fn sync_eth1_chain_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.sync_eth1_chain, false)); +} + +#[test] +fn sync_eth1_chain_execution_endpoints_flag() { + let dir = TempDir::new().expect("Unable to create temporary directory"); + CommandLineTest::new() + .flag("execution-endpoints", Some("http://localhost:8551/")) + .flag( + "execution-jwt", + dir.path().join("jwt-file").as_os_str().to_str(), + ) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.sync_eth1_chain, true)); +} + +#[test] +fn sync_eth1_chain_disable_deposit_contract_sync_flag() { + let dir = TempDir::new().expect("Unable to create temporary directory"); + CommandLineTest::new() + .flag("disable-deposit-contract-sync", None) + .flag("execution-endpoints", Some("http://localhost:8551/")) + .flag( + "execution-jwt", + dir.path().join("jwt-file").as_os_str().to_str(), + ) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.sync_eth1_chain, false)); +} From dbb93cd0d268513017d75936d9097732cf2d5ee3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 19 Oct 2022 22:55:50 +0000 Subject: [PATCH 13/21] bors: require slasher and syncing sim tests (#3645) ## Issue Addressed I noticed that [this build](https://github.com/sigp/lighthouse/actions/runs/3269950873/jobs/5378036501) wasn't marked failed by Bors when the `syncing-simulator-ubuntu` job failed. This is because that job is absent from the `bors.toml` config. 
## Proposed Changes Add missing jobs to Bors config so that they are required: - `syncing-simulator-ubuntu` - `slasher-tests` - `disallowed-from-async-lint` The `disallowed-from-async-lint` was previously allowed to fail because it was considered beta, but I think it's stable enough now we may as well require it. --- bors.toml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bors.toml b/bors.toml index 0ff5d6231b..6edf55bfa3 100644 --- a/bors.toml +++ b/bors.toml @@ -20,7 +20,10 @@ status = [ "doppelganger-protection-test", "execution-engine-integration-ubuntu", "cargo-vendor", - "check-msrv" + "check-msrv", + "slasher-tests", + "syncing-simulator-ubuntu", + "disallowed-from-async-lint" ] use_squash_merge = true timeout_sec = 10800 From 3a5888e53d03e4cdd8f52639ffcee06ea24fb1c0 Mon Sep 17 00:00:00 2001 From: Divma Date: Mon, 24 Oct 2022 21:39:30 +0000 Subject: [PATCH 14/21] Ban and unban peers at the swarm level (#3653) ## Issue Addressed I missed this from https://github.com/sigp/lighthouse/pull/3491. peers were being banned at the behaviour level only. The identify errors are explained by this as well ## Proposed Changes Add banning and unbanning ## Additional Info Before, having tests that catch this was hard because the swarm was outside the behaviour. 
We could now have tests that prevent something like this in the future --- beacon_node/lighthouse_network/src/service/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 53d29ccb21..97d96d171d 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1342,10 +1342,12 @@ impl Network { Some(NetworkEvent::PeerDisconnected(peer_id)) } PeerManagerEvent::Banned(peer_id, associated_ips) => { + self.swarm.ban_peer_id(peer_id); self.discovery_mut().ban_peer(&peer_id, associated_ips); Some(NetworkEvent::PeerBanned(peer_id)) } PeerManagerEvent::UnBanned(peer_id, associated_ips) => { + self.swarm.unban_peer_id(peer_id); self.discovery_mut().unban_peer(&peer_id, associated_ips); Some(NetworkEvent::PeerUnbanned(peer_id)) } From fcfd02aeec435203269b03865e3ccc23e5f51e6d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 25 Oct 2022 06:36:51 +0000 Subject: [PATCH 15/21] Release v3.2.0 (#3647) ## Issue Addressed NA ## Proposed Changes Bump version to `v3.2.0` ## Additional Info - ~~Blocked on #3597~~ - ~~Blocked on #3645~~ - ~~Blocked on #3653~~ - ~~Requires additional testing~~ --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 34c932307d..4616ee8a4b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -439,7 +439,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "3.1.2" +version = "3.2.0" dependencies = [ "beacon_chain", "clap", @@ -597,7 +597,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "3.1.2" +version = "3.2.0" dependencies = [ "beacon_node", "clap", @@ -3105,7 +3105,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" 
[[package]] name = "lcli" -version = "3.1.2" +version = "3.2.0" dependencies = [ "account_utils", "beacon_chain", @@ -3605,7 +3605,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "3.1.2" +version = "3.2.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index d48dd6cac0..e4441dcbd6 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "3.1.2" +version = "3.2.0" authors = ["Paul Hauner ", "Age Manning "] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 3f406e88fc..5d5f7ce723 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v3.1.2-", - fallback = "Lighthouse/v3.1.2" + prefix = "Lighthouse/v3.2.0-", + fallback = "Lighthouse/v3.2.0" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 6b9bb33838..ad9e9f49a1 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "3.1.2" +version = "3.2.0" authors = ["Paul Hauner "] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 94bb62b1af..3a40d5a541 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "3.1.2" +version = "3.2.0" authors = ["Sigma Prime "] edition = "2021" autotests = false From 77eabc5401223d7de06a55204d69e68a92e0a54d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 26 Oct 2022 06:50:04 +0000 Subject: [PATCH 16/21] Revert "Optimise HTTP validator lookups" (#3658) ## Issue Addressed This reverts commit ca9dc8e0947a0ec83f31830aaabc1ffbd3c14c9c (PR #3559) with some modifications. ## Proposed Changes Unfortunately that PR introduced a performance regression in fork choice. The optimisation _intended_ to build the exit and pubkey caches on the head state _only if_ they were not already built. However, due to the head state always being cloned without these caches, we ended up building them every time the head changed, leading to a ~70ms+ penalty on mainnet. https://github.com/sigp/lighthouse/blob/fcfd02aeec435203269b03865e3ccc23e5f51e6d/beacon_node/beacon_chain/src/canonical_head.rs#L633-L636 I believe this is a severe enough regression to justify immediately releasing v3.2.1 with this change. ## Additional Info I didn't fully revert #3559, because there were some unrelated deletions of dead code in that PR which I figured we may as well keep. An alternative would be to clone the extra caches, but this likely still imposes some cost, so in the interest of applying a conservative fix quickly, I think reversion is the best approach. 
The optimisation from #3559 was not even optimising a particularly significant path, it was mostly for VCs running larger numbers of inactive keys. We can re-do it in the `tree-states` world where cache clones are cheap. --- .../beacon_chain/src/canonical_head.rs | 6 +++--- beacon_node/http_api/src/lib.rs | 21 ++----------------- consensus/types/src/beacon_state.rs | 15 ------------- 3 files changed, 5 insertions(+), 37 deletions(-) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 53e0fbaac9..c9bd6db0e6 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -654,11 +654,11 @@ impl BeaconChain { }) }) .and_then(|mut snapshot| { - // Regardless of where we got the state from, attempt to build all the - // caches except the tree hash cache. + // Regardless of where we got the state from, attempt to build the committee + // caches. snapshot .beacon_state - .build_all_caches(&self.spec) + .build_all_committee_caches(&self.spec) .map_err(Into::into) .map(|()| snapshot) })?; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 51e97c893d..5b4fa5816d 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -668,10 +668,9 @@ pub fn serve( "Invalid validator ID".to_string(), )) })) - .and(log_filter.clone()) .and(warp::path::end()) .and_then( - |state_id: StateId, chain: Arc>, validator_id: ValidatorId, log| { + |state_id: StateId, chain: Arc>, validator_id: ValidatorId| { blocking_json_task(move || { let (data, execution_optimistic) = state_id .map_state_and_execution_optimistic( @@ -679,23 +678,7 @@ pub fn serve( |state, execution_optimistic| { let index_opt = match &validator_id { ValidatorId::PublicKey(pubkey) => { - // Fast path: use the pubkey cache which is probably - // initialised at the head. 
- match state.get_validator_index_read_only(pubkey) { - Ok(result) => result, - Err(e) => { - // Slow path, fall back to iteration. - debug!( - log, - "Validator look-up cache miss"; - "reason" => ?e, - ); - state - .validators() - .iter() - .position(|v| v.pubkey == *pubkey) - } - } + state.validators().iter().position(|v| v.pubkey == *pubkey) } ValidatorId::Index(index) => Some(*index as usize), }; diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 46a431d073..a5d00cdf2d 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -447,21 +447,6 @@ impl BeaconState { Ok(self.pubkey_cache().get(pubkey)) } - /// Immutable variant of `get_validator_index` which errors if the cache is not up to date. - pub fn get_validator_index_read_only( - &self, - pubkey: &PublicKeyBytes, - ) -> Result, Error> { - let pubkey_cache = self.pubkey_cache(); - if pubkey_cache.len() != self.validators().len() { - return Err(Error::PubkeyCacheIncomplete { - cache_len: pubkey_cache.len(), - registry_len: self.validators().len(), - }); - } - Ok(pubkey_cache.get(pubkey)) - } - /// The epoch corresponding to `self.slot()`. pub fn current_epoch(&self) -> Epoch { self.slot().epoch(T::slots_per_epoch()) From 6d5a2b509fac7b6ffe693866f58ba49989f946d7 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 26 Oct 2022 09:38:25 +0000 Subject: [PATCH 17/21] Release v3.2.1 (#3660) ## Proposed Changes Patch release to include the performance regression fix https://github.com/sigp/lighthouse/pull/3658. 
## Additional Info ~~Blocked on the merge of https://github.com/sigp/lighthouse/pull/3658.~~ --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4616ee8a4b..ad6aac8f42 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -439,7 +439,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "3.2.0" +version = "3.2.1" dependencies = [ "beacon_chain", "clap", @@ -597,7 +597,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "3.2.0" +version = "3.2.1" dependencies = [ "beacon_node", "clap", @@ -3105,7 +3105,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "3.2.0" +version = "3.2.1" dependencies = [ "account_utils", "beacon_chain", @@ -3605,7 +3605,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "3.2.0" +version = "3.2.1" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index e4441dcbd6..b85aae2f4f 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "3.2.0" +version = "3.2.1" authors = ["Paul Hauner ", "Age Manning "] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 5d5f7ce723..a48ba211d9 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v3.2.0-", - fallback = "Lighthouse/v3.2.0" + prefix = "Lighthouse/v3.2.1-", + fallback = "Lighthouse/v3.2.1" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index ad9e9f49a1..b4f630ae15 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "3.2.0" +version = "3.2.1" authors = ["Paul Hauner "] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 3a40d5a541..864869a149 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "3.2.0" +version = "3.2.1" authors = ["Sigma Prime "] edition = "2021" autotests = false From f2f920dec8533e123991b1c7cfcb3c5bf09e68cc Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Fri, 28 Oct 2022 03:23:49 +0000 Subject: [PATCH 18/21] Added lightclient server side containers (#3655) ## Issue Addressed This PR partially addresses #3651 ## Proposed Changes This PR adds the following containers types from [the lightclient specs](https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md): `LightClientUpdate`, `LightClientFinalityUpdate`, `LightClientOptimisticUpdate` and `LightClientBootstrap`. It also implements the creation of each updates as delined by this [document](https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/full-node.md). ## Additional Info Here is a brief description of what each of these container signify: `LightClientUpdate`: This container is only provided by server (full node) to lightclients when catching up new sync committees beetwen periods and we want possibly one lightclient update ready for each post-altair period the lighthouse node go over. it is needed in the resp/req in method `light_client_update_by_range`. 
`LightClientFinalityUpdate/LightClientOptimisticUpdate`: Lighthouse will need only the latest of each of these kinds of updates, so no need to store them in the database, we can just store the latest one of each kind in memory and then just supply them via gossip or respreq, only the latest ones are served by a full node. Finality updates mark the transition to a new finalized header, while optimistic updates signify new non-finalized headers which are imported optimistically. `LightClientBootstrap`: This object is retrieved by lightclients during the bootstrap process after a finalized checkpoint is retrieved, ideally we want to store a LightClientBootstrap for each finalized root and then serve each of them by finalized root in respreq protocol id `light_client_bootstrap`. A little digression on how we implement the creation of each update: the creation of an optimistic/finality update is just a version of the lightclient_update creation mechanism with fewer fields being set; there is an underlying concept of inheritance — if you look at the specs it becomes very obvious that a lightclient update is just an extension of a finality update and a finality update an extension to an optimistic update. ## Extra note `LightClientStore` is not implemented as it is only useful as internal storage design for the lightclient side. 
--- consensus/types/src/lib.rs | 3 + consensus/types/src/light_client_bootstrap.rs | 45 +++++ .../types/src/light_client_finality_update.rs | 80 ++++++++ .../src/light_client_optimistic_update.rs | 59 ++++++ consensus/types/src/light_client_update.rs | 171 ++++++++++++++++++ 5 files changed, 358 insertions(+) create mode 100644 consensus/types/src/light_client_bootstrap.rs create mode 100644 consensus/types/src/light_client_finality_update.rs create mode 100644 consensus/types/src/light_client_optimistic_update.rs create mode 100644 consensus/types/src/light_client_update.rs diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 32300173eb..7f618dc348 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -49,6 +49,9 @@ pub mod free_attestation; pub mod graffiti; pub mod historical_batch; pub mod indexed_attestation; +pub mod light_client_bootstrap; +pub mod light_client_optimistic_update; +pub mod light_client_update; pub mod pending_attestation; pub mod proposer_preparation_data; pub mod proposer_slashing; diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs new file mode 100644 index 0000000000..406136d542 --- /dev/null +++ b/consensus/types/src/light_client_bootstrap.rs @@ -0,0 +1,45 @@ +use super::{BeaconBlockHeader, BeaconState, EthSpec, FixedVector, Hash256, SyncCommittee}; +use crate::{light_client_update::*, test_utils::TestRandom}; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use std::sync::Arc; +use test_random_derive::TestRandom; +use tree_hash::TreeHash; + +/// A LightClientBootstrap is the initializer we send over to lightclient nodes +/// that are trying to generate their basic storage when booting up. 
+#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[serde(bound = "T: EthSpec")] +pub struct LightClientBootstrap { + /// Requested beacon block header. + pub header: BeaconBlockHeader, + /// The `SyncCommittee` used in the requested period. + pub current_sync_committee: Arc>, + /// Merkle proof for sync committee + pub current_sync_committee_branch: FixedVector, +} + +impl LightClientBootstrap { + pub fn from_beacon_state(beacon_state: BeaconState) -> Result { + let mut header = beacon_state.latest_block_header().clone(); + header.state_root = beacon_state.tree_hash_root(); + Ok(LightClientBootstrap { + header, + current_sync_committee: beacon_state.current_sync_committee()?.clone(), + /// TODO(Giulio2002): Generate Merkle Proof, this is just empty hashes + current_sync_committee_branch: FixedVector::new(vec![ + Hash256::zero(); + CURRENT_SYNC_COMMITTEE_PROOF_LEN + ])?, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::MainnetEthSpec; + + ssz_tests!(LightClientBootstrap); +} diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs new file mode 100644 index 0000000000..c93d15a1a0 --- /dev/null +++ b/consensus/types/src/light_client_finality_update.rs @@ -0,0 +1,80 @@ +use super::{BeaconBlockHeader, EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; +use crate::{light_client_update::*, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec}; +use safe_arith::ArithError; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use ssz_types::typenum::{U5, U6}; +use std::sync::Arc; +use test_random_derive::TestRandom; +use tree_hash::TreeHash; + +/// A LightClientFinalityUpdate is the update lightclient request or received by a gossip that +/// signal a new finalized beacon block header for the light client sync protocol. 
+#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[serde(bound = "T: EthSpec")] +pub struct LightClientFinalityUpdate { + /// The last `BeaconBlockHeader` from the last attested block by the sync committee. + pub attested_header: BeaconBlockHeader, + /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). + pub finalized_header: BeaconBlockHeader, + /// Merkle proof attesting finalized header. + pub finality_branch: FixedVector, + /// current sync aggreggate + pub sync_aggregate: SyncAggregate, + /// Slot of the sync aggregated singature + pub signature_slot: Slot, +} + +impl LightClientFinalityUpdate { + pub fn new( + chain_spec: ChainSpec, + beacon_state: BeaconState, + block: BeaconBlock, + attested_state: BeaconState, + finalized_block: BeaconBlock, + ) -> Result { + let altair_fork_epoch = chain_spec + .altair_fork_epoch + .ok_or(Error::AltairForkNotActive)?; + if attested_state.slot().epoch(T::slots_per_epoch()) < altair_fork_epoch { + return Err(Error::AltairForkNotActive); + } + + let sync_aggregate = block.body().sync_aggregate()?; + if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize { + return Err(Error::NotEnoughSyncCommitteeParticipants); + } + + // Compute and validate attested header. 
+ let mut attested_header = attested_state.latest_block_header().clone(); + attested_header.state_root = attested_state.tree_hash_root(); + // Build finalized header from finalized block + let finalized_header = BeaconBlockHeader { + slot: finalized_block.slot(), + proposer_index: finalized_block.proposer_index(), + parent_root: finalized_block.parent_root(), + state_root: finalized_block.state_root(), + body_root: finalized_block.body_root(), + }; + if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root { + return Err(Error::InvalidFinalizedBlock); + } + // TODO(Giulio2002): compute proper merkle proofs. + Ok(Self { + attested_header: attested_header, + finalized_header: finalized_header, + finality_branch: FixedVector::new(vec![Hash256::zero(); FINALIZED_ROOT_PROOF_LEN])?, + sync_aggregate: sync_aggregate.clone(), + signature_slot: block.slot(), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::MainnetEthSpec; + + ssz_tests!(LightClientFinalityUpdate); +} diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs new file mode 100644 index 0000000000..9592bf1c23 --- /dev/null +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -0,0 +1,59 @@ +use super::{BeaconBlockHeader, EthSpec, Slot, SyncAggregate}; +use crate::{ + light_client_update::Error, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec, +}; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash::TreeHash; + +/// A LightClientOptimisticUpdate is the update we send on each slot, +/// it is based off the current unfinalized epoch is verified only against BLS signature. 
+#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[serde(bound = "T: EthSpec")] +pub struct LightClientOptimisticUpdate { + /// The last `BeaconBlockHeader` from the last attested block by the sync committee. + pub attested_header: BeaconBlockHeader, + /// current sync aggreggate + pub sync_aggregate: SyncAggregate, + /// Slot of the sync aggregated singature + pub signature_slot: Slot, +} + +impl LightClientOptimisticUpdate { + pub fn new( + chain_spec: ChainSpec, + block: BeaconBlock, + attested_state: BeaconState, + ) -> Result { + let altair_fork_epoch = chain_spec + .altair_fork_epoch + .ok_or(Error::AltairForkNotActive)?; + if attested_state.slot().epoch(T::slots_per_epoch()) < altair_fork_epoch { + return Err(Error::AltairForkNotActive); + } + + let sync_aggregate = block.body().sync_aggregate()?; + if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize { + return Err(Error::NotEnoughSyncCommitteeParticipants); + } + + // Compute and validate attested header. 
+ let mut attested_header = attested_state.latest_block_header().clone(); + attested_header.state_root = attested_state.tree_hash_root(); + Ok(Self { + attested_header, + sync_aggregate: sync_aggregate.clone(), + signature_slot: block.slot(), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::MainnetEthSpec; + + ssz_tests!(LightClientOptimisticUpdate); +} diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs new file mode 100644 index 0000000000..38609cf1bc --- /dev/null +++ b/consensus/types/src/light_client_update.rs @@ -0,0 +1,171 @@ +use super::{BeaconBlockHeader, EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; +use crate::{beacon_state, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec}; +use safe_arith::ArithError; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use ssz_types::typenum::{U5, U6}; +use std::sync::Arc; +use test_random_derive::TestRandom; +use tree_hash::TreeHash; + +pub const FINALIZED_ROOT_INDEX: usize = 105; +pub const CURRENT_SYNC_COMMITTEE_INDEX: usize = 54; +pub const NEXT_SYNC_COMMITTEE_INDEX: usize = 55; + +pub type FinalizedRootProofLen = U6; +pub type CurrentSyncCommitteeProofLen = U5; +pub type NextSyncCommitteeProofLen = U5; + +pub const FINALIZED_ROOT_PROOF_LEN: usize = 6; +pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; +pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; + +#[derive(Debug, PartialEq, Clone)] +pub enum Error { + SszTypesError(ssz_types::Error), + BeaconStateError(beacon_state::Error), + ArithError(ArithError), + AltairForkNotActive, + NotEnoughSyncCommitteeParticipants, + MismatchingPeriods, + InvalidFinalizedBlock, +} + +impl From for Error { + fn from(e: ssz_types::Error) -> Error { + Error::SszTypesError(e) + } +} + +impl From for Error { + fn from(e: beacon_state::Error) -> Error { + Error::BeaconStateError(e) + } +} + +impl From for Error { + fn from(e: ArithError) -> Error { 
+ Error::ArithError(e) + } +} + +/// A LightClientUpdate is the update we request solely to either complete the bootstraping process, +/// or to sync up to the last committee period, we need to have one ready for each ALTAIR period +/// we go over, note: there is no need to keep all of the updates from [ALTAIR_PERIOD, CURRENT_PERIOD]. +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[serde(bound = "T: EthSpec")] +pub struct LightClientUpdate { + /// The last `BeaconBlockHeader` from the last attested block by the sync committee. + pub attested_header: BeaconBlockHeader, + /// The `SyncCommittee` used in the next period. + pub next_sync_committee: Arc>, + /// Merkle proof for next sync committee + pub next_sync_committee_branch: FixedVector, + /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). + pub finalized_header: BeaconBlockHeader, + /// Merkle proof attesting finalized header. + pub finality_branch: FixedVector, + /// current sync aggreggate + pub sync_aggregate: SyncAggregate, + /// Slot of the sync aggregated singature + pub signature_slot: Slot, +} + +impl LightClientUpdate { + pub fn new( + chain_spec: ChainSpec, + beacon_state: BeaconState, + block: BeaconBlock, + attested_state: BeaconState, + finalized_block: BeaconBlock, + ) -> Result { + let altair_fork_epoch = chain_spec + .altair_fork_epoch + .ok_or(Error::AltairForkNotActive)?; + if attested_state.slot().epoch(T::slots_per_epoch()) < altair_fork_epoch { + return Err(Error::AltairForkNotActive); + } + + let sync_aggregate = block.body().sync_aggregate()?; + if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize { + return Err(Error::NotEnoughSyncCommitteeParticipants); + } + + let signature_period = block.epoch().sync_committee_period(&chain_spec)?; + // Compute and validate attested header. 
+ let mut attested_header = attested_state.latest_block_header().clone(); + attested_header.state_root = attested_state.tree_hash_root(); + let attested_period = attested_header + .slot + .epoch(T::slots_per_epoch()) + .sync_committee_period(&chain_spec)?; + if attested_period != signature_period { + return Err(Error::MismatchingPeriods); + } + // Build finalized header from finalized block + let finalized_header = BeaconBlockHeader { + slot: finalized_block.slot(), + proposer_index: finalized_block.proposer_index(), + parent_root: finalized_block.parent_root(), + state_root: finalized_block.state_root(), + body_root: finalized_block.body_root(), + }; + if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root { + return Err(Error::InvalidFinalizedBlock); + } + // TODO(Giulio2002): compute proper merkle proofs. + Ok(Self { + attested_header, + next_sync_committee: attested_state.next_sync_committee()?.clone(), + next_sync_committee_branch: FixedVector::new(vec![ + Hash256::zero(); + NEXT_SYNC_COMMITTEE_PROOF_LEN + ])?, + finalized_header, + finality_branch: FixedVector::new(vec![Hash256::zero(); FINALIZED_ROOT_PROOF_LEN])?, + sync_aggregate: sync_aggregate.clone(), + signature_slot: block.slot(), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::MainnetEthSpec; + use ssz_types::typenum::Unsigned; + + ssz_tests!(LightClientUpdate); + + #[test] + fn finalized_root_params() { + assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32) <= FINALIZED_ROOT_INDEX); + assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32 + 1) > FINALIZED_ROOT_INDEX); + assert_eq!(FinalizedRootProofLen::to_usize(), FINALIZED_ROOT_PROOF_LEN); + } + + #[test] + fn current_sync_committee_params() { + assert!( + 2usize.pow(CURRENT_SYNC_COMMITTEE_PROOF_LEN as u32) <= CURRENT_SYNC_COMMITTEE_INDEX + ); + assert!( + 2usize.pow(CURRENT_SYNC_COMMITTEE_PROOF_LEN as u32 + 1) > CURRENT_SYNC_COMMITTEE_INDEX + ); + assert_eq!( + CurrentSyncCommitteeProofLen::to_usize(), + 
CURRENT_SYNC_COMMITTEE_PROOF_LEN + ); + } + + #[test] + fn next_sync_committee_params() { + assert!(2usize.pow(NEXT_SYNC_COMMITTEE_PROOF_LEN as u32) <= NEXT_SYNC_COMMITTEE_INDEX); + assert!(2usize.pow(NEXT_SYNC_COMMITTEE_PROOF_LEN as u32 + 1) > NEXT_SYNC_COMMITTEE_INDEX); + assert_eq!( + NextSyncCommitteeProofLen::to_usize(), + NEXT_SYNC_COMMITTEE_PROOF_LEN + ); + } +} From 5bd1501cb1b4fb7cff5ac292f522743d9297191d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kausik=20Das=20=E2=9C=AA?= Date: Fri, 28 Oct 2022 03:23:50 +0000 Subject: [PATCH 19/21] Book spelling and grammar corrections (#3659) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Issue Addressed There are few spelling and grammar errors in the book. ## Proposed Changes Corrected those spelling and grammar errors in the below files - book/src/advanced-release-candidates.md - book/src/advanced_networking.md - book/src/builders.md - book/src/key-management.md - book/src/merge-migration.md - book/src/wallet-create.md Co-authored-by: Kausik Das Co-authored-by: Kausik Das ✪ --- CONTRIBUTING.md | 2 +- book/src/advanced_networking.md | 2 +- book/src/builders.md | 2 +- book/src/key-management.md | 2 +- book/src/merge-migration.md | 2 +- book/src/wallet-create.md | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 489d12eb88..ef23e1ed57 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -46,7 +46,7 @@ questions. (github.com/YOUR_NAME/lighthouse) of the main repository (github.com/sigp/lighthouse). 3. Once you feel you have addressed the issue, **create a pull-request** to merge - your changes in to the main repository. + your changes into the main repository. 4. Wait for the repository maintainers to **review your changes** to ensure the issue is addressed satisfactorily. Optionally, mention your PR on [discord](https://discord.gg/cyAszAh). 
diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index d6fcb82a6b..fb7f07a51a 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -59,7 +59,7 @@ TCP and UDP ports (9000 by default). Lighthouse has a number of CLI parameters for constructing and modifying the local Ethereum Node Record (ENR). Examples are `--enr-address`, `--enr-udp-port`, `--enr-tcp-port` and `--disable-enr-auto-update`. These -settings allow you construct your initial ENR. Their primary intention is for +settings allow you to construct your initial ENR. Their primary intention is for setting up boot-like nodes and having a contactable ENR on boot. On normal operation of a Lighthouse node, none of these flags need to be set. Setting these flags incorrectly can lead to your node being incorrectly added to the diff --git a/book/src/builders.md b/book/src/builders.md index e57a4fad14..99fae5b3e7 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -188,7 +188,7 @@ with the builder network: INFO Published validator registrations to the builder network ``` -When you succesfully propose a block using a builder, you will see this log on the beacon node: +When you successfully propose a block using a builder, you will see this log on the beacon node: ``` INFO Successfully published a block to the builder network diff --git a/book/src/key-management.md b/book/src/key-management.md index 30d649f346..bb1751be16 100644 --- a/book/src/key-management.md +++ b/book/src/key-management.md @@ -103,7 +103,7 @@ opt-in). Instead, we assert that since the withdrawal keys can be regenerated from a mnemonic, having them lying around on the file-system only presents risk and complexity. -At the time or writing, we do not expose the commands to regenerate keys from +At the time of writing, we do not expose the commands to regenerate keys from mnemonics. 
However, key regeneration is tested on the public Lighthouse repository and will be exposed prior to mainnet launch. diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index 780be5836d..8596cd942c 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -2,7 +2,7 @@ This document provides detail for users who want to run a merge-ready Lighthouse node. -> The merge is occuring on mainnet in September. You _must_ have a merge-ready setup by September 6 +> The merge is occurring on mainnet in September. You _must_ have a merge-ready setup by September 6 > 2022. ## Necessary Configuration diff --git a/book/src/wallet-create.md b/book/src/wallet-create.md index 0ebb449177..25cac8d34d 100644 --- a/book/src/wallet-create.md +++ b/book/src/wallet-create.md @@ -11,7 +11,7 @@ backed up, all validator keys can be trivially re-generated. The 24-word string is randomly generated during wallet creation and printed out to the terminal. It's important to **make one or more backups of the mnemonic** -to ensure your ETH is not lost in the case of data loss. It very important to +to ensure your ETH is not lost in the case of data loss. It is very important to **keep your mnemonic private** as it represents the ultimate control of your ETH. From 46fbf5b98b5a32c49e757da6d0c239c25a35eeb7 Mon Sep 17 00:00:00 2001 From: Divma Date: Fri, 28 Oct 2022 05:40:06 +0000 Subject: [PATCH 20/21] Update discv5 (#3171) ## Issue Addressed Updates discv5 Pending on - [x] #3547 - [x] Alex upgrades his deps ## Proposed Changes updates discv5 and the enr crate. The only relevant change would be some clear indications of ipv4 usage in lighthouse ## Additional Info Functionally, this should be equivalent to the prev version. 
As draft pending a discv5 release --- Cargo.lock | 618 ++++++------------ beacon_node/execution_layer/Cargo.toml | 8 +- beacon_node/lighthouse_network/Cargo.toml | 4 +- beacon_node/lighthouse_network/src/config.rs | 2 +- .../lighthouse_network/src/discovery/enr.rs | 10 +- .../src/discovery/enr_ext.rs | 29 +- .../lighthouse_network/src/discovery/mod.rs | 21 +- .../src/peer_manager/network_behaviour.rs | 2 +- boot_node/src/server.rs | 10 +- common/deposit_contract/Cargo.toml | 2 +- common/eth2_network_config/Cargo.toml | 2 +- crypto/eth2_hashing/Cargo.toml | 2 +- crypto/eth2_key_derivation/Cargo.toml | 2 +- crypto/eth2_keystore/Cargo.toml | 2 +- lighthouse/tests/boot_node.rs | 4 +- .../execution_engine_integration/Cargo.toml | 4 +- 16 files changed, 270 insertions(+), 452 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ad6aac8f42..04cfd42350 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -258,9 +258,9 @@ dependencies = [ [[package]] name = "auto_impl" -version = "0.5.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7862e21c893d65a1650125d157eaeec691439379a1cee17ee49031b79236ada4" +checksum = "8a8c1df849285fbacd587de7818cc7d13be6cd2cbcd47a04fb1801b0e2706e33" dependencies = [ "proc-macro-error", "proc-macro2", @@ -366,7 +366,7 @@ checksum = "3bdca834647821e0b13d9539a8634eb62d3501b6b6c2cec1722786ee6671b851" [[package]] name = "beacon-api-client" version = "0.1.0" -source = "git+https://github.com/ralexstokes/beacon-api-client?rev=de34eeb#de34eeb92e4fdee5709d142910abf42cf857609b" +source = "git+https://github.com/ralexstokes/beacon-api-client?rev=7d5d8dad1648f771573f42585ad8080a45b05689#7d5d8dad1648f771573f42585ad8080a45b05689" dependencies = [ "ethereum-consensus", "http", @@ -913,15 +913,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.5.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"279bc8fc53f788a75c7804af68237d1fce02cde1e275a886a4b320604dc2aeda" - -[[package]] -name = "const-oid" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" +checksum = "722e23542a15cea1f65d4a1419c4cfd7a26706c70871a13a04238ca3f40f1661" [[package]] name = "convert_case" @@ -1061,9 +1055,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.3.2" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" +checksum = "9f2b443d17d49dad5ef0ede301c3179cc923b8822f3393b4d2c28c269dd4a122" dependencies = [ "generic-array", "rand_core 0.6.3", @@ -1272,28 +1266,19 @@ dependencies = [ "hex", "reqwest", "serde_json", - "sha2 0.9.9", + "sha2 0.10.2", "tree_hash", "types", ] [[package]] name = "der" -version = "0.3.5" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eeb9d92785d1facb50567852ce75d0858630630e7eabea59cf7eb7474051087" +checksum = "13dd2ae565c0a381dde7fade45fce95984c568bdcb4700a4fdbe3175e0380b2f" dependencies = [ - "const-oid 0.5.2", - "typenum", -] - -[[package]] -name = "der" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" -dependencies = [ - "const-oid 0.7.1", + "const-oid", + "zeroize", ] [[package]] @@ -1403,14 +1388,14 @@ dependencies = [ [[package]] name = "discv5" -version = "0.1.0-beta.13" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed8f54486179d5a7f11e1f5526f49d925a411a96c1141a707bd5f071be2ab630" +checksum = "d767c0e59b3e8d65222d95df723cc2ea1da92bb0f27c563607e6f0bde064f255" dependencies = [ "aes", "aes-gcm", "arrayvec", - "digest 0.10.3", + "delay_map", "enr", 
"fnv", "futures", @@ -1418,13 +1403,14 @@ dependencies = [ "hex", "hkdf", "lazy_static", - "libp2p-core 0.30.2", + "libp2p-core", "lru", + "more-asserts", "parking_lot 0.11.2", "rand 0.8.5", "rlp", - "sha2 0.9.9", "smallvec", + "socket2", "tokio", "tokio-stream", "tokio-util 0.6.10", @@ -1442,24 +1428,12 @@ checksum = "5caaa75cbd2b960ff1e5392d2cfb1f44717fffe12fc1f32b7b5d1267f99732a6" [[package]] name = "ecdsa" -version = "0.11.1" +version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d33b390ab82f2e1481e331dbd0530895640179d2128ef9a79cc690b78d1eba" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ - "der 0.3.5", - "elliptic-curve 0.9.12", - "hmac 0.11.0", - "signature", -] - -[[package]] -name = "ecdsa" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" -dependencies = [ - "der 0.5.1", - "elliptic-curve 0.11.12", + "der", + "elliptic-curve", "rfc6979", "signature", ] @@ -1526,32 +1500,18 @@ checksum = "3f107b87b6afc2a64fd13cac55fe06d6c8859f12d4b14cbcdd2c67d0976781be" [[package]] name = "elliptic-curve" -version = "0.9.12" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c13e9b0c3c4170dcc2a12783746c4205d98e18957f57854251eea3f9750fe005" -dependencies = [ - "bitvec 0.20.4", - "ff 0.9.0", - "generic-array", - "group 0.9.0", - "pkcs8 0.6.1", - "rand_core 0.6.3", - "subtle", - "zeroize", -] - -[[package]] -name = "elliptic-curve" -version = "0.11.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b477563c2bfed38a3b7a60964c49e058b2510ad3f12ba3483fd8f62c2306d6" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ "base16ct", "crypto-bigint", - "der 0.5.1", - "ff 0.11.1", + "der", + "digest 0.10.3", + "ff", "generic-array", - "group 0.11.0", + 
"group", + "pkcs8", "rand_core 0.6.3", "sec1", "subtle", @@ -1569,21 +1529,21 @@ dependencies = [ [[package]] name = "enr" -version = "0.5.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809869a1328bfb586b48c9c0f87761c47c41793a85bcb06f66074a87cafc1bcd" +checksum = "26fa0a0be8915790626d5759eb51fe47435a8eac92c2f212bd2da9aa7f30ea56" dependencies = [ "base64", "bs58", "bytes", "ed25519-dalek", "hex", - "k256 0.8.1", + "k256", "log", "rand 0.8.5", "rlp", "serde", - "sha3 0.9.1", + "sha3 0.10.1", "zeroize", ] @@ -1593,7 +1553,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" dependencies = [ - "heck 0.4.0", + "heck", "proc-macro2", "quote", "syn", @@ -1768,7 +1728,7 @@ dependencies = [ "hex", "num-bigint-dig", "ring", - "sha2 0.9.9", + "sha2 0.10.2", "zeroize", ] @@ -1941,14 +1901,15 @@ dependencies = [ [[package]] name = "ethereum-consensus" version = "0.1.1" -source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=e1188b1#e1188b14f320f225f2e53aa10336614565f04129" +source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=a8110af76d97bf2bf27fb987a671808fcbdf1834#a8110af76d97bf2bf27fb987a671808fcbdf1834" dependencies = [ "async-stream", "blst", + "bs58", "enr", "hex", "integer-sqrt", - "multiaddr 0.14.0", + "multiaddr", "rand 0.8.5", "serde", "serde_json", @@ -1989,29 +1950,36 @@ dependencies = [ [[package]] name = "ethers-core" -version = "0.6.0" -source = "git+https://github.com/gakonst/ethers-rs?rev=02ad93a1cfb7b62eb051c77c61dc4c0218428e4a#02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ebdd63c828f58aa067f40f9adcbea5e114fb1f90144b3a1e2858e0c9b1ff4e8" dependencies = [ "arrayvec", "bytes", - "elliptic-curve 0.11.12", + "chrono", + "elliptic-curve", "ethabi 17.1.0", + "fastrlp", 
"generic-array", "hex", - "k256 0.10.4", + "k256", "rand 0.8.5", "rlp", "rlp-derive", + "rust_decimal", "serde", "serde_json", + "strum", "thiserror", "tiny-keccak", + "unicode-xid", ] [[package]] name = "ethers-providers" -version = "0.6.0" -source = "git+https://github.com/gakonst/ethers-rs?rev=02ad93a1cfb7b62eb051c77c61dc4c0218428e4a#02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e46482e4d1e79b20c338fd9db9e166184eb387f0a4e7c05c5b5c0aa2e8c8900c" dependencies = [ "async-trait", "auto_impl", @@ -2021,11 +1989,13 @@ dependencies = [ "futures-core", "futures-timer", "futures-util", + "getrandom 0.2.7", + "hashers", "hex", "http", "once_cell", "parking_lot 0.11.2", - "pin-project 1.0.11", + "pin-project", "reqwest", "serde", "serde_json", @@ -2141,21 +2111,35 @@ dependencies = [ ] [[package]] -name = "ff" -version = "0.9.0" +name = "fastrlp" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72a4d941a5b7c2a75222e2d44fcdf634a67133d9db31e177ae5ff6ecda852bfe" +checksum = "089263294bb1c38ac73649a6ad563dd9a5142c8dc0482be15b8b9acb22a1611e" dependencies = [ - "bitvec 0.20.4", - "rand_core 0.6.3", - "subtle", + "arrayvec", + "auto_impl", + "bytes", + "ethereum-types 0.13.1", + "fastrlp-derive", +] + +[[package]] +name = "fastrlp-derive" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1fa41ebc231af281098b11ad4a4f6182ec9096902afffe948034a20d4e1385a" +dependencies = [ + "bytes", + "proc-macro2", + "quote", + "syn", ] [[package]] name = "ff" -version = "0.11.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "131655483be284720a17d74ff97592b8e76576dc25563148601df2d7c9080924" +checksum = "df689201f395c6b90dfe87127685f8dbfc083a5e779e613575d8bd7314300c3e" dependencies = [ "rand_core 0.6.3", "subtle", @@ -2389,6 +2373,15 @@ dependencies = [ "slab", 
] +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + [[package]] name = "generic-array" version = "0.14.5" @@ -2440,8 +2433,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] @@ -2490,22 +2485,11 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "group" -version = "0.9.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61b3c1e8b4f1ca07e6605ea1be903a5f6956aec5c8a67fd44d56076631675ed8" +checksum = "7391856def869c1c81063a03457c676fbcd419709c3dfb33d8d319de484b154d" dependencies = [ - "ff 0.9.0", - "rand_core 0.6.3", - "subtle", -] - -[[package]] -name = "group" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5ac374b108929de78460075f3dc439fa66df9d8fc77e8f12caa5165fcf0c89" -dependencies = [ - "ff 0.11.1", + "ff", "rand_core 0.6.3", "subtle", ] @@ -2553,6 +2537,15 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashers" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2bca93b15ea5a746f220e56587f71e73c6165eab783df9e26590069953e3c30" +dependencies = [ + "fxhash", +] + [[package]] name = "hashlink" version = "0.7.0" @@ -2587,15 +2580,6 @@ dependencies = [ "http", ] -[[package]] -name = "heck" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] - [[package]] name = "heck" version = "0.4.0" @@ -2625,12 +2609,11 @@ checksum = 
"b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" [[package]] name = "hkdf" -version = "0.11.0" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01706d578d5c281058480e673ae4086a9f4710d8df1ad80a5b03e39ece5f886b" +checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" dependencies = [ - "digest 0.9.0", - "hmac 0.11.0", + "hmac 0.12.1", ] [[package]] @@ -2653,6 +2636,15 @@ dependencies = [ "digest 0.9.0", ] +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.3", +] + [[package]] name = "hmac-drbg" version = "0.3.0" @@ -3059,27 +3051,15 @@ dependencies = [ [[package]] name = "k256" -version = "0.8.1" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3e8e491ed22bc161583a1c77e42313672c483eba6bd9d7afec0f1131d0b9ce" +checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" dependencies = [ "cfg-if", - "ecdsa 0.11.1", - "elliptic-curve 0.9.12", - "sha2 0.9.9", -] - -[[package]] -name = "k256" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19c3a5e0a0b8450278feda242592512e09f61c72e018b8cd5c859482802daf2d" -dependencies = [ - "cfg-if", - "ecdsa 0.13.4", - "elliptic-curve 0.11.12", - "sec1", - "sha3 0.9.1", + "ecdsa", + "elliptic-curve", + "sha2 0.10.2", + "sha3 0.10.1", ] [[package]] @@ -3231,7 +3211,7 @@ dependencies = [ "getrandom 0.2.7", "instant", "lazy_static", - "libp2p-core 0.36.0", + "libp2p-core", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", @@ -3244,48 +3224,13 @@ dependencies = [ "libp2p-tcp", "libp2p-websocket", "libp2p-yamux", - "multiaddr 0.14.0", + "multiaddr", "parking_lot 0.12.1", - "pin-project 1.0.11", + "pin-project", "rand 0.7.3", "smallvec", ] -[[package]] -name = 
"libp2p-core" -version = "0.30.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86aad7d54df283db817becded03e611137698a6509d4237a96881976a162340c" -dependencies = [ - "asn1_der", - "bs58", - "ed25519-dalek", - "either", - "fnv", - "futures", - "futures-timer", - "instant", - "lazy_static", - "libsecp256k1", - "log", - "multiaddr 0.13.0", - "multihash 0.14.0", - "multistream-select 0.10.4", - "parking_lot 0.11.2", - "pin-project 1.0.11", - "prost 0.9.0", - "prost-build 0.9.0", - "rand 0.8.5", - "ring", - "rw-stream-sink 0.2.1", - "sha2 0.9.9", - "smallvec", - "thiserror", - "unsigned-varint 0.7.1", - "void", - "zeroize", -] - [[package]] name = "libp2p-core" version = "0.36.0" @@ -3303,15 +3248,16 @@ dependencies = [ "lazy_static", "libsecp256k1", "log", - "multiaddr 0.14.0", - "multihash 0.16.2", - "multistream-select 0.11.0", + "multiaddr", + "multihash", + "multistream-select", + "p256", "parking_lot 0.12.1", - "pin-project 1.0.11", - "prost 0.11.0", - "prost-build 0.11.1", + "pin-project", + "prost", + "prost-build", "rand 0.8.5", - "rw-stream-sink 0.3.0", + "rw-stream-sink", "sha2 0.10.2", "smallvec", "thiserror", @@ -3327,7 +3273,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6cb3c16e3bb2f76c751ae12f0f26e788c89d353babdded40411e7923f01fc978" dependencies = [ "futures", - "libp2p-core 0.36.0", + "libp2p-core", "log", "parking_lot 0.12.1", "smallvec", @@ -3348,12 +3294,12 @@ dependencies = [ "futures", "hex_fmt", "instant", - "libp2p-core 0.36.0", + "libp2p-core", "libp2p-swarm", "log", "prometheus-client", - "prost 0.11.0", - "prost-build 0.11.1", + "prost", + "prost-build", "rand 0.7.3", "regex", "sha2 0.10.2", @@ -3371,12 +3317,12 @@ dependencies = [ "asynchronous-codec", "futures", "futures-timer", - "libp2p-core 0.36.0", + "libp2p-core", "libp2p-swarm", "log", "lru", - "prost 0.11.0", - "prost-build 0.11.1", + "prost", + "prost-build", "prost-codec", "smallvec", "thiserror", @@ -3389,7 
+3335,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a74ab339e8b5d989e8c1000a78adb5c064a6319245bb22d1e70b415ec18c39b8" dependencies = [ - "libp2p-core 0.36.0", + "libp2p-core", "libp2p-gossipsub", "libp2p-identify", "libp2p-swarm", @@ -3405,7 +3351,7 @@ dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.36.0", + "libp2p-core", "log", "nohash-hasher", "parking_lot 0.12.1", @@ -3424,10 +3370,10 @@ dependencies = [ "curve25519-dalek 3.2.0", "futures", "lazy_static", - "libp2p-core 0.36.0", + "libp2p-core", "log", - "prost 0.11.0", - "prost-build 0.11.1", + "prost", + "prost-build", "rand 0.8.5", "sha2 0.10.2", "snow", @@ -3445,10 +3391,10 @@ dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.36.0", + "libp2p-core", "log", - "prost 0.11.0", - "prost-build 0.11.1", + "prost", + "prost-build", "unsigned-varint 0.7.1", "void", ] @@ -3464,9 +3410,9 @@ dependencies = [ "futures", "futures-timer", "instant", - "libp2p-core 0.36.0", + "libp2p-core", "log", - "pin-project 1.0.11", + "pin-project", "rand 0.7.3", "smallvec", "thiserror", @@ -3479,7 +3425,7 @@ version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f02622b9dd150011b4eeec387f8bd013189a2f27da08ba363e7c6e606d77a48" dependencies = [ - "heck 0.4.0", + "heck", "quote", "syn", ] @@ -3495,7 +3441,7 @@ dependencies = [ "if-addrs 0.7.0", "ipnet", "libc", - "libp2p-core 0.36.0", + "libp2p-core", "log", "socket2", "tokio", @@ -3510,11 +3456,11 @@ dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.36.0", + "libp2p-core", "log", "parking_lot 0.12.1", "quicksink", - "rw-stream-sink 0.3.0", + "rw-stream-sink", "soketto", "url", "webpki-roots", @@ -3527,7 +3473,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b74ec8dc042b583f0b2b93d52917f3b374c1e4b1cfa79ee74c7672c41257694c" dependencies = [ "futures", - "libp2p-core 0.36.0", + 
"libp2p-core", "parking_lot 0.12.1", "thiserror", "yamux", @@ -3680,7 +3626,7 @@ dependencies = [ "regex", "serde", "serde_derive", - "sha2 0.9.9", + "sha2 0.10.2", "slog", "slog-async", "slog-term", @@ -3892,7 +3838,7 @@ dependencies = [ [[package]] name = "mev-build-rs" version = "0.2.1" -source = "git+https://github.com/ralexstokes/mev-rs?rev=a088806575805c00d63fa59c002abc5eb1dc7709#a088806575805c00d63fa59c002abc5eb1dc7709" +source = "git+https://github.com/ralexstokes/mev-rs?rev=6c99b0fbdc0427b1625469d2e575303ce08de5b8#6c99b0fbdc0427b1625469d2e575303ce08de5b8" dependencies = [ "async-trait", "axum", @@ -3981,22 +3927,10 @@ dependencies = [ ] [[package]] -name = "multiaddr" -version = "0.13.0" +name = "more-asserts" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48ee4ea82141951ac6379f964f71b20876d43712bea8faf6dd1a375e08a46499" -dependencies = [ - "arrayref", - "bs58", - "byteorder", - "data-encoding", - "multihash 0.14.0", - "percent-encoding", - "serde", - "static_assertions", - "unsigned-varint 0.7.1", - "url", -] +checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" [[package]] name = "multiaddr" @@ -4008,7 +3942,7 @@ dependencies = [ "bs58", "byteorder", "data-encoding", - "multihash 0.16.2", + "multihash", "percent-encoding", "serde", "static_assertions", @@ -4016,19 +3950,6 @@ dependencies = [ "url", ] -[[package]] -name = "multihash" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "752a61cd890ff691b4411423d23816d5866dd5621e4d1c5687a53b94b5a979d8" -dependencies = [ - "digest 0.9.0", - "generic-array", - "multihash-derive 0.7.2", - "sha2 0.9.9", - "unsigned-varint 0.7.1", -] - [[package]] name = "multihash" version = "0.16.2" @@ -4037,25 +3958,11 @@ checksum = "e3db354f401db558759dfc1e568d010a5d4146f4d3f637be1275ec4a3cf09689" dependencies = [ "core2", "digest 0.10.3", - "multihash-derive 0.8.0", + "multihash-derive", "sha2 0.10.2", 
"unsigned-varint 0.7.1", ] -[[package]] -name = "multihash-derive" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "424f6e86263cd5294cbd7f1e95746b95aca0e0d66bff31e5a40d6baa87b4aa99" -dependencies = [ - "proc-macro-crate", - "proc-macro-error", - "proc-macro2", - "quote", - "syn", - "synstructure", -] - [[package]] name = "multihash-derive" version = "0.8.0" @@ -4094,20 +4001,6 @@ dependencies = [ "twoway", ] -[[package]] -name = "multistream-select" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56a336acba8bc87c8876f6425407dbbe6c417bf478b22015f8fb0994ef3bc0ab" -dependencies = [ - "bytes", - "futures", - "log", - "pin-project 1.0.11", - "smallvec", - "unsigned-varint 0.7.1", -] - [[package]] name = "multistream-select" version = "0.11.0" @@ -4117,7 +4010,7 @@ dependencies = [ "bytes", "futures", "log", - "pin-project 1.0.11", + "pin-project", "smallvec", "unsigned-varint 0.7.1", ] @@ -4436,6 +4329,17 @@ dependencies = [ "types", ] +[[package]] +name = "p256" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" +dependencies = [ + "ecdsa", + "elliptic-curve", + "sha2 0.10.2", +] + [[package]] name = "parity-scale-codec" version = "2.3.1" @@ -4611,33 +4515,13 @@ dependencies = [ "rustc_version 0.4.0", ] -[[package]] -name = "pin-project" -version = "0.4.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ef0f924a5ee7ea9cbcea77529dba45f8a9ba9f622419fe3386ca581a3ae9d5a" -dependencies = [ - "pin-project-internal 0.4.30", -] - [[package]] name = "pin-project" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" dependencies = [ - "pin-project-internal 1.0.11", -] - -[[package]] -name = "pin-project-internal" -version = 
"0.4.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "851c8d0ce9bebe43790dedfc86614c23494ac9f423dd618d3a61fc693eafe61e" -dependencies = [ - "proc-macro2", - "quote", - "syn", + "pin-project-internal", ] [[package]] @@ -4671,23 +4555,12 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs8" -version = "0.6.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9c2f795bc591cb3384cb64082a578b89207ac92bb89c9d98c1ea2ace7cd8110" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ - "der 0.3.5", - "spki 0.3.0", -] - -[[package]] -name = "pkcs8" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" -dependencies = [ - "der 0.5.1", - "spki 0.5.4", - "zeroize", + "der", + "spki", ] [[package]] @@ -4884,16 +4757,6 @@ dependencies = [ "syn", ] -[[package]] -name = "prost" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" -dependencies = [ - "bytes", - "prost-derive 0.9.0", -] - [[package]] name = "prost" version = "0.11.0" @@ -4901,27 +4764,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "399c3c31cdec40583bb68f0b18403400d01ec4289c383aa047560439952c4dd7" dependencies = [ "bytes", - "prost-derive 0.11.0", -] - -[[package]] -name = "prost-build" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" -dependencies = [ - "bytes", - "heck 0.3.3", - "itertools", - "lazy_static", - "log", - "multimap", - "petgraph", - "prost 0.9.0", - "prost-types 0.9.0", - "regex", - "tempfile", - "which", + "prost-derive", ] [[package]] @@ -4931,14 +4774,14 @@ source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f835c582e6bd972ba8347313300219fed5bfa52caf175298d860b61ff6069bb" dependencies = [ "bytes", - "heck 0.4.0", + "heck", "itertools", "lazy_static", "log", "multimap", "petgraph", - "prost 0.11.0", - "prost-types 0.11.1", + "prost", + "prost-types", "regex", "tempfile", "which", @@ -4952,24 +4795,11 @@ checksum = "011ae9ff8359df7915f97302d591cdd9e0e27fbd5a4ddc5bd13b71079bb20987" dependencies = [ "asynchronous-codec", "bytes", - "prost 0.11.0", + "prost", "thiserror", "unsigned-varint 0.7.1", ] -[[package]] -name = "prost-derive" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "prost-derive" version = "0.11.0" @@ -4983,16 +4813,6 @@ dependencies = [ "syn", ] -[[package]] -name = "prost-types" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" -dependencies = [ - "bytes", - "prost 0.9.0", -] - [[package]] name = "prost-types" version = "0.11.1" @@ -5000,7 +4820,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dfaa718ad76a44b3415e6c4d53b17c8f99160dcb3a99b10470fce8ad43f6e3e" dependencies = [ "bytes", - "prost 0.11.0", + "prost", ] [[package]] @@ -5336,12 +5156,12 @@ dependencies = [ [[package]] name = "rfc6979" -version = "0.1.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" +checksum = "88c86280f057430a52f4861551b092a01b419b8eacefc7c995eacb9dc132fe32" dependencies = [ "crypto-bigint", - "hmac 0.11.0", + "hmac 0.12.1", "zeroize", ] @@ -5412,6 +5232,17 @@ dependencies = [ "smallvec", ] +[[package]] +name = "rust_decimal" 
+version = "1.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee9164faf726e4f3ece4978b25ca877ddc6802fa77f38cdccb32c7f805ecd70c" +dependencies = [ + "arrayvec", + "num-traits", + "serde", +] + [[package]] name = "rustc-demangle" version = "0.1.21" @@ -5497,17 +5328,6 @@ version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24c8ad4f0c00e1eb5bc7614d236a7f1300e3dbd76b68cac8e06fb00b015ad8d8" -[[package]] -name = "rw-stream-sink" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" -dependencies = [ - "futures", - "pin-project 0.4.30", - "static_assertions", -] - [[package]] name = "rw-stream-sink" version = "0.3.0" @@ -5515,7 +5335,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26338f5e09bb721b85b135ea05af7767c90b52f6de4f087d4f4a3a9d64e7dc04" dependencies = [ "futures", - "pin-project 1.0.11", + "pin-project", "static_assertions", ] @@ -5618,13 +5438,14 @@ dependencies = [ [[package]] name = "sec1" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ - "der 0.5.1", + "base16ct", + "der", "generic-array", - "pkcs8 0.8.0", + "pkcs8", "subtle", "zeroize", ] @@ -5927,11 +5748,11 @@ dependencies = [ [[package]] name = "signature" -version = "1.3.2" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2807892cfa58e081aa1f1111391c7a0649d4fa127a4ffbe34bcbfb35a1171a4" +checksum = "deb766570a2825fa972bceff0d195727876a9cdf2460ab2e52d455dc2de47fd9" dependencies = [ - "digest 0.9.0", + "digest 0.10.3", "rand_core 0.6.3", ] @@ -6215,21 +6036,12 @@ checksum = 
"6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spki" -version = "0.3.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dae7e047abc519c96350e9484a96c6bf1492348af912fd3446dd2dc323f6268" -dependencies = [ - "der 0.3.5", -] - -[[package]] -name = "spki" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ "base64ct", - "der 0.5.1", + "der", ] [[package]] @@ -6354,7 +6166,7 @@ version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4faebde00e8ff94316c01800f9054fd2ba77d30d9e922541913051d1d978918b" dependencies = [ - "heck 0.4.0", + "heck", "proc-macro2", "quote", "rustversion", @@ -6741,7 +6553,7 @@ checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" dependencies = [ "futures-util", "log", - "pin-project 1.0.11", + "pin-project", "tokio", "tungstenite 0.14.0", ] @@ -6809,7 +6621,7 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", - "pin-project 1.0.11", + "pin-project", "pin-project-lite 0.2.9", "tokio", "tower-layer", @@ -6888,7 +6700,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 1.0.11", + "pin-project", "tracing", ] @@ -7174,12 +6986,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99" - [[package]] name = "unicode-width" version = "0.1.9" @@ -7397,7 +7203,7 @@ dependencies = [ "mime_guess", 
"multipart", "percent-encoding", - "pin-project 1.0.11", + "pin-project", "scoped-tls", "serde", "serde_json", @@ -7583,7 +7389,7 @@ dependencies = [ "log", "once_cell", "parking_lot 0.12.1", - "pin-project 1.0.11", + "pin-project", "reqwest", "rlp", "secp256k1", diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 770bc4cf8c..bfc748d5b6 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -37,11 +37,11 @@ rand = "0.8.5" zeroize = { version = "1.4.2", features = ["zeroize_derive"] } lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lazy_static = "1.4.0" -ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } +ethers-core = "0.17.0" builder_client = { path = "../builder_client" } fork_choice = { path = "../../consensus/fork_choice" } -mev-build-rs = {git = "https://github.com/ralexstokes/mev-rs", rev = "a088806575805c00d63fa59c002abc5eb1dc7709"} -ethereum-consensus = {git = "https://github.com/ralexstokes/ethereum-consensus", rev = "e1188b1" } -ssz-rs = {git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f1" } +mev-build-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "6c99b0fbdc0427b1625469d2e575303ce08de5b8" } +ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "a8110af76d97bf2bf27fb987a671808fcbdf1834" } +ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f1" } tokio-stream = { version = "0.1.9", features = [ "sync" ] } strum = "0.24.0" diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index e5af0a7499..977f0a1088 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2021" [dependencies] -discv5 = { version = "0.1.0-beta.13", features = ["libp2p"] } +discv5 = { version = 
"0.1.0", features = ["libp2p"] } unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } eth2_ssz_types = "0.2.2" @@ -26,7 +26,7 @@ smallvec = "1.6.1" tokio-io-timeout = "1.1.1" lru = "0.7.1" parking_lot = "0.12.0" -sha2 = "0.9.1" +sha2 = "0.10" snap = "1.0.1" hex = "0.4.2" tokio-util = { version = "0.6.2", features = ["codec", "compat", "time"] } diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 263ef0c7cb..71566b8778 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -176,7 +176,7 @@ impl Default for Config { .filter_rate_limiter(filter_rate_limiter) .filter_max_bans_per_ip(Some(5)) .filter_max_nodes_per_ip(Some(10)) - .table_filter(|enr| enr.ip().map_or(false, |ip| is_global(&ip))) // Filter non-global IPs + .table_filter(|enr| enr.ip4().map_or(false, |ip| is_global(&ip))) // Filter non-global IPs .ban_duration(Some(Duration::from_secs(3600))) .ping_interval(Duration::from_secs(300)) .build(); diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index b513ede59f..6b4b87a5f8 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -149,12 +149,12 @@ pub fn create_enr_builder_from_config( builder.ip(enr_address); } if let Some(udp_port) = config.enr_udp_port { - builder.udp(udp_port); + builder.udp4(udp_port); } // we always give it our listening tcp port if enable_tcp { let tcp_port = config.enr_tcp_port.unwrap_or(config.libp2p_port); - builder.tcp(tcp_port); + builder.tcp4(tcp_port); } builder } @@ -189,13 +189,13 @@ pub fn build_enr( /// If this function returns true, we use the `disk_enr`. 
fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { // take preference over disk_enr address if one is not specified - (local_enr.ip().is_none() || local_enr.ip() == disk_enr.ip()) + (local_enr.ip4().is_none() || local_enr.ip4() == disk_enr.ip4()) // tcp ports must match - && local_enr.tcp() == disk_enr.tcp() + && local_enr.tcp4() == disk_enr.tcp4() // must match on the same fork && local_enr.get(ETH2_ENR_KEY) == disk_enr.get(ETH2_ENR_KEY) // take preference over disk udp port if one is not specified - && (local_enr.udp().is_none() || local_enr.udp() == disk_enr.udp()) + && (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4()) // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY key to match, // otherwise we use a new ENR. This will likely only be true for non-validating nodes && local_enr.get(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get(ATTESTATION_BITFIELD_ENR_KEY) diff --git a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs index 1001efe231..e9cca6667a 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs @@ -48,14 +48,14 @@ impl EnrExt for Enr { /// The vector remains empty if these fields are not defined. 
fn multiaddr(&self) -> Vec { let mut multiaddrs: Vec = Vec::new(); - if let Some(ip) = self.ip() { - if let Some(udp) = self.udp() { + if let Some(ip) = self.ip4() { + if let Some(udp) = self.udp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Udp(udp)); multiaddrs.push(multiaddr); } - if let Some(tcp) = self.tcp() { + if let Some(tcp) = self.tcp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Tcp(tcp)); multiaddrs.push(multiaddr); @@ -84,15 +84,15 @@ impl EnrExt for Enr { fn multiaddr_p2p(&self) -> Vec { let peer_id = self.peer_id(); let mut multiaddrs: Vec = Vec::new(); - if let Some(ip) = self.ip() { - if let Some(udp) = self.udp() { + if let Some(ip) = self.ip4() { + if let Some(udp) = self.udp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Udp(udp)); multiaddr.push(Protocol::P2p(peer_id.into())); multiaddrs.push(multiaddr); } - if let Some(tcp) = self.tcp() { + if let Some(tcp) = self.tcp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Tcp(tcp)); multiaddr.push(Protocol::P2p(peer_id.into())); @@ -124,8 +124,8 @@ impl EnrExt for Enr { fn multiaddr_p2p_tcp(&self) -> Vec { let peer_id = self.peer_id(); let mut multiaddrs: Vec = Vec::new(); - if let Some(ip) = self.ip() { - if let Some(tcp) = self.tcp() { + if let Some(ip) = self.ip4() { + if let Some(tcp) = self.tcp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Tcp(tcp)); multiaddr.push(Protocol::P2p(peer_id.into())); @@ -150,8 +150,8 @@ impl EnrExt for Enr { fn multiaddr_p2p_udp(&self) -> Vec { let peer_id = self.peer_id(); let mut multiaddrs: Vec = Vec::new(); - if let Some(ip) = self.ip() { - if let Some(udp) = self.udp() { + if let Some(ip) = self.ip4() { + if let Some(udp) = self.udp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Udp(udp)); multiaddr.push(Protocol::P2p(peer_id.into())); @@ -173,8 +173,8 @@ impl EnrExt for Enr { /// The vector remains empty if 
these fields are not defined. fn multiaddr_tcp(&self) -> Vec { let mut multiaddrs: Vec = Vec::new(); - if let Some(ip) = self.ip() { - if let Some(tcp) = self.tcp() { + if let Some(ip) = self.ip4() { + if let Some(tcp) = self.tcp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Tcp(tcp)); multiaddrs.push(multiaddr); @@ -232,6 +232,7 @@ impl CombinedKeyExt for CombinedKey { .expect("libp2p key must be valid"); Ok(CombinedKey::from(ed_keypair)) } + Keypair::Ecdsa(_) => Err("Ecdsa keypairs not supported"), } } } @@ -265,6 +266,10 @@ pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result Err(format!( + "Unsupported public key (Ecdsa) from peer {}", + peer_id + )), } } diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index d766fd23a3..3535c6bd9a 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -197,7 +197,9 @@ impl Discovery { let local_enr = network_globals.local_enr.read().clone(); - info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), "ip" => ?local_enr.ip(), "udp"=> ?local_enr.udp(), "tcp" => ?local_enr.tcp()); + info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), + "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp6() + ); let listen_socket = SocketAddr::new(config.listen_address, config.discovery_port); @@ -214,9 +216,9 @@ impl Discovery { "Adding node to routing table"; "node_id" => %bootnode_enr.node_id(), "peer_id" => %bootnode_enr.peer_id(), - "ip" => ?bootnode_enr.ip(), - "udp" => ?bootnode_enr.udp(), - "tcp" => ?bootnode_enr.tcp() + "ip" => ?bootnode_enr.ip4(), + "udp" => ?bootnode_enr.udp4(), + "tcp" => ?bootnode_enr.tcp4() ); let repr = bootnode_enr.to_string(); let _ = discv5.add_enr(bootnode_enr).map_err(|e| { @@ -268,9 +270,9 @@ 
impl Discovery { "Adding node to routing table"; "node_id" => %enr.node_id(), "peer_id" => %enr.peer_id(), - "ip" => ?enr.ip(), - "udp" => ?enr.udp(), - "tcp" => ?enr.tcp() + "ip" => ?enr.ip4(), + "udp" => ?enr.udp4(), + "tcp" => ?enr.tcp4() ); let _ = discv5.add_enr(enr).map_err(|e| { error!( @@ -763,7 +765,7 @@ impl Discovery { // we can connect to peers who aren't compatible with an upcoming fork. // `fork_digest` **must** be same. enr.eth2().map(|e| e.fork_digest) == Ok(enr_fork_id.fork_digest) - && (enr.tcp().is_some() || enr.tcp6().is_some()) + && (enr.tcp4().is_some() || enr.tcp6().is_some()) }; // General predicate @@ -1040,7 +1042,8 @@ impl NetworkBehaviour for Discovery { } Discv5Event::EnrAdded { .. } | Discv5Event::TalkRequest(_) - | Discv5Event::NodeInserted { .. } => {} // Ignore all other discv5 server events + | Discv5Event::NodeInserted { .. } + | Discv5Event::SessionEstablished { .. } => {} // Ignore all other discv5 server events } } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index a19c6db657..c84e368f16 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -130,7 +130,7 @@ impl NetworkBehaviour for PeerManager { } // Check NAT if metrics are enabled - if self.network_globals.local_enr.read().udp().is_some() { + if self.network_globals.local_enr.read().udp4().is_some() { metrics::check_nat(); } diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index 6b7f0bf48e..c4bf887e94 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -11,7 +11,10 @@ use types::EthSpec; pub async fn run(config: BootNodeConfig, log: slog::Logger) { // Print out useful information about the generated ENR - let enr_socket = config.local_enr.udp_socket().expect("Enr has a UDP socket"); + let enr_socket = config + .local_enr + 
.udp4_socket() + .expect("Enr has a UDP socket"); let eth2_field = config .local_enr .eth2() @@ -39,7 +42,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { info!( log, "Adding bootnode"; - "address" => ?enr.udp_socket(), + "address" => ?enr.udp4_socket(), "peer_id" => enr.peer_id().to_string(), "node_id" => enr.node_id().to_string() ); @@ -89,11 +92,12 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { // Ignore these events here } Discv5Event::EnrAdded { .. } => {} // Ignore - Discv5Event::TalkRequest(_) => {} // Ignore + Discv5Event::TalkRequest(_) => {} // Ignore Discv5Event::NodeInserted { .. } => {} // Ignore Discv5Event::SocketUpdated(socket_addr) => { info!(log, "External socket address updated"; "socket_addr" => format!("{:?}", socket_addr)); } + Discv5Event::SessionEstablished{ .. } => {} // Ignore } } } diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index 7c3d183940..7be0e8f3d2 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -9,7 +9,7 @@ build = "build.rs" [build-dependencies] reqwest = { version = "0.11.0", features = ["blocking", "json", "native-tls-vendored"] } serde_json = "1.0.58" -sha2 = "0.9.1" +sha2 = "0.10" hex = "0.4.2" [dependencies] diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 32cee89f7f..6199005552 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -18,4 +18,4 @@ serde_yaml = "0.8.13" types = { path = "../../consensus/types"} eth2_ssz = "0.4.1" eth2_config = { path = "../eth2_config"} -enr = { version = "0.5.1", features = ["ed25519", "k256"] } +enr = { version = "0.6.2", features = ["ed25519", "k256"] } diff --git a/crypto/eth2_hashing/Cargo.toml b/crypto/eth2_hashing/Cargo.toml index 28f106fcbb..db296c70fe 100644 --- a/crypto/eth2_hashing/Cargo.toml +++ b/crypto/eth2_hashing/Cargo.toml @@ -10,7 +10,7 @@ description = "Hashing 
primitives used in Ethereum 2.0" lazy_static = { version = "1.4.0", optional = true } cpufeatures = { version = "0.2.5", optional = true } ring = "0.16.19" -sha2 = "0.10.2" +sha2 = "0.10" [dev-dependencies] rustc-hex = "2.1.0" diff --git a/crypto/eth2_key_derivation/Cargo.toml b/crypto/eth2_key_derivation/Cargo.toml index 43a8fe88b9..3f174a02d4 100644 --- a/crypto/eth2_key_derivation/Cargo.toml +++ b/crypto/eth2_key_derivation/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -sha2 = "0.9.1" +sha2 = "0.10" zeroize = { version = "1.4.2", features = ["zeroize_derive"] } num-bigint-dig = { version = "0.6.0", features = ["zeroize"] } ring = "0.16.19" diff --git a/crypto/eth2_keystore/Cargo.toml b/crypto/eth2_keystore/Cargo.toml index 98521c8fbb..d83a60da24 100644 --- a/crypto/eth2_keystore/Cargo.toml +++ b/crypto/eth2_keystore/Cargo.toml @@ -11,7 +11,7 @@ rand = "0.8.5" hmac = "0.11.0" pbkdf2 = { version = "0.8.0", default-features = false } scrypt = { version = "0.7.0", default-features = false } -sha2 = "0.9.1" +sha2 = "0.9.2" uuid = { version = "0.8.1", features = ["serde", "v4"] } zeroize = { version = "1.4.2", features = ["zeroize_derive"] } serde = "1.0.116" diff --git a/lighthouse/tests/boot_node.rs b/lighthouse/tests/boot_node.rs index 1c11ae046e..8c000bbb3d 100644 --- a/lighthouse/tests/boot_node.rs +++ b/lighthouse/tests/boot_node.rs @@ -56,7 +56,7 @@ impl CommandLineTestExec for CommandLineTest { fn enr_address_arg() { let mut test = CommandLineTest::new(); test.run_with_ip().with_config(|config| { - assert_eq!(config.local_enr.ip(), Some(IP_ADDRESS.parse().unwrap())); + assert_eq!(config.local_enr.ip4(), Some(IP_ADDRESS.parse().unwrap())); }); } @@ -127,7 +127,7 @@ fn enr_port_flag() { .flag("enr-port", Some(port.to_string().as_str())) .run_with_ip() .with_config(|config| { - assert_eq!(config.local_enr.udp(), Some(port)); + 
assert_eq!(config.local_enr.udp4(), Some(port)); }) } diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 7a8d7e99b5..a85138be95 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -15,8 +15,8 @@ execution_layer = { path = "../../beacon_node/execution_layer" } sensitive_url = { path = "../../common/sensitive_url" } types = { path = "../../consensus/types" } unused_port = { path = "../../common/unused_port" } -ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } -ethers-providers = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } +ethers-core = "0.17.0" +ethers-providers = "0.17.0" deposit_contract = { path = "../../common/deposit_contract" } reqwest = { version = "0.11.0", features = ["json"] } hex = "0.4.2" From e8604757a20461738686dd07dbc909834cb9c474 Mon Sep 17 00:00:00 2001 From: ethDreamer Date: Sun, 30 Oct 2022 04:04:24 +0000 Subject: [PATCH 21/21] Deposit Cache Finalization & Fast WS Sync (#2915) ## Summary The deposit cache now has the ability to finalize deposits. This will cause it to drop unneeded deposit logs and hashes in the deposit Merkle tree that are no longer required to construct deposit proofs. The cache is finalized whenever the latest finalized checkpoint has a new `Eth1Data` with all deposits imported. This has three benefits: 1. Improves the speed of constructing Merkle proofs for deposits as we can just replay deposits since the last finalized checkpoint instead of all historical deposits when re-constructing the Merkle tree. 2. Significantly faster weak subjectivity sync as the deposit cache can be transferred to the newly syncing node in compressed form. The Merkle tree that stores `N` finalized deposits requires a maximum of `log2(N)` hashes. 
The newly syncing node then only needs to download deposits since the last finalized checkpoint to have a full tree. 3. Future proofing in preparation for [EIP-4444](https://eips.ethereum.org/EIPS/eip-4444) as execution nodes will no longer be required to store logs permanently so we won't always have all historical logs available to us. ## More Details Image to illustrate how the deposit contract merkle tree evolves and finalizes along with the resulting `DepositTreeSnapshot` ![image](https://user-images.githubusercontent.com/37123614/151465302-5fc56284-8a69-4998-b20e-45db3934ac70.png) ## Other Considerations I've changed the structure of the `SszDepositCache` so once you load & save your database from this version of lighthouse, you will no longer be able to load it from older versions. Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com> --- Cargo.lock | 3 +- beacon_node/beacon_chain/src/beacon_chain.rs | 70 +- .../beacon_chain/src/block_verification.rs | 8 + beacon_node/beacon_chain/src/builder.rs | 2 + beacon_node/beacon_chain/src/eth1_chain.rs | 25 +- .../src/eth1_finalization_cache.rs | 498 +++++++++++ beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/schema_change.rs | 96 +- .../src/schema_change/migration_schema_v13.rs | 150 ++++ beacon_node/beacon_chain/src/test_utils.rs | 5 +- beacon_node/client/src/builder.rs | 79 +- beacon_node/eth1/Cargo.toml | 1 + beacon_node/eth1/src/block_cache.rs | 52 +- beacon_node/eth1/src/deposit_cache.rs | 820 +++++++++++++++--- beacon_node/eth1/src/inner.rs | 46 +- beacon_node/eth1/src/lib.rs | 4 +- beacon_node/eth1/src/service.rs | 105 ++- beacon_node/eth1/tests/test.rs | 4 +- .../execution_layer/src/engine_api/http.rs | 14 +- beacon_node/genesis/src/common.rs | 4 +- .../genesis/src/eth1_genesis_service.rs | 2 +- beacon_node/http_api/src/lib.rs | 48 + beacon_node/store/src/metadata.rs | 2 +- common/eth2/src/lib.rs | 16 + common/eth2/src/lighthouse.rs | 18 +- 
consensus/merkle_proof/src/lib.rs | 185 +++- consensus/ssz/src/decode/impls.rs | 14 + consensus/ssz/src/encode/impls.rs | 36 + consensus/ssz/tests/tests.rs | 18 + .../src/common/deposit_data_tree.rs | 57 +- consensus/types/Cargo.toml | 2 +- consensus/types/src/deposit_tree_snapshot.rs | 83 ++ consensus/types/src/lib.rs | 2 + database_manager/src/lib.rs | 1 + validator_client/src/lib.rs | 2 + 35 files changed, 2302 insertions(+), 171 deletions(-) create mode 100644 beacon_node/beacon_chain/src/eth1_finalization_cache.rs create mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs create mode 100644 consensus/types/src/deposit_tree_snapshot.rs diff --git a/Cargo.lock b/Cargo.lock index 04cfd42350..6d65ccb48c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1638,6 +1638,7 @@ dependencies = [ "slog", "sloggers", "state_processing", + "superstruct", "task_executor", "tokio", "tree_hash", @@ -6884,7 +6885,7 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "types" -version = "0.2.0" +version = "0.2.1" dependencies = [ "arbitrary", "beacon_chain", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 86b43a1a39..b23dd30de0 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -16,6 +16,7 @@ use crate::chain_config::ChainConfig; use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; +use crate::eth1_finalization_cache::{Eth1FinalizationCache, Eth1FinalizationData}; use crate::events::ServerSentEventHandler; use crate::execution_payload::{get_execution_payload, PreparePayloadHandle}; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; @@ -117,6 +118,9 @@ pub const ATTESTATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); 
/// validator pubkey cache. pub const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); +/// The timeout for the eth1 finalization cache +pub const ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_millis(200); + // These keys are all zero because they get stored in different columns, see `DBColumn` type. pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero(); pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero(); @@ -359,6 +363,8 @@ pub struct BeaconChain { pub(crate) snapshot_cache: TimeoutRwLock>, /// Caches the attester shuffling for a given epoch and shuffling key root. pub shuffling_cache: TimeoutRwLock, + /// A cache of eth1 deposit data at epoch boundaries for deposit finalization + pub eth1_finalization_cache: TimeoutRwLock, /// Caches the beacon block proposer shuffling for a given epoch and shuffling key root. pub beacon_proposer_cache: Mutex, /// Caches a map of `validator_index -> validator_pubkey`. @@ -2531,9 +2537,10 @@ impl BeaconChain { block, block_root, state, - parent_block: _, + parent_block, confirmed_state_roots, payload_verification_handle, + parent_eth1_finalization_data, } = execution_pending_block; let PayloadVerificationOutcome { @@ -2585,6 +2592,8 @@ impl BeaconChain { confirmed_state_roots, payload_verification_status, count_unrealized, + parent_block, + parent_eth1_finalization_data, ) }, "payload_verification_handle", @@ -2599,6 +2608,7 @@ impl BeaconChain { /// /// An error is returned if the block was unable to be imported. It may be partially imported /// (i.e., this function is not atomic). 
+ #[allow(clippy::too_many_arguments)] fn import_block( &self, signed_block: Arc>, @@ -2607,6 +2617,8 @@ impl BeaconChain { confirmed_state_roots: Vec, payload_verification_status: PayloadVerificationStatus, count_unrealized: CountUnrealized, + parent_block: SignedBlindedBeaconBlock, + parent_eth1_finalization_data: Eth1FinalizationData, ) -> Result> { let current_slot = self.slot()?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); @@ -2987,6 +2999,11 @@ impl BeaconChain { let parent_root = block.parent_root(); let slot = block.slot(); + let current_eth1_finalization_data = Eth1FinalizationData { + eth1_data: state.eth1_data().clone(), + eth1_deposit_index: state.eth1_deposit_index(), + }; + let current_finalized_checkpoint = state.finalized_checkpoint(); self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .ok_or(Error::SnapshotCacheLockTimeout) @@ -3060,6 +3077,57 @@ impl BeaconChain { ); } + // Do not write to eth1 finalization cache for blocks older than 5 epochs + // this helps reduce noise during sync + if block_delay_total + < self.slot_clock.slot_duration() * 5 * (T::EthSpec::slots_per_epoch() as u32) + { + let parent_block_epoch = parent_block.slot().epoch(T::EthSpec::slots_per_epoch()); + if parent_block_epoch < current_epoch { + // we've crossed epoch boundary, store Eth1FinalizationData + let (checkpoint, eth1_finalization_data) = + if current_slot % T::EthSpec::slots_per_epoch() == 0 { + // current block is the checkpoint + ( + Checkpoint { + epoch: current_epoch, + root: block_root, + }, + current_eth1_finalization_data, + ) + } else { + // parent block is the checkpoint + ( + Checkpoint { + epoch: current_epoch, + root: parent_block.canonical_root(), + }, + parent_eth1_finalization_data, + ) + }; + + if let Some(finalized_eth1_data) = self + .eth1_finalization_cache + .try_write_for(ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT) + .and_then(|mut cache| { + cache.insert(checkpoint, eth1_finalization_data); + 
cache.finalize(¤t_finalized_checkpoint) + }) + { + if let Some(eth1_chain) = self.eth1_chain.as_ref() { + let finalized_deposit_count = finalized_eth1_data.deposit_count; + eth1_chain.finalize_eth1_data(finalized_eth1_data); + debug!( + self.log, + "called eth1_chain.finalize_eth1_data()"; + "epoch" => current_finalized_checkpoint.epoch, + "deposit count" => finalized_deposit_count, + ); + } + } + } + } + // Inform the unknown block cache, in case it was waiting on this block. self.pre_finalization_block_cache .block_processed(block_root); diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 7f59f1cfec..104de57dbf 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -42,6 +42,7 @@ //! END //! //! ``` +use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, AllowOptimisticImport, PayloadNotifier, @@ -622,6 +623,7 @@ pub struct ExecutionPendingBlock { pub block_root: Hash256, pub state: BeaconState, pub parent_block: SignedBeaconBlock>, + pub parent_eth1_finalization_data: Eth1FinalizationData, pub confirmed_state_roots: Vec, pub payload_verification_handle: PayloadVerificationHandle, } @@ -1164,6 +1166,11 @@ impl ExecutionPendingBlock { .into()); } + let parent_eth1_finalization_data = Eth1FinalizationData { + eth1_data: state.eth1_data().clone(), + eth1_deposit_index: state.eth1_deposit_index(), + }; + let distance = block.slot().as_u64().saturating_sub(state.slot().as_u64()); for _ in 0..distance { let state_root = if parent.beacon_block.slot() == state.slot() { @@ -1419,6 +1426,7 @@ impl ExecutionPendingBlock { block_root, state, parent_block: parent.beacon_block, + parent_eth1_finalization_data, confirmed_state_roots, payload_verification_handle, }) diff --git 
a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 051b84f816..58bbb2b5c6 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,5 +1,6 @@ use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; use crate::eth1_chain::{CachingEth1Backend, SszEth1}; +use crate::eth1_finalization_cache::Eth1FinalizationCache; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use crate::head_tracker::HeadTracker; @@ -795,6 +796,7 @@ where head_for_snapshot_cache, )), shuffling_cache: TimeoutRwLock::new(ShufflingCache::new()), + eth1_finalization_cache: TimeoutRwLock::new(Eth1FinalizationCache::new(log.clone())), beacon_proposer_cache: <_>::default(), block_times_cache: <_>::default(), pre_finalization_block_cache: <_>::default(), diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 3d24becc84..25971bf85b 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -16,7 +16,6 @@ use store::{DBColumn, Error as StoreError, StoreItem}; use task_executor::TaskExecutor; use types::{ BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned, - DEPOSIT_TREE_DEPTH, }; type BlockNumber = u64; @@ -170,8 +169,8 @@ fn get_sync_status( #[derive(Encode, Decode, Clone)] pub struct SszEth1 { - use_dummy_backend: bool, - backend_bytes: Vec, + pub use_dummy_backend: bool, + pub backend_bytes: Vec, } impl StoreItem for SszEth1 { @@ -305,6 +304,12 @@ where } } + /// Set in motion the finalization of `Eth1Data`. This method is called during block import + /// so it should be fast. + pub fn finalize_eth1_data(&self, eth1_data: Eth1Data) { + self.backend.finalize_eth1_data(eth1_data); + } + /// Consumes `self`, returning the backend. 
pub fn into_backend(self) -> T { self.backend @@ -335,6 +340,10 @@ pub trait Eth1ChainBackend: Sized + Send + Sync { /// beacon node eth1 cache is. fn latest_cached_block(&self) -> Option; + /// Set in motion the finalization of `Eth1Data`. This method is called during block import + /// so it should be fast. + fn finalize_eth1_data(&self, eth1_data: Eth1Data); + /// Returns the block at the head of the chain (ignoring follow distance, etc). Used to obtain /// an idea of how up-to-date the remote eth1 node is. fn head_block(&self) -> Option; @@ -389,6 +398,8 @@ impl Eth1ChainBackend for DummyEth1ChainBackend { None } + fn finalize_eth1_data(&self, _eth1_data: Eth1Data) {} + fn head_block(&self) -> Option { None } @@ -547,7 +558,7 @@ impl Eth1ChainBackend for CachingEth1Backend { .deposits() .read() .cache - .get_deposits(next, last, deposit_count, DEPOSIT_TREE_DEPTH) + .get_deposits(next, last, deposit_count) .map_err(|e| Error::BackendError(format!("Failed to get deposits: {:?}", e))) .map(|(_deposit_root, deposits)| deposits) } @@ -558,6 +569,12 @@ impl Eth1ChainBackend for CachingEth1Backend { self.core.latest_cached_block() } + /// This only writes the eth1_data to a temporary cache so that the service + /// thread can later do the actual finalizing of the deposit tree. + fn finalize_eth1_data(&self, eth1_data: Eth1Data) { + self.core.set_to_finalize(Some(eth1_data)); + } + fn head_block(&self) -> Option { self.core.head_block() } diff --git a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs new file mode 100644 index 0000000000..7cf805a126 --- /dev/null +++ b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs @@ -0,0 +1,498 @@ +use slog::{debug, Logger}; +use std::cmp; +use std::collections::BTreeMap; +use types::{Checkpoint, Epoch, Eth1Data, Hash256 as Root}; + +/// The default size of the cache. +/// The beacon chain only looks at the last 4 epochs for finalization. 
+/// Add 1 for current epoch and 4 earlier epochs. +pub const DEFAULT_ETH1_CACHE_SIZE: usize = 5; + +/// These fields are named the same as the corresponding fields in the `BeaconState` +/// as this structure stores these values from the `BeaconState` at a `Checkpoint` +#[derive(Clone)] +pub struct Eth1FinalizationData { + pub eth1_data: Eth1Data, + pub eth1_deposit_index: u64, +} + +impl Eth1FinalizationData { + /// Ensures the deposit finalization conditions have been met. See: + /// https://eips.ethereum.org/EIPS/eip-4881#deposit-finalization-conditions + fn fully_imported(&self) -> bool { + self.eth1_deposit_index >= self.eth1_data.deposit_count + } +} + +/// Implements map from Checkpoint -> Eth1CacheData +pub struct CheckpointMap { + capacity: usize, + // There shouldn't be more than a couple of potential checkpoints at the same + // epoch. Searching through a vector for the matching Root should be faster + // than using another map from Root->Eth1CacheData + store: BTreeMap>, +} + +impl Default for CheckpointMap { + fn default() -> Self { + Self::new() + } +} + +/// Provides a map of `Eth1CacheData` referenced by `Checkpoint` +/// +/// ## Cache Queuing +/// +/// The cache keeps a maximum number of (`capacity`) epochs. Because there may be +/// forks at the epoch boundary, it's possible that there exists more than one +/// `Checkpoint` for the same `Epoch`. This cache will store all checkpoints for +/// a given `Epoch`. 
When adding data for a new `Checkpoint` would cause the number +/// of `Epoch`s stored to exceed `capacity`, the data for oldest `Epoch` is dropped +impl CheckpointMap { + pub fn new() -> Self { + CheckpointMap { + capacity: DEFAULT_ETH1_CACHE_SIZE, + store: BTreeMap::new(), + } + } + + pub fn with_capacity(capacity: usize) -> Self { + CheckpointMap { + capacity: cmp::max(1, capacity), + store: BTreeMap::new(), + } + } + + pub fn insert(&mut self, checkpoint: Checkpoint, eth1_finalization_data: Eth1FinalizationData) { + self.store + .entry(checkpoint.epoch) + .or_insert_with(Vec::new) + .push((checkpoint.root, eth1_finalization_data)); + + // faster to reduce size after the fact than do pre-checking to see + // if the current data would increase the size of the BTreeMap + while self.store.len() > self.capacity { + let oldest_stored_epoch = self.store.keys().next().cloned().unwrap(); + self.store.remove(&oldest_stored_epoch); + } + } + + pub fn get(&self, checkpoint: &Checkpoint) -> Option<&Eth1FinalizationData> { + match self.store.get(&checkpoint.epoch) { + Some(vec) => { + for (root, data) in vec { + if *root == checkpoint.root { + return Some(data); + } + } + None + } + None => None, + } + } + + #[cfg(test)] + pub fn len(&self) -> usize { + self.store.len() + } +} + +/// This cache stores `Eth1CacheData` that could potentially be finalized within 4 +/// future epochs. +pub struct Eth1FinalizationCache { + by_checkpoint: CheckpointMap, + pending_eth1: BTreeMap, + last_finalized: Option, + log: Logger, +} + +/// Provides a cache of `Eth1CacheData` at epoch boundaries. This is used to +/// finalize deposits when a new epoch is finalized. 
+/// +impl Eth1FinalizationCache { + pub fn new(log: Logger) -> Self { + Eth1FinalizationCache { + by_checkpoint: CheckpointMap::new(), + pending_eth1: BTreeMap::new(), + last_finalized: None, + log, + } + } + + pub fn with_capacity(log: Logger, capacity: usize) -> Self { + Eth1FinalizationCache { + by_checkpoint: CheckpointMap::with_capacity(capacity), + pending_eth1: BTreeMap::new(), + last_finalized: None, + log, + } + } + + pub fn insert(&mut self, checkpoint: Checkpoint, eth1_finalization_data: Eth1FinalizationData) { + if !eth1_finalization_data.fully_imported() { + self.pending_eth1.insert( + eth1_finalization_data.eth1_data.deposit_count, + eth1_finalization_data.eth1_data.clone(), + ); + debug!( + self.log, + "Eth1Cache: inserted pending eth1"; + "eth1_data.deposit_count" => eth1_finalization_data.eth1_data.deposit_count, + "eth1_deposit_index" => eth1_finalization_data.eth1_deposit_index, + ); + } + self.by_checkpoint + .insert(checkpoint, eth1_finalization_data); + } + + pub fn finalize(&mut self, checkpoint: &Checkpoint) -> Option { + if let Some(eth1_finalized_data) = self.by_checkpoint.get(checkpoint) { + let finalized_deposit_index = eth1_finalized_data.eth1_deposit_index; + let mut result = None; + while let Some(pending_count) = self.pending_eth1.keys().next().cloned() { + if finalized_deposit_index >= pending_count { + result = self.pending_eth1.remove(&pending_count); + debug!( + self.log, + "Eth1Cache: dropped pending eth1"; + "pending_count" => pending_count, + "finalized_deposit_index" => finalized_deposit_index, + ); + } else { + break; + } + } + if eth1_finalized_data.fully_imported() { + result = Some(eth1_finalized_data.eth1_data.clone()) + } + if result.is_some() { + self.last_finalized = result; + } + self.last_finalized.clone() + } else { + debug!( + self.log, + "Eth1Cache: cache miss"; + "epoch" => checkpoint.epoch, + ); + None + } + } + + #[cfg(test)] + pub fn by_checkpoint(&self) -> &CheckpointMap { + &self.by_checkpoint + } + + 
#[cfg(test)] + pub fn pending_eth1(&self) -> &BTreeMap { + &self.pending_eth1 + } +} + +#[cfg(test)] +pub mod tests { + use super::*; + use sloggers::null::NullLoggerBuilder; + use sloggers::Build; + use std::collections::HashMap; + + const SLOTS_PER_EPOCH: u64 = 32; + const MAX_DEPOSITS: u64 = 16; + const EPOCHS_PER_ETH1_VOTING_PERIOD: u64 = 64; + + fn eth1cache() -> Eth1FinalizationCache { + let log_builder = NullLoggerBuilder; + Eth1FinalizationCache::new(log_builder.build().expect("should build log")) + } + + fn random_eth1_data(deposit_count: u64) -> Eth1Data { + Eth1Data { + deposit_root: Root::random(), + deposit_count, + block_hash: Root::random(), + } + } + + fn random_checkpoint(epoch: u64) -> Checkpoint { + Checkpoint { + epoch: epoch.into(), + root: Root::random(), + } + } + + fn random_checkpoints(n: usize) -> Vec { + let mut result = Vec::with_capacity(n); + for epoch in 0..n { + result.push(random_checkpoint(epoch as u64)) + } + result + } + + #[test] + fn fully_imported_deposits() { + let epochs = 16; + let deposits_imported = 128; + + let eth1data = random_eth1_data(deposits_imported); + let checkpoints = random_checkpoints(epochs as usize); + let mut eth1cache = eth1cache(); + + for epoch in 4..epochs { + assert_eq!( + eth1cache.by_checkpoint().len(), + cmp::min((epoch - 4) as usize, DEFAULT_ETH1_CACHE_SIZE), + "Unexpected cache size" + ); + + let checkpoint = checkpoints + .get(epoch as usize) + .expect("should get checkpoint"); + eth1cache.insert( + *checkpoint, + Eth1FinalizationData { + eth1_data: eth1data.clone(), + eth1_deposit_index: deposits_imported, + }, + ); + + let finalized_checkpoint = checkpoints + .get((epoch - 4) as usize) + .expect("should get finalized checkpoint"); + assert!( + eth1cache.pending_eth1().is_empty(), + "Deposits are fully imported so pending cache should be empty" + ); + if epoch < 8 { + assert_eq!( + eth1cache.finalize(finalized_checkpoint), + None, + "Should have cache miss" + ); + } else { + assert_eq!( + 
eth1cache.finalize(finalized_checkpoint), + Some(eth1data.clone()), + "Should have cache hit" + ) + } + } + } + + #[test] + fn partially_imported_deposits() { + let epochs = 16; + let initial_deposits_imported = 1024; + let deposits_imported_per_epoch = MAX_DEPOSITS * SLOTS_PER_EPOCH; + let full_import_epoch = 13; + let total_deposits = + initial_deposits_imported + deposits_imported_per_epoch * full_import_epoch; + + let eth1data = random_eth1_data(total_deposits); + let checkpoints = random_checkpoints(epochs as usize); + let mut eth1cache = eth1cache(); + + for epoch in 0..epochs { + assert_eq!( + eth1cache.by_checkpoint().len(), + cmp::min(epoch as usize, DEFAULT_ETH1_CACHE_SIZE), + "Unexpected cache size" + ); + + let checkpoint = checkpoints + .get(epoch as usize) + .expect("should get checkpoint"); + let deposits_imported = cmp::min( + total_deposits, + initial_deposits_imported + deposits_imported_per_epoch * epoch, + ); + eth1cache.insert( + *checkpoint, + Eth1FinalizationData { + eth1_data: eth1data.clone(), + eth1_deposit_index: deposits_imported, + }, + ); + + if epoch >= 4 { + let finalized_epoch = epoch - 4; + let finalized_checkpoint = checkpoints + .get(finalized_epoch as usize) + .expect("should get finalized checkpoint"); + if finalized_epoch < full_import_epoch { + assert_eq!( + eth1cache.finalize(finalized_checkpoint), + None, + "Deposits not fully finalized so cache should return no Eth1Data", + ); + assert_eq!( + eth1cache.pending_eth1().len(), + 1, + "Deposits not fully finalized. Pending eth1 cache should have 1 entry" + ); + } else { + assert_eq!( + eth1cache.finalize(finalized_checkpoint), + Some(eth1data.clone()), + "Deposits fully imported and finalized. Cache should return Eth1Data. finalized_deposits[{}]", + (initial_deposits_imported + deposits_imported_per_epoch * finalized_epoch), + ); + assert!( + eth1cache.pending_eth1().is_empty(), + "Deposits fully imported and finalized. 
Pending cache should be empty" + ); + } + } + } + } + + #[test] + fn fork_at_epoch_boundary() { + let epochs = 12; + let deposits_imported = 128; + + let eth1data = random_eth1_data(deposits_imported); + let checkpoints = random_checkpoints(epochs as usize); + let mut forks = HashMap::new(); + let mut eth1cache = eth1cache(); + + for epoch in 0..epochs { + assert_eq!( + eth1cache.by_checkpoint().len(), + cmp::min(epoch as usize, DEFAULT_ETH1_CACHE_SIZE), + "Unexpected cache size" + ); + + let checkpoint = checkpoints + .get(epoch as usize) + .expect("should get checkpoint"); + eth1cache.insert( + *checkpoint, + Eth1FinalizationData { + eth1_data: eth1data.clone(), + eth1_deposit_index: deposits_imported, + }, + ); + // lets put a fork at every third epoch + if epoch % 3 == 0 { + let fork = random_checkpoint(epoch); + eth1cache.insert( + fork, + Eth1FinalizationData { + eth1_data: eth1data.clone(), + eth1_deposit_index: deposits_imported, + }, + ); + forks.insert(epoch as usize, fork); + } + + assert!( + eth1cache.pending_eth1().is_empty(), + "Deposits are fully imported so pending cache should be empty" + ); + if epoch >= 4 { + let finalized_epoch = (epoch - 4) as usize; + let finalized_checkpoint = if finalized_epoch % 3 == 0 { + forks.get(&finalized_epoch).expect("should get fork") + } else { + checkpoints + .get(finalized_epoch) + .expect("should get checkpoint") + }; + assert_eq!( + eth1cache.finalize(finalized_checkpoint), + Some(eth1data.clone()), + "Should have cache hit" + ); + if finalized_epoch >= 3 { + let dropped_epoch = finalized_epoch - 3; + if let Some(dropped_checkpoint) = forks.get(&dropped_epoch) { + // got checkpoint for an old fork that should no longer + // be in the cache because it is from too long ago + assert_eq!( + eth1cache.finalize(dropped_checkpoint), + None, + "Should have cache miss" + ); + } + } + } + } + } + + #[test] + fn massive_deposit_queue() { + // Simulating a situation where deposits don't get imported within an eth1 voting 
period + let eth1_voting_periods = 8; + let initial_deposits_imported = 1024; + let deposits_imported_per_epoch = MAX_DEPOSITS * SLOTS_PER_EPOCH; + let initial_deposit_queue = + deposits_imported_per_epoch * EPOCHS_PER_ETH1_VOTING_PERIOD * 2 + 32; + let new_deposits_per_voting_period = + EPOCHS_PER_ETH1_VOTING_PERIOD * deposits_imported_per_epoch / 2; + + let mut epoch_data = BTreeMap::new(); + let mut eth1s_by_count = BTreeMap::new(); + let mut eth1cache = eth1cache(); + let mut last_period_deposits = initial_deposits_imported; + for period in 0..eth1_voting_periods { + let period_deposits = initial_deposits_imported + + initial_deposit_queue + + period * new_deposits_per_voting_period; + let period_eth1_data = random_eth1_data(period_deposits); + eth1s_by_count.insert(period_eth1_data.deposit_count, period_eth1_data.clone()); + + for epoch_mod_period in 0..EPOCHS_PER_ETH1_VOTING_PERIOD { + let epoch = period * EPOCHS_PER_ETH1_VOTING_PERIOD + epoch_mod_period; + let checkpoint = random_checkpoint(epoch); + let deposits_imported = cmp::min( + period_deposits, + last_period_deposits + deposits_imported_per_epoch * epoch_mod_period, + ); + eth1cache.insert( + checkpoint, + Eth1FinalizationData { + eth1_data: period_eth1_data.clone(), + eth1_deposit_index: deposits_imported, + }, + ); + epoch_data.insert(epoch, (checkpoint, deposits_imported)); + + if epoch >= 4 { + let finalized_epoch = epoch - 4; + let (finalized_checkpoint, finalized_deposits) = epoch_data + .get(&finalized_epoch) + .expect("should get epoch data"); + + let pending_eth1s = eth1s_by_count.range((finalized_deposits + 1)..).count(); + let last_finalized_eth1 = eth1s_by_count + .range(0..(finalized_deposits + 1)) + .map(|(_, eth1)| eth1) + .last() + .cloned(); + assert_eq!( + eth1cache.finalize(finalized_checkpoint), + last_finalized_eth1, + "finalized checkpoint mismatch", + ); + assert_eq!( + eth1cache.pending_eth1().len(), + pending_eth1s, + "pending eth1 mismatch" + ); + } + } + + // remove 
unneeded stuff from old epochs + while epoch_data.len() > DEFAULT_ETH1_CACHE_SIZE { + let oldest_stored_epoch = epoch_data + .keys() + .next() + .cloned() + .expect("should get oldest epoch"); + epoch_data.remove(&oldest_stored_epoch); + } + last_period_deposits = period_deposits; + } + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index fbcd8f7fb7..5ead5311e5 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -14,6 +14,7 @@ pub mod chain_config; mod early_attester_cache; mod errors; pub mod eth1_chain; +mod eth1_finalization_cache; pub mod events; pub mod execution_payload; pub mod fork_choice_signal; diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 15b0f39f3a..3ee77f7bbd 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -2,13 +2,15 @@ mod migration_schema_v10; mod migration_schema_v11; mod migration_schema_v12; +mod migration_schema_v13; mod migration_schema_v6; mod migration_schema_v7; mod migration_schema_v8; mod migration_schema_v9; mod types; -use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; +use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY}; +use crate::eth1_chain::SszEth1; use crate::persisted_fork_choice::{ PersistedForkChoiceV1, PersistedForkChoiceV10, PersistedForkChoiceV11, PersistedForkChoiceV7, PersistedForkChoiceV8, @@ -24,6 +26,7 @@ use store::{Error as StoreError, StoreItem}; /// Migrate the database from one schema version to another, applying all requisite mutations. pub fn migrate_schema( db: Arc>, + deposit_contract_deploy_block: u64, datadir: &Path, from: SchemaVersion, to: SchemaVersion, @@ -31,19 +34,51 @@ pub fn migrate_schema( spec: &ChainSpec, ) -> Result<(), StoreError> { match (from, to) { - // Migrating from the current schema version to iself is always OK, a no-op. 
+ // Migrating from the current schema version to itself is always OK, a no-op. (_, _) if from == to && to == CURRENT_SCHEMA_VERSION => Ok(()), // Upgrade across multiple versions by recursively migrating one step at a time. (_, _) if from.as_u64() + 1 < to.as_u64() => { let next = SchemaVersion(from.as_u64() + 1); - migrate_schema::(db.clone(), datadir, from, next, log.clone(), spec)?; - migrate_schema::(db, datadir, next, to, log, spec) + migrate_schema::( + db.clone(), + deposit_contract_deploy_block, + datadir, + from, + next, + log.clone(), + spec, + )?; + migrate_schema::( + db, + deposit_contract_deploy_block, + datadir, + next, + to, + log, + spec, + ) } // Downgrade across multiple versions by recursively migrating one step at a time. (_, _) if to.as_u64() + 1 < from.as_u64() => { let next = SchemaVersion(from.as_u64() - 1); - migrate_schema::(db.clone(), datadir, from, next, log.clone(), spec)?; - migrate_schema::(db, datadir, next, to, log, spec) + migrate_schema::( + db.clone(), + deposit_contract_deploy_block, + datadir, + from, + next, + log.clone(), + spec, + )?; + migrate_schema::( + db, + deposit_contract_deploy_block, + datadir, + next, + to, + log, + spec, + ) } // @@ -207,6 +242,55 @@ pub fn migrate_schema( let ops = migration_schema_v12::downgrade_from_v12::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) } + (SchemaVersion(12), SchemaVersion(13)) => { + let mut ops = vec![]; + if let Some(persisted_eth1_v1) = db.get_item::(Ð1_CACHE_DB_KEY)? 
{ + let upgraded_eth1_cache = + match migration_schema_v13::update_eth1_cache(persisted_eth1_v1) { + Ok(upgraded_eth1) => upgraded_eth1, + Err(e) => { + warn!(log, "Failed to deserialize SszEth1CacheV1"; "error" => ?e); + warn!(log, "Reinitializing eth1 cache"); + migration_schema_v13::reinitialized_eth1_cache_v13( + deposit_contract_deploy_block, + ) + } + }; + ops.push(upgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY)); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } + (SchemaVersion(13), SchemaVersion(12)) => { + let mut ops = vec![]; + if let Some(persisted_eth1_v13) = db.get_item::(Ð1_CACHE_DB_KEY)? { + let downgraded_eth1_cache = match migration_schema_v13::downgrade_eth1_cache( + persisted_eth1_v13, + ) { + Ok(Some(downgraded_eth1)) => downgraded_eth1, + Ok(None) => { + warn!(log, "Unable to downgrade eth1 cache from newer version: reinitializing eth1 cache"); + migration_schema_v13::reinitialized_eth1_cache_v1( + deposit_contract_deploy_block, + ) + } + Err(e) => { + warn!(log, "Unable to downgrade eth1 cache from newer version: failed to deserialize SszEth1CacheV13"; "error" => ?e); + warn!(log, "Reinitializing eth1 cache"); + migration_schema_v13::reinitialized_eth1_cache_v1( + deposit_contract_deploy_block, + ) + } + }; + ops.push(downgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY)); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } // Anything else is an error. 
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs new file mode 100644 index 0000000000..d4ac974603 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs @@ -0,0 +1,150 @@ +use crate::eth1_chain::SszEth1; +use eth1::{BlockCache, SszDepositCacheV1, SszDepositCacheV13, SszEth1CacheV1, SszEth1CacheV13}; +use ssz::{Decode, Encode}; +use state_processing::common::DepositDataTree; +use store::Error; +use types::DEPOSIT_TREE_DEPTH; + +pub fn update_eth1_cache(persisted_eth1_v1: SszEth1) -> Result { + if persisted_eth1_v1.use_dummy_backend { + // backend_bytes is empty when using dummy backend + return Ok(persisted_eth1_v1); + } + + let SszEth1 { + use_dummy_backend, + backend_bytes, + } = persisted_eth1_v1; + + let ssz_eth1_cache_v1 = SszEth1CacheV1::from_ssz_bytes(&backend_bytes)?; + let SszEth1CacheV1 { + block_cache, + deposit_cache: deposit_cache_v1, + last_processed_block, + } = ssz_eth1_cache_v1; + + let SszDepositCacheV1 { + logs, + leaves, + deposit_contract_deploy_block, + deposit_roots, + } = deposit_cache_v1; + + let deposit_cache_v13 = SszDepositCacheV13 { + logs, + leaves, + deposit_contract_deploy_block, + finalized_deposit_count: 0, + finalized_block_height: deposit_contract_deploy_block.saturating_sub(1), + deposit_tree_snapshot: None, + deposit_roots, + }; + + let ssz_eth1_cache_v13 = SszEth1CacheV13 { + block_cache, + deposit_cache: deposit_cache_v13, + last_processed_block, + }; + + let persisted_eth1_v13 = SszEth1 { + use_dummy_backend, + backend_bytes: ssz_eth1_cache_v13.as_ssz_bytes(), + }; + + Ok(persisted_eth1_v13) +} + +pub fn downgrade_eth1_cache(persisted_eth1_v13: SszEth1) -> Result, Error> { + if persisted_eth1_v13.use_dummy_backend { + // backend_bytes is empty when using dummy backend + return Ok(Some(persisted_eth1_v13)); + } + + let 
SszEth1 { + use_dummy_backend, + backend_bytes, + } = persisted_eth1_v13; + + let ssz_eth1_cache_v13 = SszEth1CacheV13::from_ssz_bytes(&backend_bytes)?; + let SszEth1CacheV13 { + block_cache, + deposit_cache: deposit_cache_v13, + last_processed_block, + } = ssz_eth1_cache_v13; + + let SszDepositCacheV13 { + logs, + leaves, + deposit_contract_deploy_block, + finalized_deposit_count, + finalized_block_height: _, + deposit_tree_snapshot, + deposit_roots, + } = deposit_cache_v13; + + if finalized_deposit_count == 0 && deposit_tree_snapshot.is_none() { + // This tree was never finalized and can be directly downgraded to v1 without re-initializing + let deposit_cache_v1 = SszDepositCacheV1 { + logs, + leaves, + deposit_contract_deploy_block, + deposit_roots, + }; + let ssz_eth1_cache_v1 = SszEth1CacheV1 { + block_cache, + deposit_cache: deposit_cache_v1, + last_processed_block, + }; + return Ok(Some(SszEth1 { + use_dummy_backend, + backend_bytes: ssz_eth1_cache_v1.as_ssz_bytes(), + })); + } + // deposit cache was finalized; can't downgrade + Ok(None) +} + +pub fn reinitialized_eth1_cache_v13(deposit_contract_deploy_block: u64) -> SszEth1 { + let empty_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); + let deposit_cache_v13 = SszDepositCacheV13 { + logs: vec![], + leaves: vec![], + deposit_contract_deploy_block, + finalized_deposit_count: 0, + finalized_block_height: deposit_contract_deploy_block.saturating_sub(1), + deposit_tree_snapshot: empty_tree.get_snapshot(), + deposit_roots: vec![empty_tree.root()], + }; + + let ssz_eth1_cache_v13 = SszEth1CacheV13 { + block_cache: BlockCache::default(), + deposit_cache: deposit_cache_v13, + last_processed_block: None, + }; + + SszEth1 { + use_dummy_backend: false, + backend_bytes: ssz_eth1_cache_v13.as_ssz_bytes(), + } +} + +pub fn reinitialized_eth1_cache_v1(deposit_contract_deploy_block: u64) -> SszEth1 { + let empty_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); + let deposit_cache_v1 = 
SszDepositCacheV1 { + logs: vec![], + leaves: vec![], + deposit_contract_deploy_block, + deposit_roots: vec![empty_tree.root()], + }; + + let ssz_eth1_cache_v1 = SszEth1CacheV1 { + block_cache: BlockCache::default(), + deposit_cache: deposit_cache_v1, + last_processed_block: None, + }; + + SszEth1 { + use_dummy_backend: false, + backend_bytes: ssz_eth1_cache_v1.as_ssz_bytes(), + } +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index d5a8880381..3b4a62f5a9 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1432,8 +1432,9 @@ where // Building proofs let mut proofs = vec![]; for i in 0..leaves.len() { - let (_, mut proof) = - tree.generate_proof(i, self.spec.deposit_contract_tree_depth as usize); + let (_, mut proof) = tree + .generate_proof(i, self.spec.deposit_contract_tree_depth as usize) + .expect("should generate proof"); proof.push(Hash256::from_slice(&int_to_bytes32(leaves.len() as u64))); proofs.push(proof); } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index efd91cfdf6..c89980e6e8 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -277,8 +277,52 @@ where BeaconNodeHttpClient::new(url, Timeouts::set_all(CHECKPOINT_SYNC_HTTP_TIMEOUT)); let slots_per_epoch = TEthSpec::slots_per_epoch(); - debug!(context.log(), "Downloading finalized block"); + let deposit_snapshot = if config.sync_eth1_chain { + // We want to fetch deposit snapshot before fetching the finalized beacon state to + // ensure that the snapshot is not newer than the beacon state that satisfies the + // deposit finalization conditions + debug!(context.log(), "Downloading deposit snapshot"); + let deposit_snapshot_result = remote + .get_deposit_snapshot() + .await + .map_err(|e| match e { + ApiError::InvalidSsz(e) => format!( + "Unable to parse SSZ: {:?}. 
Ensure the checkpoint-sync-url refers to a \ + node for the correct network", + e + ), + e => format!("Error fetching deposit snapshot from remote: {:?}", e), + }); + match deposit_snapshot_result { + Ok(Some(deposit_snapshot)) => { + if deposit_snapshot.is_valid() { + Some(deposit_snapshot) + } else { + warn!(context.log(), "Remote BN sent invalid deposit snapshot!"); + None + } + } + Ok(None) => { + warn!( + context.log(), + "Remote BN does not support EIP-4881 fast deposit sync" + ); + None + } + Err(e) => { + warn!( + context.log(), + "Remote BN does not support EIP-4881 fast deposit sync"; + "error" => e + ); + None + } + } + } else { + None + }; + debug!(context.log(), "Downloading finalized block"); // Find a suitable finalized block on an epoch boundary. let mut block = remote .get_beacon_blocks_ssz::(BlockId::Finalized, &spec) @@ -362,9 +406,33 @@ where "state_root" => ?state_root, ); + let service = + deposit_snapshot.and_then(|snapshot| match Eth1Service::from_deposit_snapshot( + config.eth1, + context.log().clone(), + spec, + &snapshot, + ) { + Ok(service) => { + info!( + context.log(), + "Loaded deposit tree snapshot"; + "deposits loaded" => snapshot.deposit_count, + ); + Some(service) + } + Err(e) => { + warn!(context.log(), + "Unable to load deposit snapshot"; + "error" => ?e + ); + None + } + }); + builder .weak_subjectivity_state(state, block, genesis_state) - .map(|v| (v, None))? + .map(|v| (v, service))? 
} ClientGenesis::DepositContract => { info!( @@ -810,9 +878,16 @@ where self.freezer_db_path = Some(cold_path.into()); let inner_spec = spec.clone(); + let deposit_contract_deploy_block = context + .eth2_network_config + .as_ref() + .map(|config| config.deposit_contract_deploy_block) + .unwrap_or(0); + let schema_upgrade = |db, from, to| { migrate_schema::>( db, + deposit_contract_deploy_block, datadir, from, to, diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 930301256c..7e99c43e7d 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -25,6 +25,7 @@ eth2_ssz_derive = "0.3.0" tree_hash = "0.4.1" parking_lot = "0.12.0" slog = "2.5.2" +superstruct = "0.5.0" tokio = { version = "1.14.0", features = ["full"] } state_processing = { path = "../../consensus/state_processing" } lighthouse_metrics = { path = "../../common/lighthouse_metrics"} diff --git a/beacon_node/eth1/src/block_cache.rs b/beacon_node/eth1/src/block_cache.rs index 5999944f4a..26e160115e 100644 --- a/beacon_node/eth1/src/block_cache.rs +++ b/beacon_node/eth1/src/block_cache.rs @@ -1,7 +1,10 @@ use ssz_derive::{Decode, Encode}; +use std::collections::HashMap; use std::ops::RangeInclusive; pub use eth2::lighthouse::Eth1Block; +use eth2::types::Hash256; +use std::sync::Arc; #[derive(Debug, PartialEq, Clone)] pub enum Error { @@ -20,7 +23,9 @@ pub enum Error { /// timestamp. #[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] pub struct BlockCache { - blocks: Vec, + blocks: Vec>, + #[ssz(skip_serializing, skip_deserializing)] + by_hash: HashMap>, } impl BlockCache { @@ -36,12 +41,12 @@ impl BlockCache { /// Returns the earliest (lowest timestamp) block, if any. pub fn earliest_block(&self) -> Option<&Eth1Block> { - self.blocks.first() + self.blocks.first().map(|ptr| ptr.as_ref()) } /// Returns the latest (highest timestamp) block, if any. 
pub fn latest_block(&self) -> Option<&Eth1Block> { - self.blocks.last() + self.blocks.last().map(|ptr| ptr.as_ref()) } /// Returns the timestamp of the earliest block in the cache (if any). @@ -71,7 +76,7 @@ impl BlockCache { /// - Monotonically increasing block numbers. /// - Non-uniformly increasing block timestamps. pub fn iter(&self) -> impl DoubleEndedIterator + Clone { - self.blocks.iter() + self.blocks.iter().map(|ptr| ptr.as_ref()) } /// Shortens the cache, keeping the latest (by block number) `len` blocks while dropping the @@ -80,7 +85,11 @@ impl BlockCache { /// If `len` is greater than the vector's current length, this has no effect. pub fn truncate(&mut self, len: usize) { if len < self.blocks.len() { - self.blocks = self.blocks.split_off(self.blocks.len() - len); + let remaining = self.blocks.split_off(self.blocks.len() - len); + for block in &self.blocks { + self.by_hash.remove(&block.hash); + } + self.blocks = remaining; } } @@ -92,12 +101,27 @@ impl BlockCache { /// Returns a block with the corresponding number, if any. pub fn block_by_number(&self, block_number: u64) -> Option<&Eth1Block> { - self.blocks.get( - self.blocks - .as_slice() - .binary_search_by(|block| block.number.cmp(&block_number)) - .ok()?, - ) + self.blocks + .get( + self.blocks + .as_slice() + .binary_search_by(|block| block.number.cmp(&block_number)) + .ok()?, + ) + .map(|ptr| ptr.as_ref()) + } + + /// Returns a block with the corresponding hash, if any. + pub fn block_by_hash(&self, block_hash: &Hash256) -> Option<&Eth1Block> { + self.by_hash.get(block_hash).map(|ptr| ptr.as_ref()) + } + + /// Rebuilds the by_hash map + pub fn rebuild_by_hash_map(&mut self) { + self.by_hash.clear(); + for block in self.blocks.iter() { + self.by_hash.insert(block.hash, block.clone()); + } } /// Insert an `Eth1Snapshot` into `self`, allowing future queries. 
@@ -161,7 +185,9 @@ impl BlockCache { } } - self.blocks.push(block); + let ptr = Arc::new(block); + self.by_hash.insert(ptr.hash, ptr.clone()); + self.blocks.push(ptr); Ok(()) } @@ -269,6 +295,8 @@ mod tests { .expect("should add consecutive blocks with duplicate timestamps"); } + let blocks = blocks.into_iter().map(Arc::new).collect::>(); + assert_eq!(cache.blocks, blocks, "should have added all blocks"); } } diff --git a/beacon_node/eth1/src/deposit_cache.rs b/beacon_node/eth1/src/deposit_cache.rs index 078e3602f5..ab07b380d1 100644 --- a/beacon_node/eth1/src/deposit_cache.rs +++ b/beacon_node/eth1/src/deposit_cache.rs @@ -1,9 +1,10 @@ -use execution_layer::http::deposit_log::DepositLog; +use crate::{DepositLog, Eth1Block}; use ssz_derive::{Decode, Encode}; use state_processing::common::DepositDataTree; use std::cmp::Ordering; +use superstruct::superstruct; use tree_hash::TreeHash; -use types::{Deposit, Hash256, DEPOSIT_TREE_DEPTH}; +use types::{Deposit, DepositTreeSnapshot, Hash256, DEPOSIT_TREE_DEPTH}; #[derive(Debug, PartialEq)] pub enum Error { @@ -21,22 +22,53 @@ pub enum Error { /// A log with the given index is already present in the cache and it does not match the one /// provided. DuplicateDistinctLog(u64), + /// Attempted to insert log with given index after the log had been finalized + FinalizedLogInsert { + log_index: u64, + finalized_index: u64, + }, /// The deposit count must always be large enough to account for the requested deposit range. /// /// E.g., you cannot request deposit 10 when the deposit count is 9. 
DepositCountInvalid { deposit_count: u64, range_end: u64 }, + /// You can't request deposits on or before the finalized deposit + DepositRangeInvalid { + range_start: u64, + finalized_count: u64, + }, + /// You can't finalize what's already been finalized and the cache must have the logs + /// that you wish to finalize + InvalidFinalizeIndex { + requested_count: u64, + currently_finalized: u64, + deposit_count: u64, + }, /// Error with the merkle tree for deposits. DepositTree(merkle_proof::MerkleTreeError), /// An unexpected condition was encountered. Internal(String), + /// This is for errors that should never occur + PleaseNotifyTheDevs, } -#[derive(Encode, Decode, Clone)] +pub type SszDepositCache = SszDepositCacheV13; + +#[superstruct( + variants(V1, V13), + variant_attributes(derive(Encode, Decode, Clone)), + no_enum +)] pub struct SszDepositCache { - logs: Vec, - leaves: Vec, - deposit_contract_deploy_block: u64, - deposit_roots: Vec, + pub logs: Vec, + pub leaves: Vec, + pub deposit_contract_deploy_block: u64, + #[superstruct(only(V13))] + pub finalized_deposit_count: u64, + #[superstruct(only(V13))] + pub finalized_block_height: u64, + #[superstruct(only(V13))] + pub deposit_tree_snapshot: Option, + pub deposit_roots: Vec, } impl SszDepositCache { @@ -45,13 +77,37 @@ impl SszDepositCache { logs: cache.logs.clone(), leaves: cache.leaves.clone(), deposit_contract_deploy_block: cache.deposit_contract_deploy_block, + finalized_deposit_count: cache.finalized_deposit_count, + finalized_block_height: cache.finalized_block_height, + deposit_tree_snapshot: cache.deposit_tree.get_snapshot(), deposit_roots: cache.deposit_roots.clone(), } } pub fn to_deposit_cache(&self) -> Result { - let deposit_tree = - DepositDataTree::create(&self.leaves, self.leaves.len(), DEPOSIT_TREE_DEPTH); + let deposit_tree = self + .deposit_tree_snapshot + .as_ref() + .map(|snapshot| { + let mut tree = DepositDataTree::from_snapshot(snapshot, DEPOSIT_TREE_DEPTH) + .map_err(|e| 
format!("Invalid SszDepositCache: {:?}", e))?; + for leaf in &self.leaves { + tree.push_leaf(*leaf).map_err(|e| { + format!("Invalid SszDepositCache: unable to push leaf: {:?}", e) + })?; + } + Ok::<_, String>(tree) + }) + .unwrap_or_else(|| { + // deposit_tree_snapshot = None (tree was never finalized) + // Create DepositDataTree from leaves + Ok(DepositDataTree::create( + &self.leaves, + self.leaves.len(), + DEPOSIT_TREE_DEPTH, + )) + })?; + // Check for invalid SszDepositCache conditions if self.leaves.len() != self.logs.len() { return Err("Invalid SszDepositCache: logs and leaves should have equal length".into()); @@ -67,6 +123,8 @@ impl SszDepositCache { logs: self.logs.clone(), leaves: self.leaves.clone(), deposit_contract_deploy_block: self.deposit_contract_deploy_block, + finalized_deposit_count: self.finalized_deposit_count, + finalized_block_height: self.finalized_block_height, deposit_tree, deposit_roots: self.deposit_roots.clone(), }) @@ -76,10 +134,13 @@ impl SszDepositCache { /// Mirrors the merkle tree of deposits in the eth1 deposit contract. /// /// Provides `Deposit` objects with merkle proofs included. +#[cfg_attr(test, derive(PartialEq))] pub struct DepositCache { logs: Vec, leaves: Vec, deposit_contract_deploy_block: u64, + finalized_deposit_count: u64, + finalized_block_height: u64, /// An incremental merkle tree which represents the current state of the /// deposit contract tree. deposit_tree: DepositDataTree, @@ -96,6 +157,8 @@ impl Default for DepositCache { logs: Vec::new(), leaves: Vec::new(), deposit_contract_deploy_block: 1, + finalized_deposit_count: 0, + finalized_block_height: 0, deposit_tree, deposit_roots, } @@ -114,33 +177,111 @@ impl DepositCache { pub fn new(deposit_contract_deploy_block: u64) -> Self { DepositCache { deposit_contract_deploy_block, + finalized_block_height: deposit_contract_deploy_block.saturating_sub(1), ..Self::default() } } - /// Returns the number of deposits available in the cache. 
+ pub fn from_deposit_snapshot( + deposit_contract_deploy_block: u64, + snapshot: &DepositTreeSnapshot, + ) -> Result { + let deposit_tree = DepositDataTree::from_snapshot(snapshot, DEPOSIT_TREE_DEPTH) + .map_err(|e| format!("Invalid DepositSnapshot: {:?}", e))?; + Ok(DepositCache { + logs: Vec::new(), + leaves: Vec::new(), + deposit_contract_deploy_block, + finalized_deposit_count: snapshot.deposit_count, + finalized_block_height: snapshot.execution_block_height, + deposit_tree, + deposit_roots: vec![snapshot.deposit_root], + }) + } + + /// Returns the number of deposits the cache stores pub fn len(&self) -> usize { - self.logs.len() + self.finalized_deposit_count as usize + self.logs.len() } /// True if the cache does not store any blocks. pub fn is_empty(&self) -> bool { - self.logs.is_empty() + self.finalized_deposit_count != 0 && self.logs.is_empty() } /// Returns the block number for the most recent deposit in the cache. - pub fn latest_block_number(&self) -> Option { - self.logs.last().map(|log| log.block_number) + pub fn latest_block_number(&self) -> u64 { + self.logs + .last() + .map(|log| log.block_number) + .unwrap_or(self.finalized_block_height) } - /// Returns an iterator over all the logs in `self`. + /// Returns an iterator over all the logs in `self` that aren't finalized. pub fn iter(&self) -> impl Iterator { self.logs.iter() } - /// Returns the i'th deposit log. - pub fn get(&self, i: usize) -> Option<&DepositLog> { - self.logs.get(i) + /// Returns the deposit log with INDEX i. 
+ pub fn get_log(&self, i: usize) -> Option<&DepositLog> { + let finalized_deposit_count = self.finalized_deposit_count as usize; + if i < finalized_deposit_count { + None + } else { + self.logs.get(i - finalized_deposit_count) + } + } + + /// Returns the deposit root with DEPOSIT COUNT (not index) i + pub fn get_root(&self, i: usize) -> Option<&Hash256> { + let finalized_deposit_count = self.finalized_deposit_count as usize; + if i < finalized_deposit_count { + None + } else { + self.deposit_roots.get(i - finalized_deposit_count) + } + } + + /// Returns the finalized deposit count + pub fn finalized_deposit_count(&self) -> u64 { + self.finalized_deposit_count + } + + /// Finalizes the cache up to `eth1_block.deposit_count`. + pub fn finalize(&mut self, eth1_block: Eth1Block) -> Result<(), Error> { + let deposits_to_finalize = eth1_block.deposit_count.ok_or_else(|| { + Error::Internal("Eth1Block did not contain deposit_count".to_string()) + })?; + + let currently_finalized = self.finalized_deposit_count; + if deposits_to_finalize > self.len() as u64 || deposits_to_finalize <= currently_finalized { + Err(Error::InvalidFinalizeIndex { + requested_count: deposits_to_finalize, + currently_finalized, + deposit_count: self.len() as u64, + }) + } else { + let finalized_log = self + .get_log((deposits_to_finalize - 1) as usize) + .cloned() + .ok_or(Error::PleaseNotifyTheDevs)?; + let drop = (deposits_to_finalize - currently_finalized) as usize; + self.deposit_tree + .finalize(eth1_block.into()) + .map_err(Error::DepositTree)?; + self.logs.drain(0..drop); + self.leaves.drain(0..drop); + self.deposit_roots.drain(0..drop); + self.finalized_deposit_count = deposits_to_finalize; + self.finalized_block_height = finalized_log.block_number; + + Ok(()) + } + } + + /// Returns the deposit tree snapshot (if tree is finalized) + pub fn get_deposit_snapshot(&self) -> Option { + self.deposit_tree.get_snapshot() } /// Adds `log` to self. 
@@ -153,19 +294,29 @@ impl DepositCache { /// - If a log with index `log.index - 1` is not already present in `self` (ignored when empty). /// - If a log with `log.index` is already known, but the given `log` is distinct to it. pub fn insert_log(&mut self, log: DepositLog) -> Result { - match log.index.cmp(&(self.logs.len() as u64)) { + match log.index.cmp(&(self.len() as u64)) { Ordering::Equal => { let deposit = log.deposit_data.tree_hash_root(); - self.leaves.push(deposit); - self.logs.push(log); + // should push to deposit_tree first because it's fallible self.deposit_tree .push_leaf(deposit) .map_err(Error::DepositTree)?; + self.leaves.push(deposit); + self.logs.push(log); self.deposit_roots.push(self.deposit_tree.root()); Ok(DepositCacheInsertOutcome::Inserted) } Ordering::Less => { - if self.logs[log.index as usize] == log { + let mut compare_index = log.index as usize; + if log.index < self.finalized_deposit_count { + return Err(Error::FinalizedLogInsert { + log_index: log.index, + finalized_index: self.finalized_deposit_count - 1, + }); + } else { + compare_index -= self.finalized_deposit_count as usize; + } + if self.logs[compare_index] == log { Ok(DepositCacheInsertOutcome::Duplicate) } else { Err(Error::DuplicateDistinctLog(log.index)) @@ -187,14 +338,13 @@ impl DepositCache { /// /// ## Errors /// - /// - If `deposit_count` is larger than `end`. + /// - If `deposit_count` is less than `end`. /// - There are not sufficient deposits in the tree to generate the proof. pub fn get_deposits( &self, start: u64, end: u64, deposit_count: u64, - tree_depth: usize, ) -> Result<(Hash256, Vec), Error> { if deposit_count < end { // It's invalid to ask for more deposits than should exist. @@ -202,48 +352,66 @@ impl DepositCache { deposit_count, range_end: end, }) - } else if end > self.logs.len() as u64 { + } else if end > self.len() as u64 { // The range of requested deposits exceeds the deposits stored locally. 
Err(Error::InsufficientDeposits { requested: end, known_deposits: self.logs.len(), }) - } else if deposit_count > self.leaves.len() as u64 { - // There are not `deposit_count` known deposit roots, so we can't build the merkle tree - // to prove into. - Err(Error::InsufficientDeposits { - requested: deposit_count, - known_deposits: self.logs.len(), + } else if self.finalized_deposit_count > start { + // Can't ask for deposits before or on the finalized deposit + Err(Error::DepositRangeInvalid { + range_start: start, + finalized_count: self.finalized_deposit_count, }) } else { + let (start, end, deposit_count) = ( + start - self.finalized_deposit_count, + end - self.finalized_deposit_count, + deposit_count - self.finalized_deposit_count, + ); let leaves = self .leaves .get(0..deposit_count as usize) .ok_or_else(|| Error::Internal("Unable to get known leaves".into()))?; - // Note: there is likely a more optimal solution than recreating the `DepositDataTree` - // each time this function is called. - // - // Perhaps a base merkle tree could be maintained that contains all deposits up to the - // last finalized eth1 deposit count. Then, that tree could be cloned and extended for - // each of these calls. + let tree = self + .deposit_tree + .get_snapshot() + .map(|snapshot| { + // The tree has already been finalized. 
So we can just start from the snapshot + // and replay the deposits up to `deposit_count` + let mut tree = DepositDataTree::from_snapshot(&snapshot, DEPOSIT_TREE_DEPTH) + .map_err(Error::DepositTree)?; + for leaf in leaves { + tree.push_leaf(*leaf).map_err(Error::DepositTree)?; + } + Ok(tree) + }) + .unwrap_or_else(|| { + // Deposit tree hasn't been finalized yet, will have to re-create the whole tree + Ok(DepositDataTree::create( + leaves, + leaves.len(), + DEPOSIT_TREE_DEPTH, + )) + })?; - let tree = DepositDataTree::create(leaves, deposit_count as usize, tree_depth); - - let deposits = self - .logs + let mut deposits = vec![]; + self.logs .get(start as usize..end as usize) .ok_or_else(|| Error::Internal("Unable to get known log".into()))? .iter() - .map(|deposit_log| { - let (_leaf, proof) = tree.generate_proof(deposit_log.index as usize); - - Deposit { + .try_for_each(|deposit_log| { + let (_leaf, proof) = tree + .generate_proof(deposit_log.index as usize) + .map_err(Error::DepositTree)?; + deposits.push(Deposit { proof: proof.into(), data: deposit_log.deposit_data.clone(), - } - }) - .collect(); + }); + Ok(()) + })?; Ok((tree.root(), deposits)) } @@ -270,16 +438,24 @@ impl DepositCache { /// Returns the number of deposits that have been observed up to and /// including the block at `block_number`. /// - /// Returns `None` if the `block_number` is zero or prior to contract deployment. + /// Returns `None` if the `block_number` is zero or prior to contract deployment + /// or prior to last finalized deposit. 
pub fn get_deposit_count_from_cache(&self, block_number: u64) -> Option { - if block_number == 0 || block_number < self.deposit_contract_deploy_block { + if block_number == 0 + || block_number < self.deposit_contract_deploy_block + || block_number < self.finalized_block_height + { None + } else if block_number == self.finalized_block_height { + Some(self.finalized_deposit_count) } else { Some( - self.logs - .iter() - .take_while(|deposit| deposit.block_number <= block_number) - .count() as u64, + self.finalized_deposit_count + + self + .logs + .iter() + .take_while(|deposit| deposit.block_number <= block_number) + .count() as u64, ) } } @@ -289,8 +465,8 @@ impl DepositCache { /// Fetches the `deposit_count` on or just before the queried `block_number` /// and queries the `deposit_roots` map to get the corresponding `deposit_root`. pub fn get_deposit_root_from_cache(&self, block_number: u64) -> Option { - let index = self.get_deposit_count_from_cache(block_number)?; - Some(*self.deposit_roots.get(index as usize)?) + let count = self.get_deposit_count_from_cache(block_number)?; + self.get_root(count as usize).cloned() } } @@ -300,8 +476,6 @@ pub mod tests { use execution_layer::http::deposit_log::Log; use types::{EthSpec, MainnetEthSpec}; - pub const TREE_DEPTH: usize = 32; - /// The data from a deposit event, using the v0.8.3 version of the deposit contract. 
pub const EXAMPLE_LOG: &[u8] = &[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -338,32 +512,52 @@ pub mod tests { log.to_deposit_log(&spec).expect("should decode log") } + fn get_cache_with_deposits(n: u64) -> DepositCache { + let mut deposit_cache = DepositCache::default(); + for i in 0..n { + let mut log = example_log(); + log.index = i; + log.block_number = i; + log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i); + deposit_cache + .insert_log(log) + .expect("should add consecutive logs"); + } + assert_eq!(deposit_cache.len() as u64, n, "should have {} deposits", n); + + deposit_cache + } + #[test] fn insert_log_valid() { - let mut tree = DepositCache::default(); + let mut deposit_cache = DepositCache::default(); for i in 0..16 { let mut log = example_log(); log.index = i; - tree.insert_log(log).expect("should add consecutive logs"); + deposit_cache + .insert_log(log) + .expect("should add consecutive logs"); } } #[test] fn insert_log_invalid() { - let mut tree = DepositCache::default(); + let mut deposit_cache = DepositCache::default(); for i in 0..4 { let mut log = example_log(); log.index = i; - tree.insert_log(log).expect("should add consecutive logs"); + deposit_cache + .insert_log(log) + .expect("should add consecutive logs"); } // Add duplicate, when given is the same as the one known. let mut log = example_log(); log.index = 3; assert_eq!( - tree.insert_log(log).unwrap(), + deposit_cache.insert_log(log).unwrap(), DepositCacheInsertOutcome::Duplicate ); @@ -371,54 +565,40 @@ pub mod tests { let mut log = example_log(); log.index = 3; log.block_number = 99; - assert!(tree.insert_log(log).is_err()); + assert!(deposit_cache.insert_log(log).is_err()); // Skip inserting a log. 
let mut log = example_log(); log.index = 5; - assert!(tree.insert_log(log).is_err()); + assert!(deposit_cache.insert_log(log).is_err()); } #[test] fn get_deposit_valid() { let n = 1_024; - let mut tree = DepositCache::default(); - - for i in 0..n { - let mut log = example_log(); - log.index = i; - log.block_number = i; - log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i); - tree.insert_log(log).expect("should add consecutive logs"); - } + let deposit_cache = get_cache_with_deposits(n); // Get 0 deposits, with max deposit count. - let (_, deposits) = tree - .get_deposits(0, 0, n, TREE_DEPTH) + let (_, deposits) = deposit_cache + .get_deposits(0, 0, n) .expect("should get the full tree"); assert_eq!(deposits.len(), 0, "should return no deposits"); // Get 0 deposits, with 0 deposit count. - let (_, deposits) = tree - .get_deposits(0, 0, 0, TREE_DEPTH) - .expect("should get the full tree"); - assert_eq!(deposits.len(), 0, "should return no deposits"); - - // Get 0 deposits, with 0 deposit count, tree depth 0. - let (_, deposits) = tree - .get_deposits(0, 0, 0, 0) + let (_, deposits) = deposit_cache + .get_deposits(0, 0, 0) .expect("should get the full tree"); assert_eq!(deposits.len(), 0, "should return no deposits"); // Get all deposits, with max deposit count. - let (full_root, deposits) = tree - .get_deposits(0, n, n, TREE_DEPTH) + let (full_root, deposits) = deposit_cache + .get_deposits(0, n, n) .expect("should get the full tree"); assert_eq!(deposits.len(), n as usize, "should return all deposits"); // Get 4 deposits, with max deposit count. - let (root, deposits) = tree - .get_deposits(0, 4, n, TREE_DEPTH) + let (root, deposits) = deposit_cache + .get_deposits(0, 4, n) .expect("should get the four from the full tree"); assert_eq!( deposits.len(), @@ -432,14 +612,14 @@ pub mod tests { // Get half of the deposits, with half deposit count. 
let half = n / 2; - let (half_root, deposits) = tree - .get_deposits(0, half, half, TREE_DEPTH) + let (half_root, deposits) = deposit_cache + .get_deposits(0, half, half) .expect("should get the half tree"); assert_eq!(deposits.len(), half as usize, "should return half deposits"); // Get 4 deposits, with half deposit count. - let (root, deposits) = tree - .get_deposits(0, 4, n / 2, TREE_DEPTH) + let (root, deposits) = deposit_cache + .get_deposits(0, 4, n / 2) .expect("should get the half tree"); assert_eq!( deposits.len(), @@ -459,23 +639,455 @@ pub mod tests { #[test] fn get_deposit_invalid() { let n = 16; - let mut tree = DepositCache::default(); - - for i in 0..n { - let mut log = example_log(); - log.index = i; - log.block_number = i; - log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i); - tree.insert_log(log).expect("should add consecutive logs"); - } + let mut tree = get_cache_with_deposits(n); // Range too high. - assert!(tree.get_deposits(0, n + 1, n, TREE_DEPTH).is_err()); + assert!(tree.get_deposits(0, n + 1, n).is_err()); // Count too high. - assert!(tree.get_deposits(0, n, n + 1, TREE_DEPTH).is_err()); + assert!(tree.get_deposits(0, n, n + 1).is_err()); // Range higher than count. 
- assert!(tree.get_deposits(0, 4, 2, TREE_DEPTH).is_err()); + assert!(tree.get_deposits(0, 4, 2).is_err()); + + let block7 = fake_eth1_block(&tree, 7).expect("should create fake eth1 block"); + tree.finalize(block7).expect("should finalize"); + // Range starts <= finalized deposit + assert!(tree.get_deposits(6, 9, 11).is_err()); + assert!(tree.get_deposits(7, 9, 11).is_err()); + // Range start > finalized deposit should be OK + assert!(tree.get_deposits(8, 9, 11).is_ok()); + } + + // returns an eth1 block that can be used to finalize the cache at `deposit_index` + // this will ensure the `deposit_root` on the `Eth1Block` is correct + fn fake_eth1_block(deposit_cache: &DepositCache, deposit_index: usize) -> Option { + let deposit_log = deposit_cache.get_log(deposit_index)?; + Some(Eth1Block { + hash: Hash256::from_low_u64_be(deposit_log.block_number), + timestamp: 0, + number: deposit_log.block_number, + deposit_root: deposit_cache.get_root(deposit_index + 1).cloned(), + deposit_count: Some(deposit_log.index + 1), + }) + } + + #[test] + fn test_finalization_boundaries() { + let n = 8; + let half = (n / 2) as usize; + + let mut deposit_cache = get_cache_with_deposits(n as u64); + + let full_root_before_finalization = deposit_cache.deposit_tree.root(); + let half_log_plus1_before_finalization = deposit_cache + .get_log(half + 1) + .expect("log should exist") + .clone(); + let half_root_plus1_before_finalization = + *deposit_cache.get_root(half + 1).expect("root should exist"); + + let (root_before_finalization, proof_before_finalization) = deposit_cache + .get_deposits((half + 1) as u64, (half + 2) as u64, (half + 2) as u64) + .expect("should return 1 deposit with proof"); + + // finalize on the tree at half + let half_block = + fake_eth1_block(&deposit_cache, half).expect("fake block should be created"); + assert!( + deposit_cache.get_deposit_snapshot().is_none(), + "snapshot should not exist as tree has not been finalized" + ); + deposit_cache + 
.finalize(half_block) + .expect("tree should_finalize"); + + // check boundary conditions for get_log + assert!( + deposit_cache.get_log(half).is_none(), + "log at finalized deposit should NOT exist" + ); + assert_eq!( + *deposit_cache.get_log(half + 1).expect("log should exist"), + half_log_plus1_before_finalization, + "log after finalized deposit should match before finalization" + ); + // check boundary conditions for get_root + assert!( + deposit_cache.get_root(half).is_none(), + "root at finalized deposit should NOT exist" + ); + assert_eq!( + *deposit_cache.get_root(half + 1).expect("root should exist"), + half_root_plus1_before_finalization, + "root after finalized deposit should match before finalization" + ); + // full root should match before and after finalization + assert_eq!( + deposit_cache.deposit_tree.root(), + full_root_before_finalization, + "full root should match before and after finalization" + ); + // check boundary conditions for get_deposits (proof) + assert!( + deposit_cache + .get_deposits(half as u64, (half + 1) as u64, (half + 1) as u64) + .is_err(), + "cannot prove the finalized deposit" + ); + let (root_after_finalization, proof_after_finalization) = deposit_cache + .get_deposits((half + 1) as u64, (half + 2) as u64, (half + 2) as u64) + .expect("should return 1 deposit with proof"); + assert_eq!( + root_before_finalization, root_after_finalization, + "roots before and after finalization should match" + ); + assert_eq!( + proof_before_finalization, proof_after_finalization, + "proof before and after finalization should match" + ); + + // recover tree from snapshot by replaying deposits + let snapshot = deposit_cache + .get_deposit_snapshot() + .expect("snapshot should exist"); + let mut recovered = DepositCache::from_deposit_snapshot(1, &snapshot) + .expect("should recover finalized tree"); + for i in half + 1..n { + let mut log = example_log(); + log.index = i as u64; + log.block_number = i as u64; + 
log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i as u64); + recovered + .insert_log(log) + .expect("should add consecutive logs"); + } + + // check the same boundary conditions above for the recovered tree + assert!( + recovered.get_log(half).is_none(), + "log at finalized deposit should NOT exist" + ); + assert_eq!( + *recovered.get_log(half + 1).expect("log should exist"), + half_log_plus1_before_finalization, + "log after finalized deposit should match before finalization in recovered tree" + ); + // check boundary conditions for get_root + assert!( + recovered.get_root(half).is_none(), + "root at finalized deposit should NOT exist" + ); + assert_eq!( + *recovered.get_root(half + 1).expect("root should exist"), + half_root_plus1_before_finalization, + "root after finalized deposit should match before finalization in recovered tree" + ); + // full root should match before and after finalization + assert_eq!( + recovered.deposit_tree.root(), + full_root_before_finalization, + "full root should match before and after finalization" + ); + // check boundary conditions for get_deposits (proof) + assert!( + recovered + .get_deposits(half as u64, (half + 1) as u64, (half + 1) as u64) + .is_err(), + "cannot prove the finalized deposit" + ); + let (recovered_root_after_finalization, recovered_proof_after_finalization) = recovered + .get_deposits((half + 1) as u64, (half + 2) as u64, (half + 2) as u64) + .expect("should return 1 deposit with proof"); + assert_eq!( + root_before_finalization, recovered_root_after_finalization, + "recovered roots before and after finalization should match" + ); + assert_eq!( + proof_before_finalization, recovered_proof_after_finalization, + "recovered proof before and after finalization should match" + ); + } + + #[test] + fn test_finalization() { + let n = 1024; + let half = n / 2; + let quarter = half / 2; + let mut deposit_cache = get_cache_with_deposits(n); + + let full_root_before_finalization = 
deposit_cache.deposit_tree.root(); + let q3_root_before_finalization = deposit_cache + .get_root((half + quarter) as usize) + .cloned() + .expect("root should exist"); + let q3_log_before_finalization = deposit_cache + .get_log((half + quarter) as usize) + .cloned() + .expect("log should exist"); + // get_log(half+quarter) should return log with index `half+quarter` + assert_eq!( + q3_log_before_finalization.index, + (half + quarter) as u64, + "log index should be {}", + (half + quarter), + ); + + // get lower quarter of deposits with max deposit count + let (lower_quarter_root_before_finalization, lower_quarter_deposits_before_finalization) = + deposit_cache + .get_deposits(quarter, half, n) + .expect("should get lower quarter"); + assert_eq!( + lower_quarter_deposits_before_finalization.len(), + quarter as usize, + "should get {} deposits from lower quarter", + quarter, + ); + // since the lower quarter was done with full deposits, root should be the same as full_root_before_finalization + assert_eq!( + lower_quarter_root_before_finalization, full_root_before_finalization, + "should still get full root with deposit subset", + ); + + // get upper quarter of deposits with slightly reduced deposit count + let (upper_quarter_root_before_finalization, upper_quarter_deposits_before_finalization) = + deposit_cache + .get_deposits(half, half + quarter, n - 2) + .expect("should get upper quarter"); + assert_eq!( + upper_quarter_deposits_before_finalization.len(), + quarter as usize, + "should get {} deposits from upper quarter", + quarter, + ); + // since upper quarter was with subset of nodes, it should differ from full root + assert_ne!( + full_root_before_finalization, upper_quarter_root_before_finalization, + "subtree root should differ from full root", + ); + + let f0_log = deposit_cache + .get_log((quarter - 1) as usize) + .cloned() + .expect("should return log"); + let f0_block = fake_eth1_block(&deposit_cache, (quarter - 1) as usize) + .expect("fake eth1 block 
should be created"); + + // finalize first quarter + deposit_cache + .finalize(f0_block) + .expect("should finalize first quarter"); + // finalized count and block number should match log + assert_eq!( + deposit_cache.finalized_deposit_count, + f0_log.index + 1, + "after calling finalize(eth1block) finalized_deposit_count should equal eth1_block.deposit_count", + ); + assert_eq!( + deposit_cache.finalized_block_height, + f0_log.block_number, + "after calling finalize(eth1block) finalized_block_number should equal eth1block.block_number" + ); + // check get_log boundaries + assert!( + deposit_cache.get_log((quarter - 1) as usize).is_none(), + "get_log() should return None for index <= finalized log index", + ); + assert!( + deposit_cache.get_log(quarter as usize).is_some(), + "get_log() should return Some(log) for index >= finalized_deposit_count", + ); + + // full root should remain the same after finalization + assert_eq!( + full_root_before_finalization, + deposit_cache.deposit_tree.root(), + "root should be the same before and after finalization", + ); + // get_root should return the same root before and after finalization + assert_eq!( + q3_root_before_finalization, + deposit_cache + .get_root((half + quarter) as usize) + .cloned() + .expect("root should exist"), + "get_root should return the same root before and after finalization", + ); + // get_log should return the same log before and after finalization + assert_eq!( + q3_log_before_finalization, + deposit_cache + .get_log((half + quarter) as usize) + .cloned() + .expect("log should exist"), + "get_log should return the same log before and after finalization", + ); + + // again get lower quarter of deposits with max deposit count after finalization + let (f0_lower_quarter_root, f0_lower_quarter_deposits) = deposit_cache + .get_deposits(quarter, half, n) + .expect("should get lower quarter"); + assert_eq!( + f0_lower_quarter_deposits.len(), + quarter as usize, + "should get {} deposits from lower quarter", + 
quarter, + ); + // again get upper quarter of deposits with slightly reduced deposit count after finalization + let (f0_upper_quarter_root, f0_upper_quarter_deposits) = deposit_cache + .get_deposits(half, half + quarter, n - 2) + .expect("should get upper quarter"); + assert_eq!( + f0_upper_quarter_deposits.len(), + quarter as usize, + "should get {} deposits from upper quarter", + quarter, + ); + + // lower quarter root and deposits should be the same + assert_eq!( + lower_quarter_root_before_finalization, f0_lower_quarter_root, + "root should be the same before and after finalization", + ); + for i in 0..lower_quarter_deposits_before_finalization.len() { + assert_eq!( + lower_quarter_deposits_before_finalization[i], f0_lower_quarter_deposits[i], + "get_deposits() should be the same before and after finalization", + ); + } + // upper quarter root and deposits should be the same + assert_eq!( + upper_quarter_root_before_finalization, f0_upper_quarter_root, + "subtree root should be the same before and after finalization", + ); + for i in 0..upper_quarter_deposits_before_finalization.len() { + assert_eq!( + upper_quarter_deposits_before_finalization[i], f0_upper_quarter_deposits[i], + "get_deposits() should be the same before and after finalization", + ); + } + + let f1_log = deposit_cache + .get_log((half - 2) as usize) + .cloned() + .expect("should return log"); + // finalize a little less than half to test multiple finalization + let f1_block = fake_eth1_block(&deposit_cache, (half - 2) as usize) + .expect("should create fake eth1 block"); + deposit_cache + .finalize(f1_block) + .expect("should finalize a little less than half"); + // finalized count and block number should match f1_log + assert_eq!( + deposit_cache.finalized_deposit_count, + f1_log.index + 1, + "after calling finalize(eth1block) finalized_deposit_count should equal eth1_block.deposit_count", + ); + assert_eq!( + deposit_cache.finalized_block_height, + f1_log.block_number, + "after calling 
finalize(eth1block) finalized_block_number should equal eth1block.block_number" + ); + // check get_log boundaries + assert!( + deposit_cache.get_log((half - 2) as usize).is_none(), + "get_log() should return None for index <= finalized log index", + ); + assert!( + deposit_cache.get_log((half - 1) as usize).is_some(), + "get_log() should return Some(log) for index >= finalized_deposit_count", + ); + + // full root should still be unchanged + assert_eq!( + full_root_before_finalization, + deposit_cache.deposit_tree.root(), + "root should be the same before and after finalization", + ); + + // again get upper quarter of deposits with slightly reduced deposit count after second finalization + let (f1_upper_quarter_root, f1_upper_quarter_deposits) = deposit_cache + .get_deposits(half, half + quarter, n - 2) + .expect("should get upper quarter"); + + // upper quarter root and deposits should be the same after second finalization + assert_eq!( + f0_upper_quarter_root, f1_upper_quarter_root, + "subtree root should be the same after multiple finalization", + ); + for i in 0..f0_upper_quarter_deposits.len() { + assert_eq!( + f0_upper_quarter_deposits[i], f1_upper_quarter_deposits[i], + "get_deposits() should be the same before and after finalization", + ); + } + } + + fn verify_equality(original: &DepositCache, copy: &DepositCache) { + // verify each field individually so that if one field should + // fail to recover, this test will point right to it + assert_eq!(original.deposit_contract_deploy_block, copy.deposit_contract_deploy_block, "DepositCache: deposit_contract_deploy_block should remain the same after encoding and decoding from ssz" ); + assert_eq!( + original.leaves, copy.leaves, + "DepositCache: leaves should remain the same after encoding and decoding from ssz" + ); + assert_eq!( + original.logs, copy.logs, + "DepositCache: logs should remain the same after encoding and decoding from ssz" + ); + assert_eq!(original.finalized_deposit_count, 
copy.finalized_deposit_count, "DepositCache: finalized_deposit_count should remain the same after encoding and decoding from ssz"); + assert_eq!(original.finalized_block_height, copy.finalized_block_height, "DepositCache: finalized_block_height should remain the same after encoding and decoding from ssz"); + assert_eq!(original.deposit_roots, copy.deposit_roots, "DepositCache: deposit_roots should remain the same before and after encoding and decoding from ssz"); + assert!(original.deposit_tree == copy.deposit_tree, "DepositCache: deposit_tree should remain the same before and after encoding and decoding from ssz"); + // verify all together for good measure + assert!( + original == copy, + "Deposit cache should remain the same after encoding and decoding from ssz" + ); + } + + fn ssz_round_trip(original: &DepositCache) -> DepositCache { + use ssz::{Decode, Encode}; + let bytes = SszDepositCache::from_deposit_cache(original).as_ssz_bytes(); + let ssz_cache = + SszDepositCache::from_ssz_bytes(&bytes).expect("should decode from ssz bytes"); + + SszDepositCache::to_deposit_cache(&ssz_cache).expect("should recover cache") + } + + #[test] + fn ssz_encode_decode() { + let deposit_cache = get_cache_with_deposits(512); + let recovered_cache = ssz_round_trip(&deposit_cache); + + verify_equality(&deposit_cache, &recovered_cache); + } + + #[test] + fn ssz_encode_decode_with_finalization() { + let mut deposit_cache = get_cache_with_deposits(512); + let block383 = fake_eth1_block(&deposit_cache, 383).expect("should create fake eth1 block"); + deposit_cache.finalize(block383).expect("should finalize"); + let mut first_recovery = ssz_round_trip(&deposit_cache); + + verify_equality(&deposit_cache, &first_recovery); + // finalize again to verify equality after multiple finalizations + let block447 = fake_eth1_block(&deposit_cache, 447).expect("should create fake eth1 block"); + first_recovery.finalize(block447).expect("should finalize"); + + let mut second_recovery = 
ssz_round_trip(&first_recovery); + verify_equality(&first_recovery, &second_recovery); + + // verify equality of a tree that finalized block383, block447, block479 + // with a tree that finalized block383, block479 + let block479 = fake_eth1_block(&deposit_cache, 479).expect("should create fake eth1 block"); + second_recovery + .finalize(block479.clone()) + .expect("should finalize"); + let third_recovery = ssz_round_trip(&second_recovery); + deposit_cache.finalize(block479).expect("should finalize"); + + verify_equality(&deposit_cache, &third_recovery); } } diff --git a/beacon_node/eth1/src/inner.rs b/beacon_node/eth1/src/inner.rs index b0a951bef0..0468a02d2e 100644 --- a/beacon_node/eth1/src/inner.rs +++ b/beacon_node/eth1/src/inner.rs @@ -2,14 +2,15 @@ use crate::service::endpoint_from_config; use crate::Config; use crate::{ block_cache::{BlockCache, Eth1Block}, - deposit_cache::{DepositCache, SszDepositCache}, + deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV1, SszDepositCacheV13}, }; use execution_layer::HttpJsonRpc; use parking_lot::RwLock; use ssz::four_byte_option_impl; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use types::ChainSpec; +use superstruct::superstruct; +use types::{ChainSpec, DepositTreeSnapshot, Eth1Data}; // Define "legacy" implementations of `Option` which use four bytes for encoding the union // selector. 
@@ -29,12 +30,25 @@ impl DepositUpdater { last_processed_block: None, } } + + pub fn from_snapshot( + deposit_contract_deploy_block: u64, + snapshot: &DepositTreeSnapshot, + ) -> Result { + let last_processed_block = Some(snapshot.execution_block_height); + Ok(Self { + cache: DepositCache::from_deposit_snapshot(deposit_contract_deploy_block, snapshot)?, + last_processed_block, + }) + } } pub struct Inner { pub block_cache: RwLock, pub deposit_cache: RwLock, pub endpoint: HttpJsonRpc, + // this gets set to Some(Eth1Data) when the deposit finalization conditions are met + pub to_finalize: RwLock>, pub config: RwLock, pub remote_head_block: RwLock>, pub spec: ChainSpec, @@ -58,9 +72,13 @@ impl Inner { /// Recover `Inner` given byte representation of eth1 deposit and block caches. pub fn from_bytes(bytes: &[u8], config: Config, spec: ChainSpec) -> Result { - let ssz_cache = SszEth1Cache::from_ssz_bytes(bytes) - .map_err(|e| format!("Ssz decoding error: {:?}", e))?; - ssz_cache.to_inner(config, spec) + SszEth1Cache::from_ssz_bytes(bytes) + .map_err(|e| format!("Ssz decoding error: {:?}", e))? + .to_inner(config, spec) + .map(|inner| { + inner.block_cache.write().rebuild_by_hash_map(); + inner + }) } /// Returns a reference to the specification. 
@@ -69,12 +87,21 @@ impl Inner { } } -#[derive(Encode, Decode, Clone)] +pub type SszEth1Cache = SszEth1CacheV13; + +#[superstruct( + variants(V1, V13), + variant_attributes(derive(Encode, Decode, Clone)), + no_enum +)] pub struct SszEth1Cache { - block_cache: BlockCache, - deposit_cache: SszDepositCache, + pub block_cache: BlockCache, + #[superstruct(only(V1))] + pub deposit_cache: SszDepositCacheV1, + #[superstruct(only(V13))] + pub deposit_cache: SszDepositCacheV13, #[ssz(with = "four_byte_option_u64")] - last_processed_block: Option, + pub last_processed_block: Option, } impl SszEth1Cache { @@ -97,6 +124,7 @@ impl SszEth1Cache { }), endpoint: endpoint_from_config(&config) .map_err(|e| format!("Failed to create endpoint: {:?}", e))?, + to_finalize: RwLock::new(None), // Set the remote head_block zero when creating a new instance. We only care about // present and future eth1 nodes. remote_head_block: RwLock::new(None), diff --git a/beacon_node/eth1/src/lib.rs b/beacon_node/eth1/src/lib.rs index f99d085250..3b288de490 100644 --- a/beacon_node/eth1/src/lib.rs +++ b/beacon_node/eth1/src/lib.rs @@ -8,9 +8,9 @@ mod metrics; mod service; pub use block_cache::{BlockCache, Eth1Block}; -pub use deposit_cache::DepositCache; +pub use deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV1, SszDepositCacheV13}; pub use execution_layer::http::deposit_log::DepositLog; -pub use inner::SszEth1Cache; +pub use inner::{SszEth1Cache, SszEth1CacheV1, SszEth1CacheV13}; pub use service::{ BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Eth1Endpoint, Service, DEFAULT_CHAIN_ID, diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index c6b87e88e3..f24b746cd4 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -20,7 +20,7 @@ use std::path::PathBuf; use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use tokio::time::{interval_at, Duration, Instant}; -use types::{ChainSpec, EthSpec, Unsigned}; 
+use types::{ChainSpec, DepositTreeSnapshot, Eth1Data, EthSpec, Unsigned}; /// Indicates the default eth1 chain id we use for the deposit contract. pub const DEFAULT_CHAIN_ID: Eth1Id = Eth1Id::Goerli; @@ -63,7 +63,13 @@ async fn endpoint_state( config_chain_id: &Eth1Id, log: &Logger, ) -> EndpointState { - let error_connecting = |e| { + let error_connecting = |e: String| { + debug!( + log, + "eth1 endpoint error"; + "endpoint" => %endpoint, + "error" => &e, + ); warn!( log, "Error connecting to eth1 node endpoint"; @@ -213,6 +219,10 @@ pub enum Error { GetDepositLogsFailed(String), /// There was an unexpected internal error. Internal(String), + /// Error finalizing deposit + FailedToFinalizeDeposit(String), + /// There was a problem Initializing from deposit snapshot + FailedToInitializeFromSnapshot(String), } /// The success message for an Eth1Data cache update. @@ -395,6 +405,7 @@ impl Service { config.deposit_contract_deploy_block, )), endpoint: endpoint_from_config(&config)?, + to_finalize: RwLock::new(None), remote_head_block: RwLock::new(None), config: RwLock::new(config), spec, @@ -407,6 +418,36 @@ impl Service { &self.inner.endpoint } + /// Creates a new service, initializing the deposit tree from a snapshot. 
+ pub fn from_deposit_snapshot( + config: Config, + log: Logger, + spec: ChainSpec, + deposit_snapshot: &DepositTreeSnapshot, + ) -> Result { + let deposit_cache = + DepositUpdater::from_snapshot(config.deposit_contract_deploy_block, deposit_snapshot) + .map_err(Error::FailedToInitializeFromSnapshot)?; + + Ok(Self { + inner: Arc::new(Inner { + block_cache: <_>::default(), + deposit_cache: RwLock::new(deposit_cache), + endpoint: endpoint_from_config(&config) + .map_err(Error::FailedToInitializeFromSnapshot)?, + to_finalize: RwLock::new(None), + remote_head_block: RwLock::new(None), + config: RwLock::new(config), + spec, + }), + log, + }) + } + + pub fn set_to_finalize(&self, eth1_data: Option) { + *(self.inner.to_finalize.write()) = eth1_data; + } + /// Returns the follow distance that has been shortened to accommodate for differences in the /// spacing between blocks. /// @@ -521,7 +562,7 @@ impl Service { let deposits = self.deposits().read(); deposits .cache - .get_valid_signature_count(deposits.cache.latest_block_number()?) 
+ .get_valid_signature_count(deposits.cache.latest_block_number()) } /// Returns the number of deposits with valid signatures that have been observed up to and @@ -619,7 +660,8 @@ impl Service { "old_block_number" => deposit_cache.last_processed_block, "new_block_number" => deposit_cache.cache.latest_block_number(), ); - deposit_cache.last_processed_block = deposit_cache.cache.latest_block_number(); + deposit_cache.last_processed_block = + Some(deposit_cache.cache.latest_block_number()); } let outcome = @@ -698,6 +740,37 @@ impl Service { "deposits" => format!("{:?}", deposit), ), }; + let optional_eth1data = self.inner.to_finalize.write().take(); + if let Some(eth1data_to_finalize) = optional_eth1data { + let already_finalized = self + .inner + .deposit_cache + .read() + .cache + .finalized_deposit_count(); + let deposit_count_to_finalize = eth1data_to_finalize.deposit_count; + if deposit_count_to_finalize > already_finalized { + match self.finalize_deposits(eth1data_to_finalize) { + Err(e) => error!( + self.log, + "Failed to finalize deposit cache"; + "error" => ?e, + ), + Ok(()) => info!( + self.log, + "Successfully finalized deposit tree"; + "finalized deposit count" => deposit_count_to_finalize, + ), + } + } else { + debug!( + self.log, + "Deposits tree already finalized"; + "already_finalized" => already_finalized, + "deposit_count_to_finalize" => deposit_count_to_finalize, + ); + } + } Ok(()) } @@ -733,6 +806,30 @@ impl Service { ) } + pub fn finalize_deposits(&self, eth1_data: Eth1Data) -> Result<(), Error> { + let eth1_block = self + .inner + .block_cache + .read() + .block_by_hash(ð1_data.block_hash) + .cloned() + .ok_or_else(|| { + Error::FailedToFinalizeDeposit( + "Finalized block not found in block cache".to_string(), + ) + })?; + self.inner + .deposit_cache + .write() + .cache + .finalize(eth1_block) + .map_err(|e| Error::FailedToFinalizeDeposit(format!("{:?}", e))) + } + + pub fn get_deposit_snapshot(&self) -> Option { + 
self.inner.deposit_cache.read().cache.get_deposit_snapshot() + } + /// Contacts the remote eth1 node and attempts to import deposit logs up to the configured /// follow-distance block. /// diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index 7e58f07e24..069a6e4aad 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -400,7 +400,7 @@ mod deposit_tree { .deposits() .read() .cache - .get_deposits(first, last, last, 32) + .get_deposits(first, last, last) .unwrap_or_else(|_| panic!("should get deposits in round {}", round)); assert_eq!( @@ -551,7 +551,7 @@ mod deposit_tree { // Ensure that the root from the deposit tree matches what the contract reported. let (root, deposits) = tree - .get_deposits(0, i as u64, deposit_counts[i], DEPOSIT_CONTRACT_TREE_DEPTH) + .get_deposits(0, i as u64, deposit_counts[i]) .expect("should get deposits"); assert_eq!( root, deposit_roots[i], diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index be68c37b06..7453663012 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -208,6 +208,7 @@ pub mod deposit_methods { #[derive(Clone, Copy)] pub enum BlockQuery { Number(u64), + Hash(Hash256), Latest, } @@ -322,9 +323,12 @@ pub mod deposit_methods { query: BlockQuery, timeout: Duration, ) -> Result { - let query_param = match query { - BlockQuery::Number(block_number) => format!("0x{:x}", block_number), - BlockQuery::Latest => "latest".to_string(), + let (method, query_param) = match query { + BlockQuery::Number(block_number) => { + ("eth_getBlockByNumber", format!("0x{:x}", block_number)) + } + BlockQuery::Hash(block_hash) => ("eth_getBlockByHash", format!("{:?}", block_hash)), + BlockQuery::Latest => ("eth_getBlockByNumber", "latest".to_string()), }; let params = json!([ query_param, @@ -332,9 +336,9 @@ pub mod deposit_methods { ]); let response: 
Value = self - .rpc_request("eth_getBlockByNumber", params, timeout) + .rpc_request(method, params, timeout) .await - .map_err(|e| format!("eth_getBlockByNumber call failed {:?}", e))?; + .map_err(|e| format!("{} call failed {:?}", method, e))?; let hash: Vec = hex_to_bytes( response diff --git a/beacon_node/genesis/src/common.rs b/beacon_node/genesis/src/common.rs index 0d483f9834..06bf99f9f6 100644 --- a/beacon_node/genesis/src/common.rs +++ b/beacon_node/genesis/src/common.rs @@ -23,7 +23,9 @@ pub fn genesis_deposits( return Err(String::from("Failed to push leaf")); } - let (_, mut proof) = tree.generate_proof(i, depth); + let (_, mut proof) = tree + .generate_proof(i, depth) + .map_err(|e| format!("Error generating merkle proof: {:?}", e))?; proof.push(Hash256::from_slice(&int_to_fixed_bytes32((i + 1) as u64))); assert_eq!( diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index 5614e237ff..b7134e37c4 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -86,7 +86,7 @@ impl Eth1GenesisService { .deposits() .read() .cache - .get(min_genesis_active_validator_count.saturating_sub(1)) + .get_log(min_genesis_active_validator_count.saturating_sub(1)) .map(|log| log.block_number) } } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5b4fa5816d..4267a22f98 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1533,6 +1533,53 @@ pub fn serve( }, ); + // GET beacon/deposit_snapshot + let get_beacon_deposit_snapshot = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("deposit_snapshot")) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + .and(eth1_service_filter.clone()) + .and_then( + |accept_header: Option, eth1_service: eth1::Service| { + blocking_task(move || match accept_header { + Some(api_types::Accept::Json) | None => { + let snapshot = 
eth1_service.get_deposit_snapshot(); + Ok( + warp::reply::json(&api_types::GenericResponse::from(snapshot)) + .into_response(), + ) + } + _ => eth1_service + .get_deposit_snapshot() + .map(|snapshot| { + Response::builder() + .status(200) + .header("Content-Type", "application/octet-stream") + .body(snapshot.as_ssz_bytes().into()) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }) + }) + .unwrap_or_else(|| { + Response::builder() + .status(503) + .header("Content-Type", "application/octet-stream") + .body(Vec::new().into()) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }) + }), + }) + }, + ); + /* * config */ @@ -3120,6 +3167,7 @@ pub fn serve( .or(get_beacon_pool_attester_slashings.boxed()) .or(get_beacon_pool_proposer_slashings.boxed()) .or(get_beacon_pool_voluntary_exits.boxed()) + .or(get_beacon_deposit_snapshot.boxed()) .or(get_config_fork_schedule.boxed()) .or(get_config_spec.boxed()) .or(get_config_deposit_contract.boxed()) diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 4f35c4c072..5cb3f12200 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(12); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(13); // All the keys that get stored under the `BeaconMeta` column. 
// diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 104ca9ccd4..a2fb082a35 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -114,6 +114,7 @@ pub struct Timeouts { pub sync_duties: Duration, pub get_beacon_blocks_ssz: Duration, pub get_debug_beacon_states: Duration, + pub get_deposit_snapshot: Duration, } impl Timeouts { @@ -128,6 +129,7 @@ impl Timeouts { sync_duties: timeout, get_beacon_blocks_ssz: timeout, get_debug_beacon_states: timeout, + get_deposit_snapshot: timeout, } } } @@ -913,6 +915,20 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `GET beacon/deposit_snapshot` + pub async fn get_deposit_snapshot(&self) -> Result, Error> { + use ssz::Decode; + let mut path = self.eth_path(V1)?; + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("deposit_snapshot"); + self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_deposit_snapshot) + .await? + .map(|bytes| DepositTreeSnapshot::from_ssz_bytes(&bytes).map_err(Error::InvalidSsz)) + .transpose() + } + /// `POST validator/contribution_and_proofs` pub async fn post_validator_contribution_and_proofs( &self, diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 91e6a5558b..2dced1c449 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -6,7 +6,10 @@ mod block_rewards; use crate::{ ok_or_error, - types::{BeaconState, ChainSpec, Epoch, EthSpec, GenericResponse, ValidatorId}, + types::{ + BeaconState, ChainSpec, DepositTreeSnapshot, Epoch, EthSpec, FinalizedExecutionBlock, + GenericResponse, ValidatorId, + }, BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, StateId, StatusCode, }; use proto_array::core::ProtoArray; @@ -331,6 +334,19 @@ impl Eth1Block { } } +impl From for FinalizedExecutionBlock { + fn from(eth1_block: Eth1Block) -> Self { + Self { + deposit_count: eth1_block.deposit_count.unwrap_or(0), + deposit_root: eth1_block + .deposit_root + 
.unwrap_or_else(|| DepositTreeSnapshot::default().deposit_root), + block_hash: eth1_block.hash, + block_height: eth1_block.number, + } + } +} + #[derive(Debug, Serialize, Deserialize)] pub struct DatabaseInfo { pub schema_version: u64, diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs index da9b78ff11..887deb1efd 100644 --- a/consensus/merkle_proof/src/lib.rs +++ b/consensus/merkle_proof/src/lib.rs @@ -19,6 +19,8 @@ lazy_static! { /// indices are populated by non-zero leaves (perfect for the deposit contract tree). #[derive(Debug, PartialEq)] pub enum MerkleTree { + /// Finalized Node + Finalized(H256), /// Leaf node with the hash of its content. Leaf(H256), /// Internal node with hash, left subtree and right subtree. @@ -41,6 +43,24 @@ pub enum MerkleTreeError { DepthTooSmall, // Overflow occurred ArithError, + // Can't finalize a zero node + ZeroNodeFinalized, + // Can't push to finalized node + FinalizedNodePushed, + // Invalid Snapshot + InvalidSnapshot(InvalidSnapshot), + // Can't proof a finalized node + ProofEncounteredFinalizedNode, + // This should never happen + PleaseNotifyTheDevs, +} + +#[derive(Debug, PartialEq, Clone)] +pub enum InvalidSnapshot { + // Branch hashes are empty but deposits are not + EmptyBranchWithNonZeroDeposits(usize), + // End of tree reached but deposits != 1 + EndOfTree, } impl MerkleTree { @@ -97,9 +117,11 @@ impl MerkleTree { let right: &mut MerkleTree = &mut *right; match (&*left, &*right) { // Tree is full - (Leaf(_), Leaf(_)) => return Err(MerkleTreeError::MerkleTreeFull), + (Leaf(_), Leaf(_)) | (Finalized(_), Leaf(_)) => { + return Err(MerkleTreeError::MerkleTreeFull) + } // There is a right node so insert in right node - (Node(_, _, _), Node(_, _, _)) => { + (Node(_, _, _), Node(_, _, _)) | (Finalized(_), Node(_, _, _)) => { right.push_leaf(elem, depth - 1)?; } // Both branches are zero, insert in left one @@ -107,7 +129,7 @@ impl MerkleTree { *left = MerkleTree::create(&[elem], depth - 1); } 
// Leaf on left branch and zero on right branch, insert on right side - (Leaf(_), Zero(_)) => { + (Leaf(_), Zero(_)) | (Finalized(_), Zero(_)) => { *right = MerkleTree::create(&[elem], depth - 1); } // Try inserting on the left node -> if it fails because it is full, insert in right side. @@ -129,6 +151,7 @@ impl MerkleTree { right.hash().as_bytes(), )); } + Finalized(_) => return Err(MerkleTreeError::FinalizedNodePushed), } Ok(()) @@ -137,6 +160,7 @@ impl MerkleTree { /// Retrieve the root hash of this Merkle tree. pub fn hash(&self) -> H256 { match *self { + MerkleTree::Finalized(h) => h, MerkleTree::Leaf(h) => h, MerkleTree::Node(h, _, _) => h, MerkleTree::Zero(depth) => H256::from_slice(&ZERO_HASHES[depth]), @@ -146,7 +170,7 @@ impl MerkleTree { /// Get a reference to the left and right subtrees if they exist. pub fn left_and_right_branches(&self) -> Option<(&Self, &Self)> { match *self { - MerkleTree::Leaf(_) | MerkleTree::Zero(0) => None, + MerkleTree::Finalized(_) | MerkleTree::Leaf(_) | MerkleTree::Zero(0) => None, MerkleTree::Node(_, ref l, ref r) => Some((l, r)), MerkleTree::Zero(depth) => Some((&ZERO_NODES[depth - 1], &ZERO_NODES[depth - 1])), } @@ -157,16 +181,125 @@ impl MerkleTree { matches!(self, MerkleTree::Leaf(_)) } + /// Finalize deposits up to deposit with count = deposits_to_finalize + pub fn finalize_deposits( + &mut self, + deposits_to_finalize: usize, + level: usize, + ) -> Result<(), MerkleTreeError> { + match self { + MerkleTree::Finalized(_) => Ok(()), + MerkleTree::Zero(_) => Err(MerkleTreeError::ZeroNodeFinalized), + MerkleTree::Leaf(hash) => { + if level != 0 { + // This shouldn't happen but this is a sanity check + return Err(MerkleTreeError::PleaseNotifyTheDevs); + } + *self = MerkleTree::Finalized(*hash); + Ok(()) + } + MerkleTree::Node(hash, left, right) => { + if level == 0 { + // this shouldn't happen but we'll put it here for safety + return Err(MerkleTreeError::PleaseNotifyTheDevs); + } + let deposits = 0x1 << level; + if 
deposits <= deposits_to_finalize { + *self = MerkleTree::Finalized(*hash); + return Ok(()); + } + left.finalize_deposits(deposits_to_finalize, level - 1)?; + if deposits_to_finalize > deposits / 2 { + let remaining = deposits_to_finalize - deposits / 2; + right.finalize_deposits(remaining, level - 1)?; + } + Ok(()) + } + } + } + + fn append_finalized_hashes(&self, result: &mut Vec) { + match self { + MerkleTree::Zero(_) | MerkleTree::Leaf(_) => {} + MerkleTree::Finalized(h) => result.push(*h), + MerkleTree::Node(_, left, right) => { + left.append_finalized_hashes(result); + right.append_finalized_hashes(result); + } + } + } + + pub fn get_finalized_hashes(&self) -> Vec { + let mut result = vec![]; + self.append_finalized_hashes(&mut result); + result + } + + pub fn from_finalized_snapshot( + finalized_branch: &[H256], + deposit_count: usize, + level: usize, + ) -> Result { + if finalized_branch.is_empty() { + return if deposit_count == 0 { + Ok(MerkleTree::Zero(level)) + } else { + Err(InvalidSnapshot::EmptyBranchWithNonZeroDeposits(deposit_count).into()) + }; + } + if deposit_count == (0x1 << level) { + return Ok(MerkleTree::Finalized( + *finalized_branch + .get(0) + .ok_or(MerkleTreeError::PleaseNotifyTheDevs)?, + )); + } + if level == 0 { + return Err(InvalidSnapshot::EndOfTree.into()); + } + + let (left, right) = match deposit_count.checked_sub(0x1 << (level - 1)) { + // left tree is fully finalized + Some(right_deposits) => { + let (left_hash, right_branch) = finalized_branch + .split_first() + .ok_or(MerkleTreeError::PleaseNotifyTheDevs)?; + ( + MerkleTree::Finalized(*left_hash), + MerkleTree::from_finalized_snapshot(right_branch, right_deposits, level - 1)?, + ) + } + // left tree is not fully finalized -> right tree is zero + None => ( + MerkleTree::from_finalized_snapshot(finalized_branch, deposit_count, level - 1)?, + MerkleTree::Zero(level - 1), + ), + }; + + let hash = H256::from_slice(&hash32_concat( + left.hash().as_bytes(), + right.hash().as_bytes(), 
+ )); + Ok(MerkleTree::Node(hash, Box::new(left), Box::new(right))) + } + /// Return the leaf at `index` and a Merkle proof of its inclusion. /// /// The Merkle proof is in "bottom-up" order, starting with a leaf node /// and moving up the tree. Its length will be exactly equal to `depth`. - pub fn generate_proof(&self, index: usize, depth: usize) -> (H256, Vec) { + pub fn generate_proof( + &self, + index: usize, + depth: usize, + ) -> Result<(H256, Vec), MerkleTreeError> { let mut proof = vec![]; let mut current_node = self; let mut current_depth = depth; while current_depth > 0 { let ith_bit = (index >> (current_depth - 1)) & 0x01; + if let &MerkleTree::Finalized(_) = current_node { + return Err(MerkleTreeError::ProofEncounteredFinalizedNode); + } // Note: unwrap is safe because leaves are only ever constructed at depth == 0. let (left, right) = current_node.left_and_right_branches().unwrap(); @@ -187,7 +320,33 @@ impl MerkleTree { // Put proof in bottom-up order. proof.reverse(); - (current_node.hash(), proof) + Ok((current_node.hash(), proof)) + } + + /// useful for debugging + pub fn print_node(&self, mut space: u32) { + const SPACES: u32 = 10; + space += SPACES; + let (pair, text) = match self { + MerkleTree::Node(hash, left, right) => (Some((left, right)), format!("Node({})", hash)), + MerkleTree::Leaf(hash) => (None, format!("Leaf({})", hash)), + MerkleTree::Zero(depth) => ( + None, + format!("Z[{}]({})", depth, H256::from_slice(&ZERO_HASHES[*depth])), + ), + MerkleTree::Finalized(hash) => (None, format!("Finl({})", hash)), + }; + if let Some((_, right)) = pair { + right.print_node(space); + } + println!(); + for _i in SPACES..space { + print!(" "); + } + println!("{}", text); + if let Some((left, _)) = pair { + left.print_node(space); + } } } @@ -235,6 +394,12 @@ impl From for MerkleTreeError { } } +impl From for MerkleTreeError { + fn from(e: InvalidSnapshot) -> Self { + MerkleTreeError::InvalidSnapshot(e) + } +} + #[cfg(test)] mod tests { use super::*; 
@@ -255,7 +420,9 @@ mod tests { let merkle_root = merkle_tree.hash(); let proofs_ok = (0..leaves.len()).all(|i| { - let (leaf, branch) = merkle_tree.generate_proof(i, depth); + let (leaf, branch) = merkle_tree + .generate_proof(i, depth) + .expect("should generate proof"); leaf == leaves[i] && verify_merkle_proof(leaf, &branch, depth, i, merkle_root) }); @@ -274,7 +441,9 @@ mod tests { let proofs_ok = leaves_iter.enumerate().all(|(i, leaf)| { assert_eq!(merkle_tree.push_leaf(leaf, depth), Ok(())); - let (stored_leaf, branch) = merkle_tree.generate_proof(i, depth); + let (stored_leaf, branch) = merkle_tree + .generate_proof(i, depth) + .expect("should generate proof"); stored_leaf == leaf && verify_merkle_proof(leaf, &branch, depth, i, merkle_tree.hash()) }); diff --git a/consensus/ssz/src/decode/impls.rs b/consensus/ssz/src/decode/impls.rs index d91ddabe02..76d85f775d 100644 --- a/consensus/ssz/src/decode/impls.rs +++ b/consensus/ssz/src/decode/impls.rs @@ -246,6 +246,20 @@ impl Decode for NonZeroUsize { } } +impl Decode for Option { + fn is_ssz_fixed_len() -> bool { + false + } + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let (selector, body) = split_union_bytes(bytes)?; + match selector.into() { + 0u8 => Ok(None), + 1u8 => ::from_ssz_bytes(body).map(Option::Some), + other => Err(DecodeError::UnionSelectorInvalid(other)), + } + } +} + impl Decode for Arc { fn is_ssz_fixed_len() -> bool { T::is_ssz_fixed_len() diff --git a/consensus/ssz/src/encode/impls.rs b/consensus/ssz/src/encode/impls.rs index cfd95ba40d..833480e1b6 100644 --- a/consensus/ssz/src/encode/impls.rs +++ b/consensus/ssz/src/encode/impls.rs @@ -203,6 +203,34 @@ impl_encode_for_tuples! 
{ } } +impl Encode for Option { + fn is_ssz_fixed_len() -> bool { + false + } + fn ssz_append(&self, buf: &mut Vec) { + match self { + Option::None => { + let union_selector: u8 = 0u8; + buf.push(union_selector); + } + Option::Some(ref inner) => { + let union_selector: u8 = 1u8; + buf.push(union_selector); + inner.ssz_append(buf); + } + } + } + fn ssz_bytes_len(&self) -> usize { + match self { + Option::None => 1usize, + Option::Some(ref inner) => inner + .ssz_bytes_len() + .checked_add(1) + .expect("encoded length must be less than usize::max_value"), + } + } +} + impl Encode for Arc { fn is_ssz_fixed_len() -> bool { T::is_ssz_fixed_len() @@ -561,6 +589,14 @@ mod tests { ); } + #[test] + fn ssz_encode_option_u8() { + let opt: Option = None; + assert_eq!(opt.as_ssz_bytes(), vec![0]); + let opt: Option = Some(2); + assert_eq!(opt.as_ssz_bytes(), vec![1, 2]); + } + #[test] fn ssz_encode_bool() { assert_eq!(true.as_ssz_bytes(), vec![1]); diff --git a/consensus/ssz/tests/tests.rs b/consensus/ssz/tests/tests.rs index e41fc15dd4..b4b91da4b5 100644 --- a/consensus/ssz/tests/tests.rs +++ b/consensus/ssz/tests/tests.rs @@ -22,6 +22,13 @@ mod round_trip { round_trip(items); } + #[test] + fn option_u16() { + let items: Vec> = vec![None, Some(2u16)]; + + round_trip(items); + } + #[test] fn u8_array_4() { let items: Vec<[u8; 4]> = vec![[0, 0, 0, 0], [1, 0, 0, 0], [1, 2, 3, 4], [1, 2, 0, 4]]; @@ -46,6 +53,17 @@ mod round_trip { round_trip(items); } + #[test] + fn option_vec_h256() { + let items: Vec>> = vec![ + None, + Some(vec![]), + Some(vec![H256::zero(), H256::from([1; 32]), H256::random()]), + ]; + + round_trip(items); + } + #[test] fn vec_u16() { let items: Vec> = vec![ diff --git a/consensus/state_processing/src/common/deposit_data_tree.rs b/consensus/state_processing/src/common/deposit_data_tree.rs index 46f1ed8ccd..aaad96fbd5 100644 --- a/consensus/state_processing/src/common/deposit_data_tree.rs +++ b/consensus/state_processing/src/common/deposit_data_tree.rs @@ -2,12 
+2,14 @@ use eth2_hashing::hash; use int_to_bytes::int_to_bytes32; use merkle_proof::{MerkleTree, MerkleTreeError}; use safe_arith::SafeArith; -use types::Hash256; +use types::{DepositTreeSnapshot, FinalizedExecutionBlock, Hash256}; /// Emulates the eth1 deposit contract merkle tree. +#[derive(PartialEq)] pub struct DepositDataTree { tree: MerkleTree, mix_in_length: usize, + finalized_execution_block: Option, depth: usize, } @@ -17,6 +19,7 @@ impl DepositDataTree { Self { tree: MerkleTree::create(leaves, depth), mix_in_length, + finalized_execution_block: None, depth, } } @@ -38,10 +41,10 @@ impl DepositDataTree { /// /// The Merkle proof is in "bottom-up" order, starting with a leaf node /// and moving up the tree. Its length will be exactly equal to `depth + 1`. - pub fn generate_proof(&self, index: usize) -> (Hash256, Vec) { - let (root, mut proof) = self.tree.generate_proof(index, self.depth); + pub fn generate_proof(&self, index: usize) -> Result<(Hash256, Vec), MerkleTreeError> { + let (root, mut proof) = self.tree.generate_proof(index, self.depth)?; proof.push(Hash256::from_slice(&self.length_bytes())); - (root, proof) + Ok((root, proof)) } /// Add a deposit to the merkle tree. 
@@ -50,4 +53,50 @@ impl DepositDataTree { self.mix_in_length.safe_add_assign(1)?; Ok(()) } + + /// Finalize deposits up to `finalized_execution_block.deposit_count` + pub fn finalize( + &mut self, + finalized_execution_block: FinalizedExecutionBlock, + ) -> Result<(), MerkleTreeError> { + self.tree + .finalize_deposits(finalized_execution_block.deposit_count as usize, self.depth)?; + self.finalized_execution_block = Some(finalized_execution_block); + Ok(()) + } + + /// Get snapshot of finalized deposit tree (if tree is finalized) + pub fn get_snapshot(&self) -> Option { + let finalized_execution_block = self.finalized_execution_block.as_ref()?; + Some(DepositTreeSnapshot { + finalized: self.tree.get_finalized_hashes(), + deposit_root: finalized_execution_block.deposit_root, + deposit_count: finalized_execution_block.deposit_count, + execution_block_hash: finalized_execution_block.block_hash, + execution_block_height: finalized_execution_block.block_height, + }) + } + + /// Create a new Merkle tree from a snapshot + pub fn from_snapshot( + snapshot: &DepositTreeSnapshot, + depth: usize, + ) -> Result { + Ok(Self { + tree: MerkleTree::from_finalized_snapshot( + &snapshot.finalized, + snapshot.deposit_count as usize, + depth, + )?, + mix_in_length: snapshot.deposit_count as usize, + finalized_execution_block: Some(snapshot.into()), + depth, + }) + } + + #[allow(dead_code)] + pub fn print_tree(&self) { + self.tree.print_node(0); + println!("========================================================"); + } } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 68fdbf7990..d1b2ae1823 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "types" -version = "0.2.0" +version = "0.2.1" authors = ["Paul Hauner ", "Age Manning "] edition = "2021" diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit_tree_snapshot.rs new file mode 100644 index 0000000000..21bbab81ff --- 
/dev/null +++ b/consensus/types/src/deposit_tree_snapshot.rs @@ -0,0 +1,83 @@ +use crate::*; +use eth2_hashing::{hash32_concat, ZERO_HASHES}; +use int_to_bytes::int_to_bytes32; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use test_utils::TestRandom; +use DEPOSIT_TREE_DEPTH; + +#[derive(Encode, Decode, Deserialize, Serialize, Clone, Debug, PartialEq, TestRandom)] +pub struct FinalizedExecutionBlock { + pub deposit_root: Hash256, + pub deposit_count: u64, + pub block_hash: Hash256, + pub block_height: u64, +} + +impl From<&DepositTreeSnapshot> for FinalizedExecutionBlock { + fn from(snapshot: &DepositTreeSnapshot) -> Self { + Self { + deposit_root: snapshot.deposit_root, + deposit_count: snapshot.deposit_count, + block_hash: snapshot.execution_block_hash, + block_height: snapshot.execution_block_height, + } + } +} + +#[derive(Encode, Decode, Deserialize, Serialize, Clone, Debug, PartialEq, TestRandom)] +pub struct DepositTreeSnapshot { + pub finalized: Vec, + pub deposit_root: Hash256, + pub deposit_count: u64, + pub execution_block_hash: Hash256, + pub execution_block_height: u64, +} + +impl Default for DepositTreeSnapshot { + fn default() -> Self { + let mut result = Self { + finalized: vec![], + deposit_root: Hash256::default(), + deposit_count: 0, + execution_block_hash: Hash256::zero(), + execution_block_height: 0, + }; + // properly set the empty deposit root + result.deposit_root = result.calculate_root().unwrap(); + result + } +} + +impl DepositTreeSnapshot { + // Calculates the deposit tree root from the hashes in the snapshot + pub fn calculate_root(&self) -> Option { + let mut size = self.deposit_count; + let mut index = self.finalized.len(); + let mut deposit_root = [0; 32]; + for height in 0..DEPOSIT_TREE_DEPTH { + deposit_root = if (size & 1) == 1 { + index = index.checked_sub(1)?; + hash32_concat(self.finalized.get(index)?.as_bytes(), &deposit_root) + } else { + 
hash32_concat(&deposit_root, ZERO_HASHES.get(height)?) + }; + size /= 2; + } + // add mix-in-length + deposit_root = hash32_concat(&deposit_root, &int_to_bytes32(self.deposit_count)); + + Some(Hash256::from_slice(&deposit_root)) + } + pub fn is_valid(&self) -> bool { + self.calculate_root() + .map_or(false, |calculated| self.deposit_root == calculated) + } +} + +#[cfg(test)] +mod tests { + use super::*; + ssz_tests!(DepositTreeSnapshot); +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 7f618dc348..4a6cc57b11 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -36,6 +36,7 @@ pub mod contribution_and_proof; pub mod deposit; pub mod deposit_data; pub mod deposit_message; +pub mod deposit_tree_snapshot; pub mod enr_fork_id; pub mod eth1_data; pub mod eth_spec; @@ -120,6 +121,7 @@ pub use crate::contribution_and_proof::ContributionAndProof; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; pub use crate::deposit_data::DepositData; pub use crate::deposit_message::DepositMessage; +pub use crate::deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock}; pub use crate::enr_fork_id::EnrForkId; pub use crate::eth1_data::Eth1Data; pub use crate::eth_spec::EthSpecId; diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index 20147adb9f..cb50a4ee82 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -256,6 +256,7 @@ pub fn migrate_db( migrate_schema::, _, _, _>>( db, + client_config.eth1.deposit_contract_deploy_block, &client_config.get_data_dir(), from, to, diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 005a74edf6..1f869562d1 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -77,6 +77,7 @@ const HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT: u32 
= 4; +const HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT: u32 = 4; const DOPPELGANGER_SERVICE_NAME: &str = "doppelganger"; @@ -291,6 +292,7 @@ impl ProductionValidatorClient { / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT, get_debug_beacon_states: slot_duration / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, + get_deposit_snapshot: slot_duration / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT, } } else { Timeouts::set_all(slot_duration)