From b2590bcb37784f7f3540aa10a9cca123e9c66777 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 12 Dec 2024 09:51:46 +1100 Subject: [PATCH 01/25] Tweak reconstruction batch size (#6668) * Tweak reconstruction batch size * Merge branch 'release-v6.0.1' into reconstruction-batch-size --- beacon_node/beacon_chain/src/migrate.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 37a2e8917b..bc4b8e1ed8 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -26,8 +26,10 @@ const MIN_COMPACTION_PERIOD_SECONDS: u64 = 7200; const COMPACTION_FINALITY_DISTANCE: u64 = 1024; /// Maximum number of blocks applied in each reconstruction burst. /// -/// This limits the amount of time that the finalization migration is paused for. -const BLOCKS_PER_RECONSTRUCTION: usize = 8192 * 4; +/// This limits the amount of time that the finalization migration is paused for. We set this +/// conservatively because pausing the finalization migration for too long can cause hot state +/// cache misses and excessive disk use. +const BLOCKS_PER_RECONSTRUCTION: usize = 1024; /// Default number of epochs to wait between finalization migrations. pub const DEFAULT_EPOCHS_PER_MIGRATION: u64 = 1; From b7ffcc8229e028bf43ddca5c5924b9ec10bd6931 Mon Sep 17 00:00:00 2001 From: antondlr Date: Thu, 12 Dec 2024 01:24:58 +0100 Subject: [PATCH 02/25] Fix: Docker CI to use org tokens (#6655) * update Dockerhub creds to new scheme * Merge branch 'release-v6.0.1' into fix-docker-ci --- .github/workflows/docker.yml | 4 ++-- .github/workflows/release.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index cd45bd6d98..e768208973 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -13,8 +13,8 @@ concurrency: cancel-in-progress: true env: - DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} - DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DH_KEY }} + DOCKER_USERNAME: ${{ secrets.DH_ORG }} # Enable self-hosted runners for the sigp repo only. SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f1ec2e4655..cfba601fad 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,8 +10,8 @@ concurrency: cancel-in-progress: true env: - DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} - DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DH_KEY }} + DOCKER_USERNAME: ${{ secrets.DH_ORG }} REPO_NAME: ${{ github.repository_owner }}/lighthouse IMAGE_NAME: ${{ github.repository_owner }}/lighthouse # Enable self-hosted runners for the sigp repo only. 
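The batch-size reduction in PATCH 01 above works because state reconstruction is consumed in bounded bursts: the migrator replays at most BLOCKS_PER_RECONSTRUCTION blocks, then yields so the finalization migration can run in between. A minimal sketch of that pattern (illustrative only, not the Lighthouse implementation; `ReconstructionState` and `replay_block` are hypothetical stand-ins):

    const BLOCKS_PER_RECONSTRUCTION: usize = 1024;

    struct ReconstructionState {
        // Slots of blocks that still need to be replayed, oldest first.
        pending_blocks: Vec<u64>,
    }

    /// Replay at most one burst of blocks, then return control to the caller.
    /// Returns `true` if another burst is still needed, so the caller can
    /// interleave finalization-migration work between bursts.
    fn reconstruction_step(
        state: &mut ReconstructionState,
        mut replay_block: impl FnMut(u64),
    ) -> bool {
        let burst = state.pending_blocks.len().min(BLOCKS_PER_RECONSTRUCTION);
        for slot in state.pending_blocks.drain(..burst) {
            replay_block(slot);
        }
        !state.pending_blocks.is_empty()
    }

With 1024 blocks per burst instead of 8192 * 4, each pause of the finalization migration is roughly 32x shorter, at the cost of more frequent hand-offs.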
From fc0e0ae613a479a21e931b200f88b6e4ff9e6681 Mon Sep 17 00:00:00 2001
From: Michael Sproul
Date: Thu, 12 Dec 2024 12:58:41 +1100
Subject: [PATCH 03/25] Prevent reconstruction starting prematurely (#6669)

* Prevent reconstruction starting prematurely

* Simplify condition

* Merge remote-tracking branch 'origin/release-v6.0.1' into dont-start-reconstruction-early
---
 beacon_node/beacon_chain/src/builder.rs | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs
index 589db0af50..9d99ff9d8e 100644
--- a/beacon_node/beacon_chain/src/builder.rs
+++ b/beacon_node/beacon_chain/src/builder.rs
@@ -1037,7 +1037,9 @@ where
         );
 
         // Check for states to reconstruct (in the background).
-        if beacon_chain.config.reconstruct_historic_states {
+        if beacon_chain.config.reconstruct_historic_states
+            && beacon_chain.store.get_oldest_block_slot() == 0
+        {
             beacon_chain.store_migrator.process_reconstruction();
         }
 

From 494634399027b94f31759ba5bb4d3a5d2aaff503 Mon Sep 17 00:00:00 2001
From: Povilas Liubauskas
Date: Thu, 12 Dec 2024 10:36:34 +0200
Subject: [PATCH 04/25] Fix subscribing to attestation subnets for aggregating (#6681) (#6682)

* Fix subscribing to attestation subnets for aggregating (#6681)

* Prevent scheduled subnet subscriptions from being overwritten by other subscriptions from the same subnet with additional scoping by slot
---
 beacon_node/network/src/subnet_service/mod.rs |  9 ++-
 .../network/src/subnet_service/tests/mod.rs   | 55 +++++++++++++++++--
 2 files changed, 57 insertions(+), 7 deletions(-)

diff --git a/beacon_node/network/src/subnet_service/mod.rs b/beacon_node/network/src/subnet_service/mod.rs
index ec6f3b10a3..da1f220f04 100644
--- a/beacon_node/network/src/subnet_service/mod.rs
+++ b/beacon_node/network/src/subnet_service/mod.rs
@@ -86,7 +86,7 @@ pub struct SubnetService<T: BeaconChainTypes> {
     subscriptions: HashSetDelay<Subnet>,
 
     /// Subscriptions that need to be executed in the future.
-    scheduled_subscriptions: HashSetDelay<Subnet>,
+    scheduled_subscriptions: HashSetDelay<ExactSubnet>,
 
     /// A list of permanent subnets that this node is subscribed to.
     // TODO: Shift this to a dynamic bitfield
@@ -484,8 +484,10 @@ impl<T: BeaconChainTypes> SubnetService<T> {
             self.subscribe_to_subnet_immediately(subnet, slot + 1)?;
         } else {
            // This is a future slot, schedule subscribing.
+            // We need to include the slot to make the key unique to prevent overwriting the entry
+            // for the same subnet.
             self.scheduled_subscriptions
-                .insert_at(subnet, time_to_subscription_start);
+                .insert_at(ExactSubnet { subnet, slot }, time_to_subscription_start);
         }
 
         Ok(())
@@ -626,7 +628,8 @@ impl<T: BeaconChainTypes> Stream for SubnetService<T> {
         // Process scheduled subscriptions that might be ready, since those can extend a soon to
         // expire subscription.
         match self.scheduled_subscriptions.poll_next_unpin(cx) {
-            Poll::Ready(Some(Ok(subnet))) => {
+            Poll::Ready(Some(Ok(exact_subnet))) => {
+                let ExactSubnet { subnet, ..
} = exact_subnet; let current_slot = self.beacon_chain.slot_clock.now().unwrap_or_default(); if let Err(e) = self.subscribe_to_subnet_immediately(subnet, current_slot + 1) { debug!(self.log, "Failed to subscribe to short lived subnet"; "subnet" => ?subnet, "err" => e); diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 91e4841b26..7283b4af31 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -500,12 +500,15 @@ mod test { // subscription config let committee_count = 1; - // Makes 2 validator subscriptions to the same subnet but at different slots. - // There should be just 1 unsubscription event for the later slot subscription (subscription_slot2). + // Makes 3 validator subscriptions to the same subnet but at different slots. + // There should be just 1 unsubscription event for each of the later slots subscriptions + // (subscription_slot2 and subscription_slot3). let subscription_slot1 = 0; let subscription_slot2 = MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD + 4; + let subscription_slot3 = subscription_slot2 * 2; let com1 = MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD + 4; let com2 = 0; + let com3 = CHAIN.chain.spec.attestation_subnet_count - com1; // create the attestation service and subscriptions let mut subnet_service = get_subnet_service(); @@ -532,6 +535,13 @@ mod test { true, ); + let sub3 = get_subscription( + com3, + current_slot + Slot::new(subscription_slot3), + committee_count, + true, + ); + let subnet_id1 = SubnetId::compute_subnet::( current_slot + Slot::new(subscription_slot1), com1, @@ -548,12 +558,23 @@ mod test { ) .unwrap(); + let subnet_id3 = SubnetId::compute_subnet::( + current_slot + Slot::new(subscription_slot3), + com3, + committee_count, + &subnet_service.beacon_chain.spec, + ) + .unwrap(); + // Assert that subscriptions are different but their subnet is the same assert_ne!(sub1, sub2); + assert_ne!(sub1, sub3); + assert_ne!(sub2, sub3); assert_eq!(subnet_id1, subnet_id2); + assert_eq!(subnet_id1, subnet_id3); // submit the subscriptions - subnet_service.validator_subscriptions(vec![sub1, sub2].into_iter()); + subnet_service.validator_subscriptions(vec![sub1, sub2, sub3].into_iter()); // Unsubscription event should happen at the end of the slot. // We wait for 2 slots, to avoid timeout issues @@ -590,10 +611,36 @@ mod test { // If the permanent and short lived subnets are different, we should get an unsubscription event. if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { assert_eq!( - [expected_subscription, expected_unsubscription], + [ + expected_subscription.clone(), + expected_unsubscription.clone(), + ], second_subscribe_event[..] ); } + + let subscription_slot = current_slot + subscription_slot3 - 1; + + let wait_slots = subnet_service + .beacon_chain + .slot_clock + .duration_to_slot(subscription_slot) + .unwrap() + .as_millis() as u64 + / SLOT_DURATION_MILLIS; + + let no_events = dbg!(get_events(&mut subnet_service, None, wait_slots as u32).await); + + assert_eq!(no_events, []); + + let third_subscribe_event = get_events(&mut subnet_service, None, 2).await; + + if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { + assert_eq!( + [expected_subscription, expected_unsubscription], + third_subscribe_event[..] 
+            );
+        }
     }
 
     #[tokio::test]

From 775fa6730b2ddd60b87344761cccf7a05b2a72d4 Mon Sep 17 00:00:00 2001
From: Lion - dapplion <35266934+dapplion@users.noreply.github.com>
Date: Fri, 13 Dec 2024 13:02:10 +0800
Subject: [PATCH 05/25] Stuck lookup v6 (#6658)

* Fix stuck lookups if no peers on v6

* Merge branch 'release-v6.0.1' into stuck-lookup-v6
---
 .../network/src/sync/block_lookups/single_block_lookup.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs
index d701cbbb8d..9bbd2bf295 100644
--- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs
+++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs
@@ -171,7 +171,10 @@ impl<T: BeaconChainTypes> SingleBlockLookup<T> {
         self.awaiting_parent.is_some()
             || self.block_request_state.state.is_awaiting_event()
             || match &self.component_requests {
-                ComponentRequests::WaitingForBlock => true,
+                // If components are waiting for the block request to complete, here we should
+                // check if `block_request_state.state.is_awaiting_event()`. However we already
+                // checked that above, so `WaitingForBlock => false` is equivalent.
+                ComponentRequests::WaitingForBlock => false,
                 ComponentRequests::ActiveBlobRequest(request, _) => {
                     request.state.is_awaiting_event()
                 }

From f3b78889e50752f40e6d371621764b49bca4090f Mon Sep 17 00:00:00 2001
From: Michael Sproul
Date: Sat, 14 Dec 2024 19:43:00 +1100
Subject: [PATCH 06/25] Compact more when pruning states (#6667)

* Compact more when pruning states

* Merge branch 'release-v6.0.1' into compact-more
---
 .../src/schema_change/migration_schema_v22.rs |  2 +-
 beacon_node/store/src/hot_cold_store.rs       | 42 ++++++++++++++++++-
 2 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs
index f532c0e672..c34512eded 100644
--- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs
+++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs
@@ -152,7 +152,7 @@ pub fn delete_old_schema_freezer_data(
     db.cold_db.do_atomically(cold_ops)?;
 
     // In order to reclaim space, we need to compact the freezer DB as well.
-    db.cold_db.compact()?;
+    db.compact_freezer()?;
 
     Ok(())
 }
diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs
index 4942b14881..da3e6d4ebc 100644
--- a/beacon_node/store/src/hot_cold_store.rs
+++ b/beacon_node/store/src/hot_cold_store.rs
@@ -2484,6 +2484,45 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         Ok(())
     }
 
+    /// Run a compaction pass on the freezer DB to free up space used by deleted states.
+    pub fn compact_freezer(&self) -> Result<(), Error> {
+        let current_schema_columns = vec![
+            DBColumn::BeaconColdStateSummary,
+            DBColumn::BeaconStateSnapshot,
+            DBColumn::BeaconStateDiff,
+            DBColumn::BeaconStateRoots,
+        ];
+
+        // We can remove this once schema V21 has been gone for a while.
+ let previous_schema_columns = vec![ + DBColumn::BeaconState, + DBColumn::BeaconStateSummary, + DBColumn::BeaconBlockRootsChunked, + DBColumn::BeaconStateRootsChunked, + DBColumn::BeaconRestorePoint, + DBColumn::BeaconHistoricalRoots, + DBColumn::BeaconRandaoMixes, + DBColumn::BeaconHistoricalSummaries, + ]; + let mut columns = current_schema_columns; + columns.extend(previous_schema_columns); + + for column in columns { + info!( + self.log, + "Starting compaction"; + "column" => ?column + ); + self.cold_db.compact_column(column)?; + info!( + self.log, + "Finishing compaction"; + "column" => ?column + ); + } + Ok(()) + } + /// Return `true` if compaction on finalization/pruning is enabled. pub fn compact_on_prune(&self) -> bool { self.config.compact_on_prune @@ -2875,6 +2914,7 @@ impl, Cold: ItemStore> HotColdDB // // We can remove this once schema V21 has been gone for a while. let previous_schema_columns = vec![ + DBColumn::BeaconState, DBColumn::BeaconStateSummary, DBColumn::BeaconBlockRootsChunked, DBColumn::BeaconStateRootsChunked, @@ -2916,7 +2956,7 @@ impl, Cold: ItemStore> HotColdDB self.cold_db.do_atomically(cold_ops)?; // In order to reclaim space, we need to compact the freezer DB as well. - self.cold_db.compact()?; + self.compact_freezer()?; Ok(()) } From c3a0757ad2c0d70bb0686463e6d5c4a2041114a3 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 16 Dec 2024 10:16:53 +1100 Subject: [PATCH 07/25] Correct `/nat` API for libp2p (#6677) * Fix nat API --- .../lighthouse_network/src/peer_manager/network_behaviour.rs | 4 ---- common/system_health/src/lib.rs | 4 ++-- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 11676f9a01..9fd059df85 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -141,10 +141,6 @@ impl NetworkBehaviour for PeerManager { debug!(self.log, "Failed to dial peer"; "peer_id"=> ?peer_id, "error" => %ClearDialError(error)); self.on_dial_failure(peer_id); } - FromSwarm::ExternalAddrConfirmed(_) => { - // We have an external address confirmed, means we are able to do NAT traversal. 
- metrics::set_gauge_vec(&metrics::NAT_OPEN, &["libp2p"], 1); - } _ => { // NOTE: FromSwarm is a non exhaustive enum so updates should be based on release // notes more than compiler feedback diff --git a/common/system_health/src/lib.rs b/common/system_health/src/lib.rs index 3431189842..9f351e943b 100644 --- a/common/system_health/src/lib.rs +++ b/common/system_health/src/lib.rs @@ -235,14 +235,14 @@ pub fn observe_nat() -> NatState { let libp2p_ipv4 = lighthouse_network::metrics::get_int_gauge( &lighthouse_network::metrics::NAT_OPEN, - &["libp2p"], + &["libp2p_ipv4"], ) .map(|g| g.get() == 1) .unwrap_or_default(); let libp2p_ipv6 = lighthouse_network::metrics::get_int_gauge( &lighthouse_network::metrics::NAT_OPEN, - &["libp2p"], + &["libp2p_ipv6"], ) .map(|g| g.get() == 1) .unwrap_or_default(); From 0d90135047519f4c2ee586d50e560f7bb2ff9b10 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 16 Dec 2024 14:03:22 +1100 Subject: [PATCH 08/25] Release v6.0.1 (#6659) * Release v6.0.1 --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1ddeecf711..c9744f500d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -833,7 +833,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "6.0.0" +version = "6.0.1" dependencies = [ "account_utils", "beacon_chain", @@ -1078,7 +1078,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "6.0.0" +version = "6.0.1" dependencies = [ "beacon_node", "bytes", @@ -4674,7 +4674,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "6.0.0" +version = "6.0.1" dependencies = [ "account_utils", "beacon_chain", @@ -5244,7 +5244,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "6.0.0" +version = "6.0.1" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index fd4f0f6d4a..15cdf15dc5 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "6.0.0" +version = "6.0.1" authors = [ "Paul Hauner ", "Age Manning "] edition = { workspace = true } diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 07e51597e3..0751bdadff 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v6.0.0-", - fallback = "Lighthouse/v6.0.0" + prefix = "Lighthouse/v6.0.1-", + fallback = "Lighthouse/v6.0.1" ); /// Returns the first eight characters of the latest commit hash for this build. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 88daddd8aa..9612bded47 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "6.0.0" +version = "6.0.1" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 329519fb54..fa426daffa 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "6.0.0" +version = "6.0.1" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false From c92c07ff498721d9eea60db8a5acfde399f47eea Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 16 Dec 2024 12:33:33 +0800 Subject: [PATCH 09/25] Track beacon processor import result metrics (#6541) * Track beacon processor import result metrics * Update metric name --- .../beacon_chain/src/block_verification.rs | 3 +- beacon_node/network/src/metrics.rs | 62 +++++++++++++++- .../gossip_methods.rs | 70 +++++++++---------- .../network_beacon_processor/sync_methods.rs | 7 +- 4 files changed, 99 insertions(+), 43 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 4c5f53248f..ddb7bb614a 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -92,6 +92,7 @@ use std::fs; use std::io::Write; use std::sync::Arc; use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; +use strum::AsRefStr; use task_executor::JoinHandle; use types::{ data_column_sidecar::DataColumnSidecarError, BeaconBlockRef, BeaconState, BeaconStateError, @@ -137,7 +138,7 @@ const WRITE_BLOCK_PROCESSING_SSZ: bool = cfg!(feature = "write_ssz_files"); /// /// - The block is malformed/invalid (indicated by all results other than `BeaconChainError`. /// - We encountered an error whilst trying to verify the block (a `BeaconChainError`). -#[derive(Debug)] +#[derive(Debug, AsRefStr)] pub enum BlockError { /// The parent block was unknown. 
/// diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 4b7e8a50a3..154a59eade 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -2,7 +2,8 @@ use beacon_chain::{ attestation_verification::Error as AttnError, light_client_finality_update_verification::Error as LightClientFinalityUpdateError, light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError, - sync_committee_verification::Error as SyncCommitteeError, + sync_committee_verification::Error as SyncCommitteeError, AvailabilityProcessingStatus, + BlockError, }; use fnv::FnvHashMap; use lighthouse_network::{ @@ -11,12 +12,19 @@ use lighthouse_network::{ }; pub use metrics::*; use std::sync::{Arc, LazyLock}; +use strum::AsRefStr; use strum::IntoEnumIterator; use types::EthSpec; pub const SUCCESS: &str = "SUCCESS"; pub const FAILURE: &str = "FAILURE"; +#[derive(Debug, AsRefStr)] +pub(crate) enum BlockSource { + Gossip, + Rpc, +} + pub static BEACON_BLOCK_MESH_PEERS_PER_CLIENT: LazyLock> = LazyLock::new(|| { try_create_int_gauge_vec( @@ -59,6 +67,27 @@ pub static SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS: LazyLock> = ) }); +/* + * Beacon processor + */ +pub static BEACON_PROCESSOR_MISSING_COMPONENTS: LazyLock> = LazyLock::new( + || { + try_create_int_counter_vec( + "beacon_processor_missing_components_total", + "Total number of imported individual block components that resulted in missing components", + &["source", "component"], + ) + }, +); +pub static BEACON_PROCESSOR_IMPORT_ERRORS_PER_TYPE: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "beacon_processor_import_errors_total", + "Total number of block components that were not verified", + &["source", "component", "type"], + ) + }); + /* * Gossip processor */ @@ -606,6 +635,37 @@ pub fn register_sync_committee_error(error: &SyncCommitteeError) { inc_counter_vec(&GOSSIP_SYNC_COMMITTEE_ERRORS_PER_TYPE, &[error.as_ref()]); } +pub(crate) fn register_process_result_metrics( + result: &std::result::Result, + source: BlockSource, + block_component: &'static str, +) { + match result { + Ok(status) => match status { + AvailabilityProcessingStatus::Imported { .. } => match source { + BlockSource::Gossip => { + inc_counter(&BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); + } + BlockSource::Rpc => { + inc_counter(&BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); + } + }, + AvailabilityProcessingStatus::MissingComponents { .. 
} => { + inc_counter_vec( + &BEACON_PROCESSOR_MISSING_COMPONENTS, + &[source.as_ref(), block_component], + ); + } + }, + Err(error) => { + inc_counter_vec( + &BEACON_PROCESSOR_IMPORT_ERRORS_PER_TYPE, + &[source.as_ref(), block_component, error.as_ref()], + ); + } + } +} + pub fn from_result(result: &std::result::Result) -> &str { match result { Ok(_) => SUCCESS, diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 317bfb104b..4fc83b0923 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1,5 +1,5 @@ use crate::{ - metrics, + metrics::{self, register_process_result_metrics}, network_beacon_processor::{InvalidBlockStorage, NetworkBeaconProcessor}, service::NetworkMessage, sync::SyncMessage, @@ -915,12 +915,11 @@ impl NetworkBeaconProcessor { let blob_index = verified_blob.id().index; let result = self.chain.process_gossip_blob(verified_blob).await; + register_process_result_metrics(&result, metrics::BlockSource::Gossip, "blob"); match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { - // Note: Reusing block imported metric here - metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); - debug!( + info!( self.log, "Gossipsub blob processed - imported fully available block"; "block_root" => %block_root @@ -989,43 +988,39 @@ impl NetworkBeaconProcessor { let data_column_slot = verified_data_column.slot(); let data_column_index = verified_data_column.id().index; - match self + let result = self .chain .process_gossip_data_columns(vec![verified_data_column], || Ok(())) - .await - { - Ok(availability) => { - match availability { - AvailabilityProcessingStatus::Imported(block_root) => { - // Note: Reusing block imported metric here - metrics::inc_counter( - &metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL, - ); - info!( - self.log, - "Gossipsub data column processed, imported fully available block"; - "block_root" => %block_root - ); - self.chain.recompute_head_at_current_slot().await; + .await; + register_process_result_metrics(&result, metrics::BlockSource::Gossip, "data_column"); - metrics::set_gauge( - &metrics::BEACON_BLOB_DELAY_FULL_VERIFICATION, - processing_start_time.elapsed().as_millis() as i64, - ); - } - AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { - trace!( - self.log, - "Processed data column, waiting for other components"; - "slot" => %slot, - "data_column_index" => %data_column_index, - "block_root" => %block_root, - ); + match result { + Ok(availability) => match availability { + AvailabilityProcessingStatus::Imported(block_root) => { + info!( + self.log, + "Gossipsub data column processed, imported fully available block"; + "block_root" => %block_root + ); + self.chain.recompute_head_at_current_slot().await; - self.attempt_data_column_reconstruction(block_root).await; - } + metrics::set_gauge( + &metrics::BEACON_BLOB_DELAY_FULL_VERIFICATION, + processing_start_time.elapsed().as_millis() as i64, + ); } - } + AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { + trace!( + self.log, + "Processed data column, waiting for other components"; + "slot" => %slot, + "data_column_index" => %data_column_index, + "block_root" => %block_root, + ); + + self.attempt_data_column_reconstruction(block_root).await; + } + }, Err(BlockError::DuplicateFullyImported(_)) => { debug!( self.log, @@ -1467,11 
+1462,10 @@ impl NetworkBeaconProcessor { NotifyExecutionLayer::Yes, ) .await; + register_process_result_metrics(&result, metrics::BlockSource::Gossip, "block"); match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { - metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); - if reprocess_tx .try_send(ReprocessQueueMessage::BlockImported { block_root: *block_root, diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 6c6bb26ee0..817e6b6440 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -1,4 +1,4 @@ -use crate::metrics; +use crate::metrics::{self, register_process_result_metrics}; use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERANCE}; use crate::sync::BatchProcessResult; use crate::sync::{ @@ -163,8 +163,7 @@ impl NetworkBeaconProcessor { NotifyExecutionLayer::Yes, ) .await; - - metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); + register_process_result_metrics(&result, metrics::BlockSource::Rpc, "block"); // RPC block imported, regardless of process type match result.as_ref() { @@ -286,6 +285,7 @@ impl NetworkBeaconProcessor { } let result = self.chain.process_rpc_blobs(slot, block_root, blobs).await; + register_process_result_metrics(&result, metrics::BlockSource::Rpc, "blobs"); match &result { Ok(AvailabilityProcessingStatus::Imported(hash)) => { @@ -343,6 +343,7 @@ impl NetworkBeaconProcessor { .chain .process_rpc_custody_columns(custody_columns) .await; + register_process_result_metrics(&result, metrics::BlockSource::Rpc, "custody_columns"); match &result { Ok(availability) => match availability { From 11e1d5bf148784d1ccbaf8b1023e26b3d0fb4cd1 Mon Sep 17 00:00:00 2001 From: Jun Song <87601811+syjn99@users.noreply.github.com> Date: Mon, 16 Dec 2024 14:43:54 +0900 Subject: [PATCH 10/25] Add CLI flag for HTTP API token path (VC) (#6577) * Add cli flag for HTTP API token path (VC) * Add http_token_path_flag test * Add pre-check for directory case & Fix test utils * Update docs * Apply review: move http_token_path into validator_http_api config * Lint * Make diff lesser to replace PK_FILENAME * Merge branch 'unstable' into feature/cli-token-path * Applt review: help_vc.md Co-authored-by: chonghe <44791194+chong-he@users.noreply.github.com> * Fix help for cli * Fix issues on ci * Merge branch 'unstable' into feature/cli-token-path * Merge branch 'unstable' into feature/cli-token-path * Merge branch 'unstable' into feature/cli-token-path * Merge branch 'unstable' into feature/cli-token-path --- Cargo.lock | 2 ++ book/src/api-vc-auth-header.md | 3 +- book/src/api-vc-endpoints.md | 2 +- book/src/help_vc.md | 4 +++ lighthouse/tests/validator_client.rs | 15 +++++++++ validator_client/http_api/Cargo.toml | 2 ++ validator_client/http_api/src/api_secret.rs | 37 ++++++++++++++++----- validator_client/http_api/src/lib.rs | 10 ++++++ validator_client/http_api/src/test_utils.rs | 9 +++-- validator_client/http_api/src/tests.rs | 8 +++-- validator_client/src/cli.rs | 12 +++++++ validator_client/src/config.rs | 8 ++++- validator_client/src/lib.rs | 2 +- 13 files changed, 96 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9d0d38c1ae..2978a3a19f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9552,6 +9552,8 @@ dependencies = [ "beacon_node_fallback", "bls", "deposit_contract", + "directory", 
+ "dirs", "doppelganger_service", "eth2", "eth2_keystore", diff --git a/book/src/api-vc-auth-header.md b/book/src/api-vc-auth-header.md index adde78270a..feb93724c0 100644 --- a/book/src/api-vc-auth-header.md +++ b/book/src/api-vc-auth-header.md @@ -18,7 +18,8 @@ Authorization: Bearer hGut6B8uEujufDXSmZsT0thnxvdvKFBvh ## Obtaining the API token The API token is stored as a file in the `validators` directory. For most users -this is `~/.lighthouse/{network}/validators/api-token.txt`. Here's an +this is `~/.lighthouse/{network}/validators/api-token.txt`, unless overridden using the +`--http-token-path` CLI parameter. Here's an example using the `cat` command to print the token to the terminal, but any text editor will suffice: diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 80eba7a059..98605a3dcd 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -53,7 +53,7 @@ Example Response Body: } ``` -> Note: The command provided in this documentation links to the API token file. In this documentation, it is assumed that the API token file is located in `/var/lib/lighthouse/validators/api-token.txt`. If your database is saved in another directory, modify the `DATADIR` accordingly. If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. +> Note: The command provided in this documentation links to the API token file. In this documentation, it is assumed that the API token file is located in `/var/lib/lighthouse/validators/api-token.txt`. If your database is saved in another directory, modify the `DATADIR` accordingly. If you've specified a custom token path using `--http-token-path`, use that path instead. If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. > As an alternative, you can also provide the API token directly, for example, `-H "Authorization: Bearer hGut6B8uEujufDXSmZsT0thnxvdvKFBvh`. In this case, you obtain the token from the file `api-token.txt` and the command becomes: diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 2cfbfbc857..71e21d68c9 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -69,6 +69,10 @@ Options: this server (e.g., http://localhost:5062). --http-port Set the listen TCP port for the RESTful HTTP API server. + --http-token-path + Path to file containing the HTTP API token for validator client + authentication. If not specified, defaults to + {validators-dir}/api-token.txt. --log-format Specifies the log format used when emitting logs to the terminal. [possible values: JSON] diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 34fe04cc45..587001f77b 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -344,6 +344,21 @@ fn http_store_keystore_passwords_in_secrets_dir_present() { .with_config(|config| assert!(config.http_api.store_passwords_in_secrets_dir)); } +#[test] +fn http_token_path_flag() { + let dir = TempDir::new().expect("Unable to create temporary directory"); + CommandLineTest::new() + .flag("http", None) + .flag("http-token-path", dir.path().join("api-token.txt").to_str()) + .run() + .with_config(|config| { + assert_eq!( + config.http_api.http_token_path, + dir.path().join("api-token.txt") + ); + }); +} + // Tests for Metrics flags. 
 #[test]
 fn metrics_flag() {
diff --git a/validator_client/http_api/Cargo.toml b/validator_client/http_api/Cargo.toml
index 18e0604ad5..96c836f6f3 100644
--- a/validator_client/http_api/Cargo.toml
+++ b/validator_client/http_api/Cargo.toml
@@ -13,7 +13,9 @@ account_utils = { workspace = true }
 bls = { workspace = true }
 beacon_node_fallback = { workspace = true }
 deposit_contract = { workspace = true }
+directory = { workspace = true }
 doppelganger_service = { workspace = true }
+dirs = { workspace = true }
 graffiti_file = { workspace = true }
 eth2 = { workspace = true }
 eth2_keystore = { workspace = true }
diff --git a/validator_client/http_api/src/api_secret.rs b/validator_client/http_api/src/api_secret.rs
index afcac477ec..bac54dc8b2 100644
--- a/validator_client/http_api/src/api_secret.rs
+++ b/validator_client/http_api/src/api_secret.rs
@@ -5,7 +5,7 @@
 use std::fs;
 use std::path::{Path, PathBuf};
 use warp::Filter;
 
-/// The name of the file which stores the API token.
+/// The default name of the file which stores the API token.
 pub const PK_FILENAME: &str = "api-token.txt";
 
 pub const PK_LEN: usize = 33;
@@ -31,14 +31,32 @@ pub struct ApiSecret {
 impl ApiSecret {
     /// If the public key is already on-disk, use it.
     ///
-    /// The provided `dir` is a directory containing `PK_FILENAME`.
+    /// The provided `pk_path` is a path containing the API token.
     ///
     /// If the public key file is missing on disk, create a new key and
     /// write it to disk (over-writing any existing files).
-    pub fn create_or_open<P: AsRef<Path>>(dir: P) -> Result<Self, String> {
-        let pk_path = dir.as_ref().join(PK_FILENAME);
+    pub fn create_or_open<P: AsRef<Path>>(pk_path: P) -> Result<Self, String> {
+        let pk_path = pk_path.as_ref();
+
+        // Check if the path is a directory
+        if pk_path.is_dir() {
+            return Err(format!(
+                "API token path {:?} is a directory, not a file",
+                pk_path
+            ));
+        }
 
         if !pk_path.exists() {
+            // Create parent directories if they don't exist
+            if let Some(parent) = pk_path.parent() {
+                std::fs::create_dir_all(parent).map_err(|e| {
+                    format!(
+                        "Unable to create parent directories for {:?}: {:?}",
+                        pk_path, e
+                    )
+                })?;
+            }
+
             let length = PK_LEN;
             let pk: String = thread_rng()
                 .sample_iter(&Alphanumeric)
@@ -47,7 +65,7 @@ impl ApiSecret {
                 .collect();
 
             // Create and write the public key to file with appropriate permissions
-            create_with_600_perms(&pk_path, pk.to_string().as_bytes()).map_err(|e| {
+            create_with_600_perms(pk_path, pk.to_string().as_bytes()).map_err(|e| {
                 format!(
                     "Unable to create file with permissions for {:?}: {:?}",
                     pk_path, e
@@ -55,13 +73,16 @@ impl ApiSecret {
             })?;
         }
 
-        let pk = fs::read(&pk_path)
-            .map_err(|e| format!("cannot read {}: {}", PK_FILENAME, e))?
+        let pk = fs::read(pk_path)
+            .map_err(|e| format!("cannot read {}: {}", pk_path.display(), e))?
             .iter()
             .map(|&c| char::from(c))
             .collect();
 
-        Ok(Self { pk, pk_path })
+        Ok(Self {
+            pk,
+            pk_path: pk_path.to_path_buf(),
+        })
     }
 
     /// Returns the API token.
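The new create_or_open above only changes where the token lives, not its format: the value written to disk is PK_LEN (33) alphanumeric characters. A self-contained sketch of the same generation scheme, assuming the rand 0.8 API already used in this file (`generate_api_token` is a hypothetical name, not part of ApiSecret):

    use rand::{distributions::Alphanumeric, thread_rng, Rng};

    const PK_LEN: usize = 33;

    /// Generate a token of the same shape as the contents of `api-token.txt`:
    /// 33 random alphanumeric characters.
    fn generate_api_token() -> String {
        thread_rng()
            .sample_iter(&Alphanumeric)
            .take(PK_LEN)
            .map(char::from)
            .collect()
    }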
diff --git a/validator_client/http_api/src/lib.rs b/validator_client/http_api/src/lib.rs index b58c7ccec0..f3dab3780c 100644 --- a/validator_client/http_api/src/lib.rs +++ b/validator_client/http_api/src/lib.rs @@ -7,6 +7,7 @@ mod remotekeys; mod tests; pub mod test_utils; +pub use api_secret::PK_FILENAME; use graffiti::{delete_graffiti, get_graffiti, set_graffiti}; @@ -23,6 +24,7 @@ use beacon_node_fallback::CandidateInfo; use create_validator::{ create_validators_mnemonic, create_validators_web3signer, get_voting_password_storage, }; +use directory::{DEFAULT_HARDCODED_NETWORK, DEFAULT_ROOT_DIR, DEFAULT_VALIDATOR_DIR}; use eth2::lighthouse_vc::{ std_types::{AuthResponse, GetFeeRecipientResponse, GetGasLimitResponse}, types::{ @@ -99,10 +101,17 @@ pub struct Config { pub allow_origin: Option, pub allow_keystore_export: bool, pub store_passwords_in_secrets_dir: bool, + pub http_token_path: PathBuf, } impl Default for Config { fn default() -> Self { + let http_token_path = dirs::home_dir() + .unwrap_or_else(|| PathBuf::from(".")) + .join(DEFAULT_ROOT_DIR) + .join(DEFAULT_HARDCODED_NETWORK) + .join(DEFAULT_VALIDATOR_DIR) + .join(PK_FILENAME); Self { enabled: false, listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), @@ -110,6 +119,7 @@ impl Default for Config { allow_origin: None, allow_keystore_export: false, store_passwords_in_secrets_dir: false, + http_token_path, } } } diff --git a/validator_client/http_api/src/test_utils.rs b/validator_client/http_api/src/test_utils.rs index d033fdbf2d..390095eec7 100644 --- a/validator_client/http_api/src/test_utils.rs +++ b/validator_client/http_api/src/test_utils.rs @@ -1,3 +1,4 @@ +use crate::api_secret::PK_FILENAME; use crate::{ApiSecret, Config as HttpConfig, Context}; use account_utils::validator_definitions::ValidatorDefinitions; use account_utils::{ @@ -73,6 +74,7 @@ impl ApiTester { let validator_dir = tempdir().unwrap(); let secrets_dir = tempdir().unwrap(); + let token_path = tempdir().unwrap().path().join(PK_FILENAME); let validator_defs = ValidatorDefinitions::open_or_create(validator_dir.path()).unwrap(); @@ -85,7 +87,7 @@ impl ApiTester { .await .unwrap(); - let api_secret = ApiSecret::create_or_open(validator_dir.path()).unwrap(); + let api_secret = ApiSecret::create_or_open(token_path).unwrap(); let api_pubkey = api_secret.api_token(); let config = ValidatorStoreConfig { @@ -177,6 +179,7 @@ impl ApiTester { allow_origin: None, allow_keystore_export: true, store_passwords_in_secrets_dir: false, + http_token_path: tempdir().unwrap().path().join(PK_FILENAME), } } @@ -199,8 +202,8 @@ impl ApiTester { } pub fn invalid_token_client(&self) -> ValidatorClientHttpClient { - let tmp = tempdir().unwrap(); - let api_secret = ApiSecret::create_or_open(tmp.path()).unwrap(); + let tmp = tempdir().unwrap().path().join("invalid-token.txt"); + let api_secret = ApiSecret::create_or_open(tmp).unwrap(); let invalid_pubkey = api_secret.api_token(); ValidatorClientHttpClient::new(self.url.clone(), invalid_pubkey).unwrap() } diff --git a/validator_client/http_api/src/tests.rs b/validator_client/http_api/src/tests.rs index 262bb64e69..027b10e246 100644 --- a/validator_client/http_api/src/tests.rs +++ b/validator_client/http_api/src/tests.rs @@ -63,6 +63,7 @@ impl ApiTester { let validator_dir = tempdir().unwrap(); let secrets_dir = tempdir().unwrap(); + let token_path = tempdir().unwrap().path().join("api-token.txt"); let validator_defs = ValidatorDefinitions::open_or_create(validator_dir.path()).unwrap(); @@ -75,7 +76,7 @@ impl ApiTester { .await .unwrap(); 
- let api_secret = ApiSecret::create_or_open(validator_dir.path()).unwrap(); + let api_secret = ApiSecret::create_or_open(&token_path).unwrap(); let api_pubkey = api_secret.api_token(); let spec = Arc::new(E::default_spec()); @@ -127,6 +128,7 @@ impl ApiTester { allow_origin: None, allow_keystore_export: true, store_passwords_in_secrets_dir: false, + http_token_path: token_path, }, sse_logging_components: None, log, @@ -161,8 +163,8 @@ impl ApiTester { } pub fn invalid_token_client(&self) -> ValidatorClientHttpClient { - let tmp = tempdir().unwrap(); - let api_secret = ApiSecret::create_or_open(tmp.path()).unwrap(); + let tmp = tempdir().unwrap().path().join("invalid-token.txt"); + let api_secret = ApiSecret::create_or_open(tmp).unwrap(); let invalid_pubkey = api_secret.api_token(); ValidatorClientHttpClient::new(self.url.clone(), invalid_pubkey.clone()).unwrap() } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 209876f07b..b2d1ebb3c2 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -247,6 +247,18 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) + .arg( + Arg::new("http-token-path") + .long("http-token-path") + .requires("http") + .value_name("HTTP_TOKEN_PATH") + .help( + "Path to file containing the HTTP API token for validator client authentication. \ + If not specified, defaults to {validators-dir}/api-token.txt." + ) + .action(ArgAction::Set) + .display_order(0) + ) /* Prometheus metrics HTTP server related arguments */ .arg( Arg::new("metrics") diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index abdadeb393..0fecb5202d 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -17,7 +17,7 @@ use std::path::PathBuf; use std::str::FromStr; use std::time::Duration; use types::{Address, GRAFFITI_BYTES_LEN}; -use validator_http_api; +use validator_http_api::{self, PK_FILENAME}; use validator_http_metrics; use validator_store::Config as ValidatorStoreConfig; @@ -314,6 +314,12 @@ impl Config { config.http_api.store_passwords_in_secrets_dir = true; } + if cli_args.get_one::("http-token-path").is_some() { + config.http_api.http_token_path = parse_required(cli_args, "http-token-path") + // For backward compatibility, default to the path under the validator dir if not provided. 
+ .unwrap_or_else(|_| config.validator_dir.join(PK_FILENAME)); + } + /* * Prometheus metrics HTTP server */ diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 2cc22357fb..8ebfe98b15 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -551,7 +551,7 @@ impl ProductionValidatorClient { let (block_service_tx, block_service_rx) = mpsc::channel(channel_capacity); let log = self.context.log(); - let api_secret = ApiSecret::create_or_open(&self.config.validator_dir)?; + let api_secret = ApiSecret::create_or_open(&self.config.http_api.http_token_path)?; self.http_api_listen_addr = if self.config.http_api.enabled { let ctx = Arc::new(validator_http_api::Context { From 86891e6d0f111c318660aaea63ed39c58dd716a5 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Sun, 15 Dec 2024 21:43:58 -0800 Subject: [PATCH 11/25] builder gas limit & some refactoring (#6583) * Cache gas_limit * Payload Parameters Refactor * Enforce Proposer Gas Limit * Fixed and Added New Tests * Fix Beacon Chain Tests --- .../beacon_chain/src/execution_payload.rs | 24 +- .../tests/payload_invalidation.rs | 22 +- beacon_node/execution_layer/src/lib.rs | 284 +++++++++++------- .../test_utils/execution_block_generator.rs | 35 ++- .../src/test_utils/mock_builder.rs | 63 +++- .../src/test_utils/mock_execution_layer.rs | 29 +- .../execution_layer/src/test_utils/mod.rs | 5 +- beacon_node/http_api/src/lib.rs | 27 +- .../http_api/tests/interactive_tests.rs | 13 +- beacon_node/http_api/tests/tests.rs | 234 +++++++++++---- consensus/types/src/chain_spec.rs | 27 ++ consensus/types/src/payload.rs | 33 ++ testing/ef_tests/src/cases/fork_choice.rs | 11 +- .../src/test_rig.rs | 34 ++- 14 files changed, 598 insertions(+), 243 deletions(-) diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index f2420eea0d..5e13f0624d 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -14,7 +14,7 @@ use crate::{ }; use execution_layer::{ BlockProposalContents, BlockProposalContentsType, BuilderParams, NewPayloadRequest, - PayloadAttributes, PayloadStatus, + PayloadAttributes, PayloadParameters, PayloadStatus, }; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; @@ -375,8 +375,9 @@ pub fn get_execution_payload( let timestamp = compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?; let random = *state.get_randao_mix(current_epoch)?; - let latest_execution_payload_header_block_hash = - state.latest_execution_payload_header()?.block_hash(); + let latest_execution_payload_header = state.latest_execution_payload_header()?; + let latest_execution_payload_header_block_hash = latest_execution_payload_header.block_hash(); + let latest_execution_payload_header_gas_limit = latest_execution_payload_header.gas_limit(); let withdrawals = match state { &BeaconState::Capella(_) | &BeaconState::Deneb(_) | &BeaconState::Electra(_) => { Some(get_expected_withdrawals(state, spec)?.0.into()) @@ -406,6 +407,7 @@ pub fn get_execution_payload( random, proposer_index, latest_execution_payload_header_block_hash, + latest_execution_payload_header_gas_limit, builder_params, withdrawals, parent_beacon_block_root, @@ -443,6 +445,7 @@ pub async fn prepare_execution_payload( random: Hash256, proposer_index: u64, latest_execution_payload_header_block_hash: 
ExecutionBlockHash, + latest_execution_payload_header_gas_limit: u64, builder_params: BuilderParams, withdrawals: Option>, parent_beacon_block_root: Option, @@ -526,13 +529,20 @@ where parent_beacon_block_root, ); + let target_gas_limit = execution_layer.get_proposer_gas_limit(proposer_index).await; + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit: latest_execution_payload_header_gas_limit, + proposer_gas_limit: target_gas_limit, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: fork, + }; + let block_contents = execution_layer .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - fork, &chain.spec, builder_boost_factor, block_production_version, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 1325875a27..729d88450f 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -986,10 +986,13 @@ async fn payload_preparation() { // Provide preparation data to the EL for `proposer`. el.update_proposer_preparation( Epoch::new(1), - &[ProposerPreparationData { - validator_index: proposer as u64, - fee_recipient, - }], + [( + &ProposerPreparationData { + validator_index: proposer as u64, + fee_recipient, + }, + &None, + )], ) .await; @@ -1119,10 +1122,13 @@ async fn payload_preparation_before_transition_block() { // Provide preparation data to the EL for `proposer`. el.update_proposer_preparation( Epoch::new(0), - &[ProposerPreparationData { - validator_index: proposer as u64, - fee_recipient, - }], + [( + &ProposerPreparationData { + validator_index: proposer as u64, + fee_recipient, + }, + &None, + )], ) .await; diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 08a00d7bf8..ae0dca9833 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -28,7 +28,7 @@ use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; -use std::collections::HashMap; +use std::collections::{hash_map::Entry, HashMap}; use std::fmt; use std::future::Future; use std::io::Write; @@ -319,10 +319,52 @@ impl> BlockProposalContents { + pub parent_hash: ExecutionBlockHash, + pub parent_gas_limit: u64, + pub proposer_gas_limit: Option, + pub payload_attributes: &'a PayloadAttributes, + pub forkchoice_update_params: &'a ForkchoiceUpdateParameters, + pub current_fork: ForkName, +} + #[derive(Clone, PartialEq)] pub struct ProposerPreparationDataEntry { update_epoch: Epoch, preparation_data: ProposerPreparationData, + gas_limit: Option, +} + +impl ProposerPreparationDataEntry { + pub fn update(&mut self, updated: Self) -> bool { + let mut changed = false; + // Update `gas_limit` if `updated.gas_limit` is `Some` and: + // - `self.gas_limit` is `None`, or + // - both are `Some` but the values differ. 
+ if let Some(updated_gas_limit) = updated.gas_limit { + if self.gas_limit != Some(updated_gas_limit) { + self.gas_limit = Some(updated_gas_limit); + changed = true; + } + } + + // Update `update_epoch` if it differs + if self.update_epoch != updated.update_epoch { + self.update_epoch = updated.update_epoch; + changed = true; + } + + // Update `preparation_data` if it differs + if self.preparation_data != updated.preparation_data { + self.preparation_data = updated.preparation_data; + changed = true; + } + + changed + } } #[derive(Hash, PartialEq, Eq)] @@ -711,23 +753,29 @@ impl ExecutionLayer { } /// Updates the proposer preparation data provided by validators - pub async fn update_proposer_preparation( - &self, - update_epoch: Epoch, - preparation_data: &[ProposerPreparationData], - ) { + pub async fn update_proposer_preparation<'a, I>(&self, update_epoch: Epoch, proposer_data: I) + where + I: IntoIterator)>, + { let mut proposer_preparation_data = self.proposer_preparation_data().await; - for preparation_entry in preparation_data { + + for (preparation_entry, gas_limit) in proposer_data { let new = ProposerPreparationDataEntry { update_epoch, preparation_data: preparation_entry.clone(), + gas_limit: *gas_limit, }; - let existing = - proposer_preparation_data.insert(preparation_entry.validator_index, new.clone()); - - if existing != Some(new) { - metrics::inc_counter(&metrics::EXECUTION_LAYER_PROPOSER_DATA_UPDATED); + match proposer_preparation_data.entry(preparation_entry.validator_index) { + Entry::Occupied(mut entry) => { + if entry.get_mut().update(new) { + metrics::inc_counter(&metrics::EXECUTION_LAYER_PROPOSER_DATA_UPDATED); + } + } + Entry::Vacant(entry) => { + entry.insert(new); + metrics::inc_counter(&metrics::EXECUTION_LAYER_PROPOSER_DATA_UPDATED); + } } } } @@ -809,6 +857,13 @@ impl ExecutionLayer { } } + pub async fn get_proposer_gas_limit(&self, proposer_index: u64) -> Option { + self.proposer_preparation_data() + .await + .get(&proposer_index) + .and_then(|entry| entry.gas_limit) + } + /// Maps to the `engine_getPayload` JSON-RPC call. /// /// However, it will attempt to call `self.prepare_payload` if it cannot find an existing @@ -818,14 +873,10 @@ impl ExecutionLayer { /// /// The result will be returned from the first node that returns successfully. No more nodes /// will be contacted. - #[allow(clippy::too_many_arguments)] pub async fn get_payload( &self, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, + payload_parameters: PayloadParameters<'_>, builder_params: BuilderParams, - current_fork: ForkName, spec: &ChainSpec, builder_boost_factor: Option, block_production_version: BlockProductionVersion, @@ -833,11 +884,8 @@ impl ExecutionLayer { let payload_result_type = match block_production_version { BlockProductionVersion::V3 => match self .determine_and_fetch_payload( - parent_hash, - payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - current_fork, builder_boost_factor, spec, ) @@ -857,25 +905,11 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_REQUEST_TIMES, &[metrics::GET_BLINDED_PAYLOAD], ); - self.determine_and_fetch_payload( - parent_hash, - payload_attributes, - forkchoice_update_params, - builder_params, - current_fork, - None, - spec, - ) - .await? + self.determine_and_fetch_payload(payload_parameters, builder_params, None, spec) + .await? 
} BlockProductionVersion::FullV2 => self - .get_full_payload_with( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - noop, - ) + .get_full_payload_with(payload_parameters, noop) .await .and_then(GetPayloadResponseType::try_into) .map(ProvenancedPayload::Local)?, @@ -922,17 +956,15 @@ impl ExecutionLayer { async fn fetch_builder_and_local_payloads( &self, builder: &BuilderHttpClient, - parent_hash: ExecutionBlockHash, builder_params: &BuilderParams, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, - current_fork: ForkName, + payload_parameters: PayloadParameters<'_>, ) -> ( Result>>, builder_client::Error>, Result, Error>, ) { let slot = builder_params.slot; let pubkey = &builder_params.pubkey; + let parent_hash = payload_parameters.parent_hash; info!( self.log(), @@ -950,17 +982,12 @@ impl ExecutionLayer { .await }), timed_future(metrics::GET_BLINDED_PAYLOAD_LOCAL, async { - self.get_full_payload_caching( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - ) - .await - .and_then(|local_result_type| match local_result_type { - GetPayloadResponseType::Full(payload) => Ok(payload), - GetPayloadResponseType::Blinded(_) => Err(Error::PayloadTypeMismatch), - }) + self.get_full_payload_caching(payload_parameters) + .await + .and_then(|local_result_type| match local_result_type { + GetPayloadResponseType::Full(payload) => Ok(payload), + GetPayloadResponseType::Blinded(_) => Err(Error::PayloadTypeMismatch), + }) }) ); @@ -984,26 +1011,17 @@ impl ExecutionLayer { (relay_result, local_result) } - #[allow(clippy::too_many_arguments)] async fn determine_and_fetch_payload( &self, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, + payload_parameters: PayloadParameters<'_>, builder_params: BuilderParams, - current_fork: ForkName, builder_boost_factor: Option, spec: &ChainSpec, ) -> Result>, Error> { let Some(builder) = self.builder() else { // no builder.. 
return local payload return self - .get_full_payload_caching( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - ) + .get_full_payload_caching(payload_parameters) .await .and_then(GetPayloadResponseType::try_into) .map(ProvenancedPayload::Local); @@ -1034,26 +1052,15 @@ impl ExecutionLayer { ), } return self - .get_full_payload_caching( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - ) + .get_full_payload_caching(payload_parameters) .await .and_then(GetPayloadResponseType::try_into) .map(ProvenancedPayload::Local); } + let parent_hash = payload_parameters.parent_hash; let (relay_result, local_result) = self - .fetch_builder_and_local_payloads( - builder.as_ref(), - parent_hash, - &builder_params, - payload_attributes, - forkchoice_update_params, - current_fork, - ) + .fetch_builder_and_local_payloads(builder.as_ref(), &builder_params, payload_parameters) .await; match (relay_result, local_result) { @@ -1118,14 +1125,9 @@ impl ExecutionLayer { ); // check relay payload validity - if let Err(reason) = verify_builder_bid( - &relay, - parent_hash, - payload_attributes, - Some(local.block_number()), - current_fork, - spec, - ) { + if let Err(reason) = + verify_builder_bid(&relay, payload_parameters, Some(local.block_number()), spec) + { // relay payload invalid -> return local metrics::inc_counter_vec( &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS, @@ -1202,14 +1204,7 @@ impl ExecutionLayer { "parent_hash" => ?parent_hash, ); - match verify_builder_bid( - &relay, - parent_hash, - payload_attributes, - None, - current_fork, - spec, - ) { + match verify_builder_bid(&relay, payload_parameters, None, spec) { Ok(()) => Ok(ProvenancedPayload::try_from(relay.data.message)?), Err(reason) => { metrics::inc_counter_vec( @@ -1234,32 +1229,28 @@ impl ExecutionLayer { /// Get a full payload and cache its result in the execution layer's payload cache. async fn get_full_payload_caching( &self, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, - current_fork: ForkName, + payload_parameters: PayloadParameters<'_>, ) -> Result, Error> { - self.get_full_payload_with( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - Self::cache_payload, - ) - .await + self.get_full_payload_with(payload_parameters, Self::cache_payload) + .await } async fn get_full_payload_with( &self, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, - current_fork: ForkName, + payload_parameters: PayloadParameters<'_>, cache_fn: fn( &ExecutionLayer, PayloadContentsRefTuple, ) -> Option>, ) -> Result, Error> { + let PayloadParameters { + parent_hash, + payload_attributes, + forkchoice_update_params, + current_fork, + .. + } = payload_parameters; + self.engine() .request(move |engine| async move { let payload_id = if let Some(id) = engine @@ -1984,6 +1975,10 @@ enum InvalidBuilderPayload { payload: Option, expected: Option, }, + GasLimitMismatch { + payload: u64, + expected: u64, + }, } impl fmt::Display for InvalidBuilderPayload { @@ -2022,19 +2017,51 @@ impl fmt::Display for InvalidBuilderPayload { opt_string(expected) ) } + InvalidBuilderPayload::GasLimitMismatch { payload, expected } => { + write!(f, "payload gas limit was {} not {}", payload, expected) + } } } } +/// Calculate the expected gas limit for a block. 
+pub fn expected_gas_limit( + parent_gas_limit: u64, + target_gas_limit: u64, + spec: &ChainSpec, +) -> Option { + // Calculate the maximum gas limit difference allowed safely + let max_gas_limit_difference = parent_gas_limit + .checked_div(spec.gas_limit_adjustment_factor) + .and_then(|result| result.checked_sub(1)) + .unwrap_or(0); + + // Adjust the gas limit safely + if target_gas_limit > parent_gas_limit { + let gas_diff = target_gas_limit.saturating_sub(parent_gas_limit); + parent_gas_limit.checked_add(std::cmp::min(gas_diff, max_gas_limit_difference)) + } else { + let gas_diff = parent_gas_limit.saturating_sub(target_gas_limit); + parent_gas_limit.checked_sub(std::cmp::min(gas_diff, max_gas_limit_difference)) + } +} + /// Perform some cursory, non-exhaustive validation of the bid returned from the builder. fn verify_builder_bid( bid: &ForkVersionedResponse>, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, + payload_parameters: PayloadParameters<'_>, block_number: Option, - current_fork: ForkName, spec: &ChainSpec, ) -> Result<(), Box> { + let PayloadParameters { + parent_hash, + payload_attributes, + current_fork, + parent_gas_limit, + proposer_gas_limit, + .. + } = payload_parameters; + let is_signature_valid = bid.data.verify_signature(spec); let header = &bid.data.message.header(); @@ -2050,6 +2077,8 @@ fn verify_builder_bid( .cloned() .map(|withdrawals| Withdrawals::::from(withdrawals).tree_hash_root()); let payload_withdrawals_root = header.withdrawals_root().ok(); + let expected_gas_limit = proposer_gas_limit + .and_then(|target_gas_limit| expected_gas_limit(parent_gas_limit, target_gas_limit, spec)); if header.parent_hash() != parent_hash { Err(Box::new(InvalidBuilderPayload::ParentHash { @@ -2086,6 +2115,14 @@ fn verify_builder_bid( payload: payload_withdrawals_root, expected: expected_withdrawals_root, })) + } else if expected_gas_limit + .map(|gas_limit| header.gas_limit() != gas_limit) + .unwrap_or(false) + { + Err(Box::new(InvalidBuilderPayload::GasLimitMismatch { + payload: header.gas_limit(), + expected: expected_gas_limit.unwrap_or(0), + })) } else { Ok(()) } @@ -2138,6 +2175,27 @@ mod test { .await; } + #[tokio::test] + async fn test_expected_gas_limit() { + let spec = ChainSpec::mainnet(); + assert_eq!( + expected_gas_limit(30_000_000, 30_000_000, &spec), + Some(30_000_000) + ); + assert_eq!( + expected_gas_limit(30_000_000, 40_000_000, &spec), + Some(30_029_295) + ); + assert_eq!( + expected_gas_limit(30_029_295, 40_000_000, &spec), + Some(30_058_619) + ); + assert_eq!( + expected_gas_limit(30_058_619, 30_000_000, &spec), + Some(30_029_266) + ); + } + #[tokio::test] async fn test_forked_terminal_block() { let runtime = TestRuntime::default(); diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 4deb91e056..4fab7150ce 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -28,8 +28,8 @@ use super::DEFAULT_TERMINAL_BLOCK; const TEST_BLOB_BUNDLE: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle.ssz"); -const GAS_LIMIT: u64 = 16384; -const GAS_USED: u64 = GAS_LIMIT - 1; +pub const DEFAULT_GAS_LIMIT: u64 = 30_000_000; +const GAS_USED: u64 = DEFAULT_GAS_LIMIT - 1; #[derive(Clone, Debug, PartialEq)] #[allow(clippy::large_enum_variant)] // This struct is only for testing. 
@@ -38,6 +38,10 @@ pub enum Block { PoS(ExecutionPayload), } +pub fn mock_el_extra_data() -> types::VariableList { + "block gen was here".as_bytes().to_vec().into() +} + impl Block { pub fn block_number(&self) -> u64 { match self { @@ -67,6 +71,13 @@ impl Block { } } + pub fn gas_limit(&self) -> u64 { + match self { + Block::PoW(_) => DEFAULT_GAS_LIMIT, + Block::PoS(payload) => payload.gas_limit(), + } + } + pub fn as_execution_block(&self, total_difficulty: Uint256) -> ExecutionBlock { match self { Block::PoW(block) => ExecutionBlock { @@ -570,10 +581,10 @@ impl ExecutionBlockGenerator { logs_bloom: vec![0; 256].into(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, + gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), @@ -587,10 +598,10 @@ impl ExecutionBlockGenerator { logs_bloom: vec![0; 256].into(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, + gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), @@ -603,10 +614,10 @@ impl ExecutionBlockGenerator { logs_bloom: vec![0; 256].into(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, + gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), @@ -623,10 +634,10 @@ impl ExecutionBlockGenerator { logs_bloom: vec![0; 256].into(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, + gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), @@ -642,10 +653,10 @@ impl ExecutionBlockGenerator { logs_bloom: vec![0; 256].into(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, + gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 341daedbc8..879b54eb07 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,5 +1,5 @@ use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; -use crate::{Config, ExecutionLayer, PayloadAttributes}; +use crate::{Config, ExecutionLayer, PayloadAttributes, PayloadParameters}; use eth2::types::{BlobsBundle, BlockId, StateId, ValidatorId}; use eth2::{BeaconNodeHttpClient, Timeouts, CONSENSUS_VERSION_HEADER}; use 
fork_choice::ForkchoiceUpdateParameters; @@ -54,6 +54,10 @@ impl Operation { } } +pub fn mock_builder_extra_data() -> types::VariableList { + "mock_builder".as_bytes().to_vec().into() +} + #[derive(Debug)] // We don't use the string value directly, but it's used in the Debug impl which is required by `warp::reject::Reject`. struct Custom(#[allow(dead_code)] String); @@ -72,6 +76,8 @@ pub trait BidStuff { fn set_withdrawals_root(&mut self, withdrawals_root: Hash256); fn sign_builder_message(&mut self, sk: &SecretKey, spec: &ChainSpec) -> Signature; + + fn stamp_payload(&mut self); } impl BidStuff for BuilderBid { @@ -203,6 +209,29 @@ impl BidStuff for BuilderBid { let message = self.signing_root(domain); sk.sign(message) } + + // this helps differentiate a builder block from a regular block + fn stamp_payload(&mut self) { + let extra_data = mock_builder_extra_data::(); + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Bellatrix(header) => { + header.extra_data = extra_data; + header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.extra_data = extra_data; + header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.extra_data = extra_data; + header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); + } + ExecutionPayloadHeaderRefMut::Electra(header) => { + header.extra_data = extra_data; + header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); + } + } + } } #[derive(Clone)] @@ -286,6 +315,7 @@ impl MockBuilder { while let Some(op) = guard.pop() { op.apply(bid); } + bid.stamp_payload(); } } @@ -413,11 +443,12 @@ pub fn serve( let block = head.data.message(); let head_block_root = block.tree_hash_root(); - let head_execution_hash = block + let head_execution_payload = block .body() .execution_payload() - .map_err(|_| reject("pre-merge block"))? 
- .block_hash(); + .map_err(|_| reject("pre-merge block"))?; + let head_execution_hash = head_execution_payload.block_hash(); + let head_gas_limit = head_execution_payload.gas_limit(); if head_execution_hash != parent_hash { return Err(reject("head mismatch")); } @@ -529,14 +560,24 @@ pub fn serve( finalized_hash: Some(finalized_execution_hash), }; + let proposer_gas_limit = builder + .val_registration_cache + .read() + .get(&pubkey) + .map(|v| v.message.gas_limit); + + let payload_parameters = PayloadParameters { + parent_hash: head_execution_hash, + parent_gas_limit: head_gas_limit, + proposer_gas_limit, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: fork, + }; + let payload_response_type = builder .el - .get_full_payload_caching( - head_execution_hash, - &payload_attributes, - forkchoice_update_params, - fork, - ) + .get_full_payload_caching(payload_parameters) .await .map_err(|_| reject("couldn't get payload"))?; @@ -648,8 +689,6 @@ pub fn serve( } }; - message.set_gas_limit(cached_data.gas_limit); - builder.apply_operations(&mut message); let mut signature = diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index a9f1313e46..48372a39be 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -90,6 +90,7 @@ impl MockExecutionLayer { }; let parent_hash = latest_execution_block.block_hash(); + let parent_gas_limit = latest_execution_block.gas_limit(); let block_number = latest_execution_block.block_number() + 1; let timestamp = block_number; let prev_randao = Hash256::from_low_u64_be(block_number); @@ -131,14 +132,20 @@ impl MockExecutionLayer { let payload_attributes = PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit, + proposer_gas_limit: None, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: ForkName::Bellatrix, + }; + let block_proposal_content_type = self .el .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - ForkName::Bellatrix, &self.spec, None, BlockProductionVersion::FullV2, @@ -171,14 +178,20 @@ impl MockExecutionLayer { let payload_attributes = PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit, + proposer_gas_limit: None, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: ForkName::Bellatrix, + }; + let block_proposal_content_type = self .el .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - ForkName::Bellatrix, &self.spec, None, BlockProductionVersion::BlindedV2, diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 1e71fde255..faf6d4ef0b 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -25,12 +25,13 @@ use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; use crate::EngineCapabilities; +pub use execution_block_generator::DEFAULT_GAS_LIMIT; pub 
use execution_block_generator::{ generate_blobs, generate_genesis_block, generate_genesis_header, generate_pow_block, - static_valid_tx, Block, ExecutionBlockGenerator, + mock_el_extra_data, static_valid_tx, Block, ExecutionBlockGenerator, }; pub use hook::Hook; -pub use mock_builder::{MockBuilder, Operation}; +pub use mock_builder::{mock_builder_extra_data, MockBuilder, Operation}; pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index fe05f55a01..23d177da78 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3704,7 +3704,10 @@ pub fn serve( ); execution_layer - .update_proposer_preparation(current_epoch, &preparation_data) + .update_proposer_preparation( + current_epoch, + preparation_data.iter().map(|data| (data, &None)), + ) .await; chain @@ -3762,7 +3765,7 @@ pub fn serve( let spec = &chain.spec; let (preparation_data, filtered_registration_data): ( - Vec, + Vec<(ProposerPreparationData, Option)>, Vec, ) = register_val_data .into_iter() @@ -3792,12 +3795,15 @@ pub fn serve( // Filter out validators who are not 'active' or 'pending'. is_active_or_pending.then_some({ ( - ProposerPreparationData { - validator_index: validator_index as u64, - fee_recipient: register_data - .message - .fee_recipient, - }, + ( + ProposerPreparationData { + validator_index: validator_index as u64, + fee_recipient: register_data + .message + .fee_recipient, + }, + Some(register_data.message.gas_limit), + ), register_data, ) }) @@ -3807,7 +3813,10 @@ pub fn serve( // Update the prepare beacon proposer cache based on this request. execution_layer - .update_proposer_preparation(current_epoch, &preparation_data) + .update_proposer_preparation( + current_epoch, + preparation_data.iter().map(|(data, limit)| (data, limit)), + ) .await; // Call prepare beacon proposer blocking with the latest update in order to make diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index c3ed334782..627b0d0b17 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -447,9 +447,14 @@ pub async fn proposer_boost_re_org_test( // Send proposer preparation data for all validators. 
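 // Note: preparation data is now paired with an optional gas limit; these tests
 // pass `None` because no validator registration (and hence no proposer gas
 // limit preference) is involved.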
let proposer_preparation_data = all_validators .iter() - .map(|i| ProposerPreparationData { - validator_index: *i as u64, - fee_recipient: Address::from_low_u64_be(*i as u64), + .map(|i| { + ( + ProposerPreparationData { + validator_index: *i as u64, + fee_recipient: Address::from_low_u64_be(*i as u64), + }, + None, + ) }) .collect::>(); harness @@ -459,7 +464,7 @@ pub async fn proposer_boost_re_org_test( .unwrap() .update_proposer_preparation( head_slot.epoch(E::slots_per_epoch()) + 1, - &proposer_preparation_data, + proposer_preparation_data.iter().map(|(a, b)| (a, b)), ) .await; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 940f3ae9c0..080a393b4d 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -13,8 +13,10 @@ use eth2::{ Error::ServerMessage, StatusCode, Timeouts, }; +use execution_layer::expected_gas_limit; use execution_layer::test_utils::{ - MockBuilder, Operation, DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, + mock_builder_extra_data, mock_el_extra_data, MockBuilder, Operation, + DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_GAS_LIMIT, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, }; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; @@ -348,7 +350,6 @@ impl ApiTester { let bls_to_execution_change = harness.make_bls_to_execution_change(4, Address::zero()); let chain = harness.chain.clone(); - let log = test_logger(); let ApiServer { @@ -3755,7 +3756,11 @@ impl ApiTester { self } - pub async fn test_post_validator_register_validator(self) -> Self { + async fn generate_validator_registration_data( + &self, + fee_recipient_generator: impl Fn(usize) -> Address, + gas_limit: u64, + ) -> (Vec, Vec
) { let mut registrations = vec![]; let mut fee_recipients = vec![]; @@ -3766,15 +3771,13 @@ impl ApiTester { epoch: genesis_epoch, }; - let expected_gas_limit = 11_111_111; - for (val_index, keypair) in self.validator_keypairs().iter().enumerate() { let pubkey = keypair.pk.compress(); - let fee_recipient = Address::from_low_u64_be(val_index as u64); + let fee_recipient = fee_recipient_generator(val_index); let data = ValidatorRegistrationData { fee_recipient, - gas_limit: expected_gas_limit, + gas_limit, timestamp: 0, pubkey, }; @@ -3797,6 +3800,17 @@ impl ApiTester { registrations.push(signed); } + (registrations, fee_recipients) + } + + pub async fn test_post_validator_register_validator(self) -> Self { + let (registrations, fee_recipients) = self + .generate_validator_registration_data( + |val_index| Address::from_low_u64_be(val_index as u64), + DEFAULT_GAS_LIMIT, + ) + .await; + self.client .post_validator_register_validator(®istrations) .await @@ -3811,14 +3825,22 @@ impl ApiTester { .zip(fee_recipients.into_iter()) .enumerate() { - let actual = self + let actual_fee_recipient = self .chain .execution_layer .as_ref() .unwrap() .get_suggested_fee_recipient(val_index as u64) .await; - assert_eq!(actual, fee_recipient); + let actual_gas_limit = self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_proposer_gas_limit(val_index as u64) + .await; + assert_eq!(actual_fee_recipient, fee_recipient); + assert_eq!(actual_gas_limit, Some(DEFAULT_GAS_LIMIT)); } self @@ -3839,46 +3861,12 @@ impl ApiTester { ) .await; - let mut registrations = vec![]; - let mut fee_recipients = vec![]; - - let genesis_epoch = self.chain.spec.genesis_slot.epoch(E::slots_per_epoch()); - let fork = Fork { - current_version: self.chain.spec.genesis_fork_version, - previous_version: self.chain.spec.genesis_fork_version, - epoch: genesis_epoch, - }; - - let expected_gas_limit = 11_111_111; - - for (val_index, keypair) in self.validator_keypairs().iter().enumerate() { - let pubkey = keypair.pk.compress(); - let fee_recipient = Address::from_low_u64_be(val_index as u64); - - let data = ValidatorRegistrationData { - fee_recipient, - gas_limit: expected_gas_limit, - timestamp: 0, - pubkey, - }; - - let domain = self.chain.spec.get_domain( - genesis_epoch, - Domain::ApplicationMask(ApplicationDomain::Builder), - &fork, - Hash256::zero(), - ); - let message = data.signing_root(domain); - let signature = keypair.sk.sign(message); - - let signed = SignedValidatorRegistrationData { - message: data, - signature, - }; - - fee_recipients.push(fee_recipient); - registrations.push(signed); - } + let (registrations, fee_recipients) = self + .generate_validator_registration_data( + |val_index| Address::from_low_u64_be(val_index as u64), + DEFAULT_GAS_LIMIT, + ) + .await; self.client .post_validator_register_validator(®istrations) @@ -3911,6 +3899,47 @@ impl ApiTester { self } + pub async fn test_post_validator_register_validator_higher_gas_limit(&self) { + let (registrations, fee_recipients) = self + .generate_validator_registration_data( + |val_index| Address::from_low_u64_be(val_index as u64), + DEFAULT_GAS_LIMIT + 10_000_000, + ) + .await; + + self.client + .post_validator_register_validator(®istrations) + .await + .unwrap(); + + for (val_index, (_, fee_recipient)) in self + .chain + .head_snapshot() + .beacon_state + .validators() + .into_iter() + .zip(fee_recipients.into_iter()) + .enumerate() + { + let actual_fee_recipient = self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_suggested_fee_recipient(val_index 
as u64) + .await; + let actual_gas_limit = self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_proposer_gas_limit(val_index as u64) + .await; + assert_eq!(actual_fee_recipient, fee_recipient); + assert_eq!(actual_gas_limit, Some(DEFAULT_GAS_LIMIT + 10_000_000)); + } + } + pub async fn test_post_validator_liveness_epoch(self) -> Self { let epoch = self.chain.epoch().unwrap(); let head_state = self.chain.head_beacon_state_cloned(); @@ -4031,7 +4060,7 @@ impl ApiTester { let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 11_111_111); + assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); self } @@ -4058,7 +4087,8 @@ impl ApiTester { let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 16_384); + // This is the graffiti of the mock execution layer, not the builder. + assert_eq!(payload.extra_data(), mock_el_extra_data::()); self } @@ -4085,7 +4115,7 @@ impl ApiTester { let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 11_111_111); + assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); self } @@ -4109,7 +4139,7 @@ impl ApiTester { let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 11_111_111); + assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); // If this cache is empty, it indicates fallback was not used, so the payload came from the // mock builder. @@ -4126,10 +4156,16 @@ impl ApiTester { pub async fn test_payload_accepts_mutated_gas_limit(self) -> Self { // Mutate gas limit. + let builder_limit = expected_gas_limit( + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_LIMIT + 10_000_000, + self.chain.spec.as_ref(), + ) + .expect("calculate expected gas limit"); self.mock_builder .as_ref() .unwrap() - .add_operation(Operation::GasLimit(30_000_000)); + .add_operation(Operation::GasLimit(builder_limit as usize)); let slot = self.chain.slot().unwrap(); let epoch = self.chain.epoch().unwrap(); @@ -4149,7 +4185,7 @@ impl ApiTester { let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 30_000_000); + assert_eq!(payload.gas_limit(), builder_limit); // This cache should not be populated because fallback should not have been used. assert!(self @@ -4159,6 +4195,49 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); + + self + } + + pub async fn test_builder_payload_rejected_when_gas_limit_incorrect(self) -> Self { + self.test_post_validator_register_validator_higher_gas_limit() + .await; + + // Mutate gas limit. 
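+        // A limit of 1 cannot be the expected limit derived from the parent
+        // (the limit may only move by roughly parent / 1024 per block), so bid
+        // verification should reject the builder payload and fall back to the
+        // local payload, as asserted below.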
+ self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::GasLimit(1)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4232,6 +4311,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); + self } @@ -4315,6 +4397,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4404,6 +4489,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4491,6 +4579,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4577,6 +4668,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4647,6 +4741,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4707,6 +4804,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4780,6 +4880,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); // Without proposing, advance into the next slot, this should make us cross the threshold // number of skips, causing us to use the fallback. @@ -4809,6 +4911,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); self } @@ -4915,6 +5019,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); // Fill another epoch with blocks, should be enough to finalize. (Sneaky plus 1 because this // scenario starts at an epoch boundary). 
@@ -4954,6 +5060,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); self } @@ -5072,6 +5180,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); self } @@ -5149,6 +5259,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); + self } @@ -5214,6 +5327,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -5279,6 +5395,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -5343,6 +5462,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); + self } @@ -6682,6 +6804,8 @@ async fn post_validator_register_valid_v3() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_validator_register_gas_limit_mutation() { ApiTester::new_mev_tester() + .await + .test_builder_payload_rejected_when_gas_limit_incorrect() .await .test_payload_accepts_mutated_gas_limit() .await; diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 79dcc65ea3..0b33a76ff1 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -127,6 +127,11 @@ pub struct ChainSpec { pub deposit_network_id: u64, pub deposit_contract_address: Address, + /* + * Execution Specs + */ + pub gas_limit_adjustment_factor: u64, + /* * Altair hard fork params */ @@ -715,6 +720,11 @@ impl ChainSpec { .parse() .expect("chain spec deposit contract address"), + /* + * Execution Specs + */ + gas_limit_adjustment_factor: 1024, + /* * Altair hard fork params */ @@ -1029,6 +1039,11 @@ impl ChainSpec { .parse() .expect("chain spec deposit contract address"), + /* + * Execution Specs + */ + gas_limit_adjustment_factor: 1024, + /* * Altair hard fork params */ @@ -1285,6 +1300,10 @@ pub struct Config { #[serde(with = "serde_utils::address_hex")] deposit_contract_address: Address, + #[serde(default = "default_gas_limit_adjustment_factor")] + #[serde(with = "serde_utils::quoted_u64")] + gas_limit_adjustment_factor: u64, + #[serde(default = "default_gossip_max_size")] #[serde(with = "serde_utils::quoted_u64")] gossip_max_size: u64, @@ -1407,6 +1426,10 @@ const fn default_max_per_epoch_activation_churn_limit() -> u64 { 8 } +const fn default_gas_limit_adjustment_factor() -> u64 { + 1024 +} + const fn default_gossip_max_size() -> u64 { 10485760 } @@ -1659,6 +1682,8 @@ impl Config { deposit_network_id: spec.deposit_network_id, deposit_contract_address: spec.deposit_contract_address, + gas_limit_adjustment_factor: spec.gas_limit_adjustment_factor, + gossip_max_size: spec.gossip_max_size, max_request_blocks: spec.max_request_blocks, min_epochs_for_block_requests: 
spec.min_epochs_for_block_requests, @@ -1733,6 +1758,7 @@ impl Config { deposit_chain_id, deposit_network_id, deposit_contract_address, + gas_limit_adjustment_factor, gossip_max_size, min_epochs_for_block_requests, max_chunk_size, @@ -1794,6 +1820,7 @@ impl Config { deposit_chain_id, deposit_network_id, deposit_contract_address, + gas_limit_adjustment_factor, terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index b82a897da5..e68801840a 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -32,6 +32,7 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn prev_randao(&self) -> Hash256; fn block_number(&self) -> u64; fn timestamp(&self) -> u64; + fn extra_data(&self) -> VariableList; fn block_hash(&self) -> ExecutionBlockHash; fn fee_recipient(&self) -> Address; fn gas_limit(&self) -> u64; @@ -225,6 +226,13 @@ impl ExecPayload for FullPayload { }) } + fn extra_data<'a>(&'a self) -> VariableList { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.extra_data.clone() + }) + } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); @@ -357,6 +365,13 @@ impl ExecPayload for FullPayloadRef<'_, E> { }) } + fn extra_data<'a>(&'a self) -> VariableList { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.extra_data.clone() + }) + } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { map_full_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); @@ -542,6 +557,13 @@ impl ExecPayload for BlindedPayload { }) } + fn extra_data<'a>(&'a self) -> VariableList::MaxExtraDataBytes> { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.extra_data.clone() + }) + } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); @@ -643,6 +665,13 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { }) } + fn extra_data<'a>(&'a self) -> VariableList::MaxExtraDataBytes> { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.extra_data.clone() + }) + } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { map_blinded_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); @@ -745,6 +774,10 @@ macro_rules! 
impl_exec_payload_common { self.$wrapped_field.timestamp } + fn extra_data(&self) -> VariableList { + self.$wrapped_field.extra_data.clone() + } + fn block_hash(&self) -> ExecutionBlockHash { self.$wrapped_field.block_hash } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 7d4d229fef..427bcf5e9c 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -809,10 +809,13 @@ impl Tester { if expected_should_override_fcu.validator_is_connected { el.update_proposer_preparation( next_slot_epoch, - &[ProposerPreparationData { - validator_index: dbg!(proposer_index) as u64, - fee_recipient: Default::default(), - }], + [( + &ProposerPreparationData { + validator_index: dbg!(proposer_index) as u64, + fee_recipient: Default::default(), + }, + &None, + )], ) .await; } else { diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 0289fd4206..f664509304 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -3,9 +3,10 @@ use crate::execution_engine::{ }; use crate::transactions::transactions; use ethers_providers::Middleware; +use execution_layer::test_utils::DEFAULT_GAS_LIMIT; use execution_layer::{ BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, - PayloadStatus, + PayloadParameters, PayloadStatus, }; use fork_choice::ForkchoiceUpdateParameters; use reqwest::{header::CONTENT_TYPE, Client}; @@ -251,6 +252,7 @@ impl TestRig { */ let parent_hash = terminal_pow_block_hash; + let parent_gas_limit = DEFAULT_GAS_LIMIT; let timestamp = timestamp_now(); let prev_randao = Hash256::zero(); let head_root = Hash256::zero(); @@ -324,15 +326,22 @@ impl TestRig { Some(vec![]), None, ); + + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit, + proposer_gas_limit: None, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: TEST_FORK, + }; + let block_proposal_content_type = self .ee_a .execution_layer .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - TEST_FORK, &self.spec, None, BlockProductionVersion::FullV2, @@ -476,15 +485,22 @@ impl TestRig { Some(vec![]), None, ); + + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit, + proposer_gas_limit: None, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: TEST_FORK, + }; + let block_proposal_content_type = self .ee_a .execution_layer .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - TEST_FORK, &self.spec, None, BlockProductionVersion::FullV2, From 8e891a8bfd139dde3e63a5ed70bc8b76eea896bf Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Mon, 16 Dec 2024 14:44:02 +0900 Subject: [PATCH 12/25] Fix web3signer test fails on macOS (#6588) * Add lighthouse/key_legacy.p12 for macOS * Specify `-days 825` to meet Apple's requirements for TLS server certificates * Remove `-aes256` as it's ignored on exporting The following warning will appear: Warning: output encryption option -aes256 ignored with -export * Update certificates and keys --- testing/web3signer_tests/src/lib.rs | 6 +- testing/web3signer_tests/tls/generate.sh | 21 +++- .../web3signer_tests/tls/lighthouse/cert.pem | 58 
+++++----- .../web3signer_tests/tls/lighthouse/key.key | 100 +++++++++--------- .../web3signer_tests/tls/lighthouse/key.p12 | Bin 4371 -> 4387 bytes .../tls/lighthouse/key_legacy.p12 | Bin 0 -> 4221 bytes .../tls/lighthouse/web3signer.pem | 58 +++++----- .../web3signer_tests/tls/web3signer/cert.pem | 58 +++++----- .../web3signer_tests/tls/web3signer/key.key | 100 +++++++++--------- .../web3signer_tests/tls/web3signer/key.p12 | Bin 4371 -> 4387 bytes .../tls/web3signer/known_clients.txt | 2 +- 11 files changed, 210 insertions(+), 193 deletions(-) create mode 100644 testing/web3signer_tests/tls/lighthouse/key_legacy.p12 diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index a58dcb5fa0..bebc8fa13b 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -130,7 +130,11 @@ mod tests { } fn client_identity_path() -> PathBuf { - tls_dir().join("lighthouse").join("key.p12") + if cfg!(target_os = "macos") { + tls_dir().join("lighthouse").join("key_legacy.p12") + } else { + tls_dir().join("lighthouse").join("key.p12") + } } fn client_identity_password() -> String { diff --git a/testing/web3signer_tests/tls/generate.sh b/testing/web3signer_tests/tls/generate.sh index f918e87cf8..3b14dbddba 100755 --- a/testing/web3signer_tests/tls/generate.sh +++ b/testing/web3signer_tests/tls/generate.sh @@ -1,7 +1,20 @@ #!/bin/bash -openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout web3signer/key.key -out web3signer/cert.pem -config web3signer/config && -openssl pkcs12 -export -aes256 -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && + +# The lighthouse/key_legacy.p12 file is generated specifically for macOS because the default `openssl pkcs12` encoding +# algorithm in OpenSSL v3 is not compatible with the PKCS algorithm used by the Apple Security Framework. The client +# side (using the reqwest crate) relies on the Apple Security Framework to parse PKCS files. +# We don't need to generate web3signer/key_legacy.p12 because the compatibility issue doesn't occur on the web3signer +# side. It seems that web3signer (Java) uses its own implementation to parse PKCS files. +# See https://github.com/sigp/lighthouse/issues/6442#issuecomment-2469252651 + +# We specify `-days 825` when generating the certificate files because Apple requires TLS server certificates to have a +# validity period of 825 days or fewer. 
+# See https://github.com/sigp/lighthouse/issues/6442#issuecomment-2474979183 + +openssl req -x509 -sha256 -nodes -days 825 -newkey rsa:4096 -keyout web3signer/key.key -out web3signer/cert.pem -config web3signer/config && +openssl pkcs12 -export -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && cp web3signer/cert.pem lighthouse/web3signer.pem && -openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout lighthouse/key.key -out lighthouse/cert.pem -config lighthouse/config && -openssl pkcs12 -export -aes256 -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && +openssl req -x509 -sha256 -nodes -days 825 -newkey rsa:4096 -keyout lighthouse/key.key -out lighthouse/cert.pem -config lighthouse/config && +openssl pkcs12 -export -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && +openssl pkcs12 -export -legacy -out lighthouse/key_legacy.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && openssl x509 -noout -fingerprint -sha256 -inform pem -in lighthouse/cert.pem | cut -b 20-| sed "s/^/lighthouse /" > web3signer/known_clients.txt diff --git a/testing/web3signer_tests/tls/lighthouse/cert.pem b/testing/web3signer_tests/tls/lighthouse/cert.pem index 24b0a2e5c0..4aaf66b747 100644 --- a/testing/web3signer_tests/tls/lighthouse/cert.pem +++ b/testing/web3signer_tests/tls/lighthouse/cert.pem @@ -1,33 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFujCCA6KgAwIBAgIUXZijYo8W4/9dAq58ocFEbZDxohwwDQYJKoZIhvcNAQEL +MIIFuDCCA6CgAwIBAgIUa3O7icWD4W7c5yRMjG/EX422ODUwDQYJKoZIhvcNAQEL BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDApsaWdodGhvdXNlMCAXDTIzMDkyMDAyNTYzNloYDzIxMjMwODI3MDI1NjM2 -WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 -MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV -BAMMCmxpZ2h0aG91c2UwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC1 -R1M9NnRwUsqFvJzNWPKuY1PW7llwRRWCixiWNvcxukGTa6AMLZDrYO1Y7qlw5m52 -aHSA2fs2KyeA61yajG/BsLn1vmTtJMZXgLsG0MIqvhgOoh+ZZbl8biO0gQJSRSDE -jf0ogUVM9TCEt6ydbGnzgs8EESqvyXcreaXfmLI7jiX/BkwCdf+Ru+H3MF96QgAw -Oz1d8/fxYJvIpT/DOx4NuMZouSAcUVXgwcVb6JXeTg0xVcL33lluquhYDR0gD5Fe -V0fPth+e9XMAH7udim8E5wn2Ep8CAVoeVq6K9mBM3NqP7+2YmU//jLbkd6UvKPaI -0vps1zF9Bo8QewiRbM0IRse99ikCVZcjOcZSitw3kwTg59NjZ0Vk9R/2YQt/gGWM -VcR//EtbOZGqzGrLPFKOcWO85Ggz746Saj15N+bqT20hXHyiwYL8DLgJkMR2W9Nr -67Vyi9SWSM6rdRQlezlHq/yNEh+JuY7eoC3VeVw9K1ZXP+OKAwbpcnvd3uLwV91f -kpT6kjc6d2h4bK8fhvF16Em42JypQCl0xMhgg/8MFO+6ZLy5otWAdsSYyO5k9CAa -3zLeqd89dS7HNLdLZ0Y5SFWm6y5Kqu89ErIENafX5DxupHWsruiBV7zhDHNPaGcf -TPFe8xuDYsi155veOfEiDh4g+X1qjL8x8OEDjgsM3QIDAQABo1QwUjALBgNVHQ8E -BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV -HQ4EFgQU6r7QHkcEsWhEZHpcMpGxwKXQL9swDQYJKoZIhvcNAQELBQADggIBACyO -8xzqotye1J6xhDQCQnQF3dXaPTqfT31Ypg8UeU25V9N+bZO04CJKlOblukuvkedE -x1RDeqG3A81D4JOgTGFmFVoEF4iTk3NBrsHuMzph6ImHTd3TD+5iG5a3GL0i9PAI -dHTT6z6t2wlayjmHotqQ+N4A4msx8IPBRULcCmId319gpSDHsvt2wYbLdh+d9E2h -vI0VleJpJ7eoy05842VTkFJebriSpi75yFphKUnyAKlONiMN3o6eg90wpWdI+1rQ -js5lfm+pxYw8H6eSf+rl30m+amrxUlooqrSCHNVSO2c4+W5m/r3JfOiRqVUTxaO8 -0f/xYXo6SdRxdvJV18LEzOHURvkbqBjLoEfHbCC2EApevWAeCdjhvCBPl1IJZtFP -sYDpYtHhw69JmZ7Nj75cQyRtJMQ5S4GsJ/haYXNZPgRL1XBo1ntuc8K1cLZ2MucQ -1170+2pi3IvwmST+/+7+2fyms1AwF7rj2dVxNfPIvOxi6E9lHmPVxvpbuOYOEhex 
-XqTum/MjI17Qf6eoipk81ppCFtO9s3qNe9SBSjzYEYnsytaMdZSSjsOhE/IyYPHI -SICMjWE13du03Z5xWwK9i3UiFq+hIPhBHFPGkNFMmkQtcyS9lj9R0tKUmWdFPNa8 -nuhxn5kLUMriv3zsdhMPUC4NwM5XsopdWcuSxfnt +VQQDDApsaWdodGhvdXNlMB4XDTI0MTExNjIyMTI0NloXDTI3MDIxOTIyMTI0Nlow +azELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0eTES +MBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYDVQQD +DApsaWdodGhvdXNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsAg4 +CkW51XFC0ZlcLXOzAHHD3e1y2tCkvQLCC5YG4QGVnXtva4puSGprs5H2r46TM+92 +7EXqNls+UWARLJE8+cw6Jz2Ibpjyv9TwdHUYqlRjSsAJ1E9kFKWnQuzWSPUilY22 +KfkxkEfauAvL5qXBAX9C31E9t/QWWgFtiGetwk+MuVoqLFCifw2iKfKrKod/t0Ua +ykxm3PUi1LIjZq3yZIg6beiVIGNQ/FWcNK3NeR6LP7ZDvSWl1vJAQ/6EBTcNTYKb +B3rEiHmme20Vpl6QQMvzlZ+e+ZaU0JsycvEfKrBACvPXX1Bi1GVFFstb5XQ4a/f4 +p7LUQ9rJwOkm5mRLgrSkNzq4Nk1lPOIam5QFpdW4GBfeIUL0Q4K9io/fYsxF1DXh +fxCW1N6E6+RKhVG2cEdtnAmQxg9d8vIEMvFtuVMFMYjQ+qkJ5V0Ye11V/9lMo4Vf +H2ialSTLTKxoEjmYfCHXKu7JCba04uGEv9gzaX7Zk+uK9gN1FIMvDT3UIHZTDwtr +cm2kjn3wsuRiK3P974pAVAome+60jmH9M0IsBxLXilCI6aIcYwvHkfoSNwXQr1AI +6rBBA4o8df0OFvMp2/r1Ll9nLDTT7AxtjHu7C2HU46Fy9U01+oRiqW+UCY9+daMD +tQJMTkjfPwOU6b9KUOPKpraDnPubwNU6CXs6ySMCAwEAAaNUMFIwCwYDVR0PBAQD +AgQwMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwHQYDVR0O +BBYEFKbpk6hZNzlzv/AdKtsl6x+dgBo+MA0GCSqGSIb3DQEBCwUAA4ICAQCmICqz +X5WOhwUm6LJJwMvKgFoVkav6ZcG/bEPiLe4waM2BubTpa1KPke8kMSmd/eLRxOiU +o1Z4Wi+bDw/ZGZHhnj/bJBZei9O+uRV4RbHCBh/LutRjY5zrublXMTtmjxCIjjHK +nQnoFFqKelyUGdaOw1ttooRT2FSDriZ6LKJ9vrTx0eCPBPA0EyaxuaxX3e/qYfE6 +sdrseEZSsouAmNCQ6jHnrQlzjeGAE6tlSTC3NVWbDlDbnX6cdRF07kV5PxnfcoyO +HGM3hdrIk5mhLpXrNKZp1nI4Ecd6UKiMCLgVxfexRKVJn00IR1URotRXZ2H9hQnh +xT5CnEBM+9dXoiwIvU+QYpnxo7mc47I6VkvoBI05rnS10bliwAk20yZuqc8iYC7R +r+ISRnhAcSb0otnKvxQQqzRH4Fi13g4mIoxbPJq+xTrNomKe/ywUe5q1Dt8QMhEg +7Sv8yg4ErKEvWIk5N0JOe1PaysobWXkv5n+xH9eJneyuBHGdi8qXe+2JLkK7ZfKB +uuLZyQcbUxb0/FSOhvtYu+2hPUb7nCOFvheAafHJu1P0pOkP8NNpM9X+tNw8Orum +VVFO8rvOh4+pH8sXRZ4tUQ33mbQS96ZSuiMJYCQf6EDkqmtRkOHCAvKkEtRLm2yV +4IRAZKHZaeKYr1UXwaqzpwES+8ZZLjURkvqvnQ== -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.key b/testing/web3signer_tests/tls/lighthouse/key.key index d00b6c2122..2b510c6b6d 100644 --- a/testing/web3signer_tests/tls/lighthouse/key.key +++ b/testing/web3signer_tests/tls/lighthouse/key.key @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC1R1M9NnRwUsqF -vJzNWPKuY1PW7llwRRWCixiWNvcxukGTa6AMLZDrYO1Y7qlw5m52aHSA2fs2KyeA -61yajG/BsLn1vmTtJMZXgLsG0MIqvhgOoh+ZZbl8biO0gQJSRSDEjf0ogUVM9TCE -t6ydbGnzgs8EESqvyXcreaXfmLI7jiX/BkwCdf+Ru+H3MF96QgAwOz1d8/fxYJvI -pT/DOx4NuMZouSAcUVXgwcVb6JXeTg0xVcL33lluquhYDR0gD5FeV0fPth+e9XMA -H7udim8E5wn2Ep8CAVoeVq6K9mBM3NqP7+2YmU//jLbkd6UvKPaI0vps1zF9Bo8Q -ewiRbM0IRse99ikCVZcjOcZSitw3kwTg59NjZ0Vk9R/2YQt/gGWMVcR//EtbOZGq -zGrLPFKOcWO85Ggz746Saj15N+bqT20hXHyiwYL8DLgJkMR2W9Nr67Vyi9SWSM6r -dRQlezlHq/yNEh+JuY7eoC3VeVw9K1ZXP+OKAwbpcnvd3uLwV91fkpT6kjc6d2h4 -bK8fhvF16Em42JypQCl0xMhgg/8MFO+6ZLy5otWAdsSYyO5k9CAa3zLeqd89dS7H -NLdLZ0Y5SFWm6y5Kqu89ErIENafX5DxupHWsruiBV7zhDHNPaGcfTPFe8xuDYsi1 -55veOfEiDh4g+X1qjL8x8OEDjgsM3QIDAQABAoICAEP5a1KMPUwzF0Lfr1Jm1JUk -pLb26C2rkf3B56XIFZgddeJwHHMEkQ9Z6JYM5Bd0KJ6Y23rHgiXVN7plRvOiznMs -MAbgblroC8GbAUZ0eCJr5nxyOXQdS1jHufbA21x7FGbvsSqDkrdhR2C0uPLMyMvp -VHP7dey1mEyCkHrP+KFRU5kVxOG1WnBMqdY1Ws/uuMBdLk0xItttdOzfXhH4dHQD -wc5aAJrtusyNDFLC25Og49yIgpPMWe+gAYCm5jFz9PgRtVlDOwcxlX5J5+GSm7+U -XM1bPSmU1TSEH233JbQcqo4HkynB71ftbVUtMhEFhLBYoFO4u5Ncpr+wys0xJY4f -3aJRV5+gtlmAmsKN66GoMA10KNlLp2z7XMlx1EXegOHthcKfgf5D6LKRz8qZhknm 
-FFgAOg9Bak1mt1DighhPUJ0vLYU6K+u0ZXwysYygOkBJ/yj63ApuPCSTQb7U0JlL -JMgesy1om3rVdN0Oc7hNaxq7VwswkzUTUKS2ZvGozF3MmdPHNm5weJTb3NsWv8Qo -HiK1I88tY9oZ5r91SC82hMErmG4ElXFLxic1B29h3fsIe/l+WjmZRXixD9ugV0gj -CvNa8QD9K3hljlNrR6eSXeO2QOyxAEUr2N1MBlxrnAWZCzXKiTvTx1aKDYhJT0DY -zae/etTLHVjzgdH6GS33AoIBAQDaaWYHa9wkJIJPX4siVCatwWKGTjVfDb5Q9upf -twkxCf58pmbzUOXW3dbaz6S0npR0V6Wqh3S8HW7xaHgDZDMLJ1WxLJrgqDKU3Pqc -k7xnA/krWqoRVSOOGkPnSrnZo6AVc6FR+iwJjfuUu0rFDwiyuqvuXpwNsVwvAOoL -xIbaEbGUHiFsZamm2YkoxrEjXGFkZxQX9+n9f+IAiMxMQc0wezRREc8e61/mTovJ -QJ7ZDd7zLUR7Yeqciy59NOsD57cGtnp1K28I2eKLA4taghgd5bJjPkUaHg9j5Xf6 -nsxU2QCp9kpwXvtMxN7pERKWFsnmu8tfJOiUWCpp8SLbIl6nAoIBAQDUefKKjRLa -6quNW0rOGn2kx0K6sG7T45OhwvWXVjnPAjX3/2mAMALT1wc3t0iKDvpIEfMadW2S -O8x2FwyifdJXmkz943EZ/J5Tq1H0wr4NeClX4UlPIAx3CdFlCphqH6QfKtrpQ+Hf -+e8XzjVvdg8Y/RcbWgPgBtOh2oKT5QHDh13/994nH7GhVM7PjLUVvZVmNWaC77zr -bXcvJFF/81PAPWC2JoV6TL/CXvda2tG2clxbSfykfUBPBpeyEijMoxC4UMuCHhbp -NpLfKJQp9XNqbBG2K4jgLQ8Ipk6Vtia/hktLgORf/pbQ4PxEv7OP5e1AOreDg/CW -RnQtBb+/8czbAoIBABfDA8Cm8WpVNoAgKujvMs4QjgGCnLfcrOnuEw2awjs9lRxG -lki+cmLv+6IOmSK1Zf1KU9G7ru2QXjORZA0qZ4s9GkuOSMNMSUR8zh8ey46Bligr -UvlTw+x/2wdcz99nt9DdpZ1flE7tzYMe5UGPIykeufnS/TNYKmlKtivVk75B0ooE -xSof3Vczr4JqK3dnY4ki1cLNy/0yXookV+Wr+wDdRpHTWC9K+EH8JaUdjKqcobbf -I+Ywfu/NDJ++lBr2qKjoTWZV9VyHJ+hr2Etef/Uwujml2qq+vnnlyynPAPfyK+pR -y0NycfCmMoI0w0rk685YfAW75DnPZb3k6B/jG10CggEBAMxf2DoI5EAKRaUcUOHa -fUxIFhl4p8HMPy7zVkORPt2tZLf8xz/z7mRRirG+7FlPetJj4ZBrr09fkZVtKkwJ -9o8o7jGv2hSC9s/IFHb38tMF586N9nPTgenmWbF09ZHuiXEpSZPiJZvIzn/5a1Ch -IHiKyPUYKm4MYvhmM/+J4Z5v0KzrgJXlWHi0GJFu6KfWyaOcbdQ4QWG6009XAcWv -Cbn5z9KlTvKKbFDMA+UyYVG6wrdUfVzC1V6uGq+/49qiZuzDWlz4EFWWlsNsRsft -Pmz5Mjglu+zVqoZJYYGDydWjmT0w53qmae7U2hJOyqr5ILINSIOKH5qMfiboRr6c -GM0CggEAJTQD/jWjHDIZFRO4SmurNLoyY7bSXJsYAhl77j9Cw/G4vcE+erZYAhp3 -LYu2nrnA8498T9F3H1oKWnK7u4YXO8ViyQd73ql7iKrMjE98CjfGcTPCXwOcPAts -ZpM8ykgFTsJpXEFvIR5cyZ6XFSw2m/Z7CRDpmwQ8es4LpNnYA7V5Yu/zDE4h2/2T -NmftCiZvkxwgj6VyKumOxXBnGK6lB+b6YMTltRrgD/35zmJoKRdqyLb1szPJtQuh -HjRTa/BVPgA66xYFWhifRUiYKpc0bARTYofHeoDgu6yPzcHMuM70NQQGF+WWJySg -vc3Za4ClKSLmb3ZA9giTswYMev+3BQ== +MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCwCDgKRbnVcULR +mVwtc7MAccPd7XLa0KS9AsILlgbhAZWde29rim5IamuzkfavjpMz73bsReo2Wz5R +YBEskTz5zDonPYhumPK/1PB0dRiqVGNKwAnUT2QUpadC7NZI9SKVjbYp+TGQR9q4 +C8vmpcEBf0LfUT239BZaAW2IZ63CT4y5WiosUKJ/DaIp8qsqh3+3RRrKTGbc9SLU +siNmrfJkiDpt6JUgY1D8VZw0rc15Hos/tkO9JaXW8kBD/oQFNw1NgpsHesSIeaZ7 +bRWmXpBAy/OVn575lpTQmzJy8R8qsEAK89dfUGLUZUUWy1vldDhr9/instRD2snA +6SbmZEuCtKQ3Org2TWU84hqblAWl1bgYF94hQvRDgr2Kj99izEXUNeF/EJbU3oTr +5EqFUbZwR22cCZDGD13y8gQy8W25UwUxiND6qQnlXRh7XVX/2UyjhV8faJqVJMtM +rGgSOZh8Idcq7skJtrTi4YS/2DNpftmT64r2A3UUgy8NPdQgdlMPC2tybaSOffCy +5GIrc/3vikBUCiZ77rSOYf0zQiwHEteKUIjpohxjC8eR+hI3BdCvUAjqsEEDijx1 +/Q4W8ynb+vUuX2csNNPsDG2Me7sLYdTjoXL1TTX6hGKpb5QJj351owO1AkxOSN8/ +A5Tpv0pQ48qmtoOc+5vA1ToJezrJIwIDAQABAoICAAav4teBDpSTjBZD3Slc28/u +6NUYnORZe+iYnwZ4DIrZPij29D40ym7pAm5jFrWHyDYqddOqVEHJKMGuniuZpaQk +cSqy2IJbDRDi5fK5zNYSBQBlJMc/IzryXNUOA8kbU6HN+fDEpqPBSjqNOCtRRwoa +uE+dDNspsPx6UWh9IWMTfCUOZ8u6XguCWRN+3g6F8M2yS/I9AZG81898qBueczbR +qTNdQoAyEnS2sj7ODqArQniJIMmh3he5D15SrNefeVt+1D5uGEkwiQ9NqL58ZfGp +zcPa7HWB/H7Wmac3W0rwpxfDa5fgIq3Id93Sm9fh/yka1Z28c8cGgknxxKiIs6Jg +F7CKZIBJ3XxjcgytB223El/R8faHLpMJSPadDZ7uuU3yD/Qvp/JhRrdgkpE5bbzC +rWL92eVL86cbI/Hamup7VZMMfQpvjJg7FXPUr6ACKBetNkvXH0rqAkxHR8ZgfTeM +EwrpSWS0aktxxeMjzPq4DUaKKVGiN2KMDhbHEd5h2ovWMzyr14isohW81Z8w5R68 +F+2jq3IlVTLe06vmTRXAhOpwecj8UpraZjM1qyFpBd/lAolTjjMxzKJ2DcHlWI8Q +7e9LMvt1fj3bbzJVubdrITjdeom5CnDrmDGcErX9xzom8m3auYLszUENp/sfIHru 
+0DP+LKb2W4BOmXKs3VABAoIBAQDm4HNpOA7X7Jw7oowS4MoZOeeTjzcldT2AP9O7 +jFf2I2t5Ig0mIIrIrEJCL1X+A3i3RblV7lhU3Dpag8dhZUrXhydgnXKEMH/zz3gx +daCY1NO1fxAx5Y4J8VlCMIA7FpZI6sgRPjLBOFdkD34HcKHsUu/r3KQ1A1xZGLOU +o1kxF2WyORGBwn83kWzhzK9RIwFIdx67m7ZLzwoD6nQul4A6qq1EE+QI5x4UYpBx +ZvQsWUtj0EujIKJFszJczivwGQ86Aj0MB7EaHg+bWtYET1kUmDmc/72sksQJVcsK +wYtkv/MsznAvuWfHVjYJo47+Qs1zpuDKEUC1cu768LtlKpljAoIBAQDDL/T2KilF +qK8IW2u7nyWY8ksN/xJOVC79ozx2cGlR/zbeht051NiaLP8YMwVKl618Bw5L+aHG +a1xA0AeuTvuo5TK/ObrWzMAY6A35gXPMd8msN6SJzIKHZSZrcg2GXTSFkn7iCRJp +vl58VX4FubfrNIXy3NGbgF2muz3Rwvk7bj5Ur3NxX574RLSuftw01rDt2fnfYGKD +NfLXzoR3rJ/E+wmS7sjBJbltvmySDZOyjDDJwAgMrn45Xbh9rVT5w62BbAJ78OTY +O3CBf9t40FmeSBlelqwSY6tUmf02+B8FhMTJzxlaCup2qIPn5z0RHIZ43bnqZ/X1 +nkNSs8ko0f1BAoIBABCw9WcL+Ha/0mO1Uq8itTmxp/5RAkmg+jtFYgdTFCDlWqW9 +QnoZLC9p1Lh4N51PnvCRB98ghh5MdaOJl2aBLjH6wWwItfi8kOONgkEBIgUqjcu3 +TfJtiCFL44oXe43KCj9nSeOFPaIecqL3Q8NB71LohBPnNa/neEuwr3r1fENCT8Xc +vllFOHFKADcq1xnkj/kvM3eYwEsmwrCZyKB9r3WOVUxwq7HBE7mhjpPEP67dHcgv +jOhUOacUV3XCKgcHqMQm2Ub/X1xmA/bVUFerbONCRhgFnS7WxXlvTGiQqYU1I11/ +5zhsDQaqQunbe0ECj1vnGqVBLg5wKrrVoJalx8UCggEAE8438wqQKYtWR2jPY7hg +XkanqwHo353XLtFzfykk5rcY4DebFxUr7WkHcXMr5EfDyMQGhVsNOU8Hi2QQg3Vs +P9UR8yludgFMtLpHQLwL/gFhq2HyBjGERSzUWy61hJ7Mh4k36sO05Jn2iHM8WGRh +7zHjLaOOeVLrLdHuEezQ0WD8Xid3dVeYj+SY2OPygEIQrfHiUvI6zMmanJ9N/b68 +b4ZxkEE+iarESAh8h81s4T8sbCxaJL9H+5Yw9D+0UauzXWCSV/U3o2FUpy9MG9Q4 +Y8E5Icn0J+GJLwp5ESzYKP0x4rBrCCH3bJbo240xOx1D39vP06M85/FpL2kizkuQ +gQKCAQBTmQd/wT+0hH2JoEA2yCtB3ylDSmarZr9yZ83j3hy7oJOL48FhzhMTGjNR +BqmwbV3/2Vky85FYXYwcOIHbwI8twKtI4OxOiXLnLkYZ4nNXLm65ckR1SfJhRyrM +8K/alI2l3AxY/RkZiUnnRGEAmjG8hwzka1Y6j9zT7KhFTTBlg0YR5TOD8bsd9/rX +yVR+XkgyxIshgcI6w7MnwdGt+aAGokGjZv+k09vTOnaFF4rcJgOCZ9t4ymnG3m+v +Ac4I2b8BA46WCxA6zeNn5IeKZL0Ibgv1NGbTW3vEzu2D9VNU3pqTm9Pq3QpMAp85 +UyUzHP+SV/CL1Otbg/HjN6JGIcgY -----END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.p12 b/testing/web3signer_tests/tls/lighthouse/key.p12 index 73468fa084b6f5f1b036afd643967e361d004fc4..f2ef6d20e27199c5f1d39d25763776470f5a9dd0 100644 GIT binary patch literal 4387 zcmai&Ra6v=)`l4vx@7>7lx~Jba_DXtnxR8t=*|I#ZUg~I=@O9c2BjO7mhM(i@Oak$ zukX7&7klls-}hqgyXRdSj3A~(1)zZuM2i^M>_{c#4}1V7pb$Yc0z?oE{FQsa2#l(K zEsR10M($rJ3l)I!x262+0Kl++26TL|4w(1f5f97=Bx;sTUkt6I4HZ%Xm!U5v4WpuB zp@8u)u>Nl=DmFR_m>L7y0;vSBMFRrZfkZ!^_2N#+k1W@GxmYst$>a}1V9s}_QVf7TXLm8vRiqjmr5fqMq6vgT&F0?JILUTbe;WAc{ZIMSSMvm^W zl(@H@opDWnR3BLAXmSHr3tQ(>EW3Fg_aEHje>zh8*p{^z0CUFu>Kpx3q_9Gr#n?TZ z>`)8g&Kt(aO>cNY@6yl7S5Q)*L^2(a+Oj*1*6ZNUl0f^Y2g)9o!KtAJ*>xTHZJ(!3 zPhoP&Kx?7N{^Re5k~WhQ=}M+0kKYG%GSBAhGKlVF3nYpc1_Wo$>^R?xkJ5aiFgE@i zN2bJ5C3^$rKjofQ6;kxE`($Q?S=&QkGw=Zs`nd|Im-f|JlWPI}DipV}!B(QG{HXh@ zExIR2UnnbpF-FZuuO_hCcfL+26jt?7^!HcIhvGmUa`S$&x-n*{scr#e9BQ^yRqx_@ zzghpwMXedf^Nq?FMg-0wR7G8&l_L=l(#mMij3Y#UQO(NwDe(W9F&U*oq z5=ujZP@(2dJo{rTVMwmuQ`5fih(#+=>7a3(?pp?h99nYPQOBGL02~DTq3ZupDCVP+L?`s+V><3-@oXF|!QqW^-pzCRG2^Dg`mH94TAP zAY)GwbtK@!*qW7)YXk?OGs9z3s}eGYWv%RAcd)yyuM0>U>6pCr zU7c(5QDkLgHO1md&I0dw4G-oJuDb0QDY<7{>^yo5Q)`q~)1?&P?cEJ+rUwugos2uo z%Ggbf`CBb2-2Xmt&f4VK)J50VfRcoK+uX{(C^R5C@Ef@~*eZeU7gR_Q_Hp@rW6T~W zah0KEA(%zP7{7gDKy7j9c-9WAT7i?hyx2;*>c&0A6T|r-TLP~h5;+@OJ|&3MjNDZ`xH* z-F_E3T29J?E};;-a_BJ2gg^gQf4umfLX0g>FXV&C{G@A}etJ84!|&@$?m>g@ifzXZ zOc-;H!NBQ8N$R|U=7i9j%4=nX%FDY8AhKXU%;v|-4){#e_@`Pr8Zqwnf!^a9GktR$ z<1n4aMcu9MfCS&8YT|5*FZ91Sf|&#krH4-%dcBoehPhH*e&4^oou%R>9}IeNquxy` zIJ_~7RaYUCTtDLXp75*Q{c{U>%fRX0qJE;J6nZ&{{G&lxvI)Gn^7fNCc@8TPHw^|` zdBNUg589)xU+0YHEDV%iQwJ5kzRb=c4!QW)2th(;0!FZR^mS{&vTM1>x_+p|%z{gC zff{=p>frb~^ZtEk=f-#YGpb@T){xKa*^55u5To|5o7^Ow?(s?k( 
[GIT binary patch data omitted]

diff --git a/testing/web3signer_tests/tls/lighthouse/key_legacy.p12 b/testing/web3signer_tests/tls/lighthouse/key_legacy.p12
new file mode 100644
index 0000000000000000000000000000000000000000..c3394fae9af893142c035e087fa752c5225eefde
GIT binary patch
literal 4221
[GIT binary patch data omitted]
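The known_clients.txt change just below swaps in the fingerprint of the regenerated client certificate; the file simply pairs a client name with a colon-separated, upper-case SHA-256 digest of the certificate. A minimal sketch of rendering that form (assuming the `sha2` crate; the helper name is illustrative, not part of this patch):

    use sha2::{Digest, Sha256};

    // Render a DER-encoded certificate's SHA-256 digest in the
    // colon-separated upper-case hex form used by known_clients.txt.
    fn fingerprint_sha256(cert_der: &[u8]) -> String {
        Sha256::digest(cert_der)
            .iter()
            .map(|byte| format!("{byte:02X}"))
            .collect::<Vec<_>>()
            .join(":")
    }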
diff --git a/testing/web3signer_tests/tls/web3signer/known_clients.txt b/testing/web3signer_tests/tls/web3signer/known_clients.txt
index c4722fe587..86d61fba75 100644
--- a/testing/web3signer_tests/tls/web3signer/known_clients.txt
+++ b/testing/web3signer_tests/tls/web3signer/known_clients.txt
@@ -1 +1 @@
-lighthouse 02:D0:A8:C0:6A:59:90:40:54:67:D4:BD:AE:5A:D4:F5:14:A9:79:38:98:E0:62:93:C1:77:13:FC:B4:60:65:CE
+lighthouse 49:99:C9:A4:05:4C:EC:BE:FD:0B:C3:C3:C1:2F:A4:D3:AB:70:96:47:51:F5:5B:3B:37:65:31:56:18:B7:B8:AD

From 75d90795be0fe3ddbcb78402d35aab345dc88e2c Mon Sep 17 00:00:00 2001
From: Akihito Nakano
Date: Mon, 16 Dec 2024 14:44:06 +0900
Subject: [PATCH 13/25] Remove req_id from CustodyId (#6589)

* Remove req_id from CustodyId because it's not used

---
 beacon_node/lighthouse_network/src/service/api_types.rs | 1 -
 beacon_node/network/src/sync/network_context.rs | 3 +--
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git
a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index cb22815390..85fabbb0c3 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -67,7 +67,6 @@ pub struct SamplingRequestId(pub usize); #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub struct CustodyId { pub requester: CustodyRequester, - pub req_id: Id, } /// Downstream components that perform custody by root requests. diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index c4d987e858..b6b7b315f3 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -763,8 +763,7 @@ impl SyncNetworkContext { let requester = CustodyRequester(id); let mut request = ActiveCustodyRequest::new( block_root, - // TODO(das): req_id is duplicated here, also present in id - CustodyId { requester, req_id }, + CustodyId { requester }, &custody_indexes_to_fetch, self.log.clone(), ); From 1c5be34def7ea46297524180d3b5a1fd2b4c1ac7 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 16 Dec 2024 13:44:10 +0800 Subject: [PATCH 14/25] Write range sync tests in external event-driven form (#6618) * Write range sync tests in external event-driven form * Fix remaining test * Drop unused generics * Merge branch 'unstable' into range-sync-tests * Add reference to test author * Use async await * Fix failing test. Not sure how it was passing before without an EL. --- beacon_node/network/src/sync/manager.rs | 10 + .../src/sync/range_sync/block_storage.rs | 13 - .../src/sync/range_sync/chain_collection.rs | 21 +- .../network/src/sync/range_sync/mod.rs | 3 +- .../network/src/sync/range_sync/range.rs | 482 +----------------- .../network/src/sync/range_sync/sync_type.rs | 9 +- beacon_node/network/src/sync/tests/lookups.rs | 30 +- beacon_node/network/src/sync/tests/range.rs | 272 ++++++++++ 8 files changed, 328 insertions(+), 512 deletions(-) delete mode 100644 beacon_node/network/src/sync/range_sync/block_storage.rs diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 344e91711c..5d02be2b4c 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -362,6 +362,16 @@ impl SyncManager { self.sampling.get_request_status(block_root, index) } + #[cfg(test)] + pub(crate) fn range_sync_state(&self) -> super::range_sync::SyncChainStatus { + self.range_sync.state() + } + + #[cfg(test)] + pub(crate) fn update_execution_engine_state(&mut self, state: EngineState) { + self.handle_new_execution_engine_state(state); + } + fn network_globals(&self) -> &NetworkGlobals { self.network.network_globals() } diff --git a/beacon_node/network/src/sync/range_sync/block_storage.rs b/beacon_node/network/src/sync/range_sync/block_storage.rs deleted file mode 100644 index df49543a6b..0000000000 --- a/beacon_node/network/src/sync/range_sync/block_storage.rs +++ /dev/null @@ -1,13 +0,0 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use types::Hash256; - -/// Trait that helps maintain RangeSync's implementation split from the BeaconChain -pub trait BlockStorage { - fn is_block_known(&self, block_root: &Hash256) -> bool; -} - -impl BlockStorage for BeaconChain { - fn is_block_known(&self, block_root: &Hash256) -> bool { - self.block_is_known_to_fork_choice(block_root) - } -} diff --git 
a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 1217fbf8fe..c030d0a19e 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -3,12 +3,11 @@ //! Each chain type is stored in it's own map. A variety of helper functions are given along with //! this struct to simplify the logic of the other layers of sync. -use super::block_storage::BlockStorage; use super::chain::{ChainId, ProcessingResult, RemoveChain, SyncingChain}; use super::sync_type::RangeSyncType; use crate::metrics; use crate::sync::network_context::SyncNetworkContext; -use beacon_chain::BeaconChainTypes; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use fnv::FnvHashMap; use lighthouse_network::PeerId; use lighthouse_network::SyncInfo; @@ -37,10 +36,13 @@ pub enum RangeSyncState { Idle, } +pub type SyncChainStatus = + Result, &'static str>; + /// A collection of finalized and head chains currently being processed. -pub struct ChainCollection { +pub struct ChainCollection { /// The beacon chain for processing. - beacon_chain: Arc, + beacon_chain: Arc>, /// The set of finalized chains being synced. finalized_chains: FnvHashMap>, /// The set of head chains being synced. @@ -51,8 +53,8 @@ pub struct ChainCollection { log: slog::Logger, } -impl ChainCollection { - pub fn new(beacon_chain: Arc, log: slog::Logger) -> Self { +impl ChainCollection { + pub fn new(beacon_chain: Arc>, log: slog::Logger) -> Self { ChainCollection { beacon_chain, finalized_chains: FnvHashMap::default(), @@ -213,9 +215,7 @@ impl ChainCollection { } } - pub fn state( - &self, - ) -> Result, &'static str> { + pub fn state(&self) -> SyncChainStatus { match self.state { RangeSyncState::Finalized(ref syncing_id) => { let chain = self @@ -409,7 +409,8 @@ impl ChainCollection { let log_ref = &self.log; let is_outdated = |target_slot: &Slot, target_root: &Hash256| { - target_slot <= &local_finalized_slot || beacon_chain.is_block_known(target_root) + target_slot <= &local_finalized_slot + || beacon_chain.block_is_known_to_fork_choice(target_root) }; // Retain only head peers that remain relevant diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index d0f2f9217e..8f881fba90 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -2,7 +2,6 @@ //! peers. mod batch; -mod block_storage; mod chain; mod chain_collection; mod range; @@ -13,5 +12,7 @@ pub use batch::{ ByRangeRequestType, }; pub use chain::{BatchId, ChainId, EPOCHS_PER_BATCH}; +#[cfg(test)] +pub use chain_collection::SyncChainStatus; pub use range::RangeSync; pub use sync_type::RangeSyncType; diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 0ef99838de..78679403bb 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -39,9 +39,8 @@ //! Each chain is downloaded in batches of blocks. The batched blocks are processed sequentially //! and further batches are requested as current blocks are being processed. 
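As context for the hunks below: the `BlockStorage` trait deleted above existed only so the old in-module tests could substitute a fake store for a real `BeaconChain`. A minimal sketch of the removed indirection (the `Hash256` alias is a stub, used only to keep the sketch self-contained):

    use std::collections::HashSet;

    // Stub standing in for types::Hash256, purely for self-containment.
    type Hash256 = [u8; 32];

    // The indirection this patch removes: range sync was generic over any
    // block storage, so tests could inject a fake in place of a BeaconChain.
    trait BlockStorage {
        fn is_block_known(&self, block_root: &Hash256) -> bool;
    }

    struct FakeStorage {
        known_blocks: HashSet<Hash256>,
    }

    impl BlockStorage for FakeStorage {
        fn is_block_known(&self, block_root: &Hash256) -> bool {
            self.known_blocks.contains(block_root)
        }
    }

With the tests rewritten to drive a real harness through sync messages, the hunks below drop the trait and call `block_is_known_to_fork_choice` on the `BeaconChain` directly.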
-use super::block_storage::BlockStorage; use super::chain::{BatchId, ChainId, RemoveChain, SyncingChain}; -use super::chain_collection::ChainCollection; +use super::chain_collection::{ChainCollection, SyncChainStatus}; use super::sync_type::RangeSyncType; use crate::metrics; use crate::status::ToStatusMessage; @@ -56,7 +55,7 @@ use lru_cache::LRUTimeCache; use slog::{crit, debug, trace, warn}; use std::collections::HashMap; use std::sync::Arc; -use types::{Epoch, EthSpec, Hash256, Slot}; +use types::{Epoch, EthSpec, Hash256}; /// For how long we store failed finalized chains to prevent retries. const FAILED_CHAINS_EXPIRY_SECONDS: u64 = 30; @@ -64,27 +63,26 @@ const FAILED_CHAINS_EXPIRY_SECONDS: u64 = 30; /// The primary object dealing with long range/batch syncing. This contains all the active and /// non-active chains that need to be processed before the syncing is considered complete. This /// holds the current state of the long range sync. -pub struct RangeSync> { +pub struct RangeSync { /// The beacon chain for processing. - beacon_chain: Arc, + beacon_chain: Arc>, /// Last known sync info of our useful connected peers. We use this information to create Head /// chains after all finalized chains have ended. awaiting_head_peers: HashMap, /// A collection of chains that need to be downloaded. This stores any head or finalized chains /// that need to be downloaded. - chains: ChainCollection, + chains: ChainCollection, /// Chains that have failed and are stored to prevent being retried. failed_chains: LRUTimeCache, /// The syncing logger. log: slog::Logger, } -impl RangeSync +impl RangeSync where - C: BlockStorage + ToStatusMessage, T: BeaconChainTypes, { - pub fn new(beacon_chain: Arc, log: slog::Logger) -> Self { + pub fn new(beacon_chain: Arc>, log: slog::Logger) -> Self { RangeSync { beacon_chain: beacon_chain.clone(), chains: ChainCollection::new(beacon_chain, log.clone()), @@ -96,9 +94,7 @@ where } } - pub fn state( - &self, - ) -> Result, &'static str> { + pub fn state(&self) -> SyncChainStatus { self.chains.state() } @@ -382,465 +378,3 @@ where } } } - -#[cfg(test)] -mod tests { - use crate::network_beacon_processor::NetworkBeaconProcessor; - use crate::sync::SyncMessage; - use crate::NetworkMessage; - - use super::*; - use crate::sync::network_context::{BlockOrBlob, RangeRequestId}; - use beacon_chain::builder::Witness; - use beacon_chain::eth1_chain::CachingEth1Backend; - use beacon_chain::parking_lot::RwLock; - use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; - use beacon_chain::EngineState; - use beacon_processor::WorkEvent as BeaconWorkEvent; - use lighthouse_network::service::api_types::SyncRequestId; - use lighthouse_network::{ - rpc::StatusMessage, service::api_types::AppRequestId, NetworkConfig, NetworkGlobals, - }; - use slog::{o, Drain}; - use slot_clock::TestingSlotClock; - use std::collections::HashSet; - use store::MemoryStore; - use tokio::sync::mpsc; - use types::{FixedBytesExtended, ForkName, MinimalEthSpec as E}; - - #[derive(Debug)] - struct FakeStorage { - known_blocks: RwLock>, - status: RwLock, - } - - impl Default for FakeStorage { - fn default() -> Self { - FakeStorage { - known_blocks: RwLock::new(HashSet::new()), - status: RwLock::new(StatusMessage { - fork_digest: [0; 4], - finalized_root: Hash256::zero(), - finalized_epoch: 0usize.into(), - head_root: Hash256::zero(), - head_slot: 0usize.into(), - }), - } - } - } - - impl FakeStorage { - fn remember_block(&self, block_root: Hash256) { - 
self.known_blocks.write().insert(block_root); - } - - #[allow(dead_code)] - fn forget_block(&self, block_root: &Hash256) { - self.known_blocks.write().remove(block_root); - } - } - - impl BlockStorage for FakeStorage { - fn is_block_known(&self, block_root: &store::Hash256) -> bool { - self.known_blocks.read().contains(block_root) - } - } - - impl ToStatusMessage for FakeStorage { - fn status_message(&self) -> StatusMessage { - self.status.read().clone() - } - } - - type TestBeaconChainType = - Witness, E, MemoryStore, MemoryStore>; - - fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { - let decorator = slog_term::TermDecorator::new().build(); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).build().fuse(); - - if enabled { - slog::Logger::root(drain.filter_level(level).fuse(), o!()) - } else { - slog::Logger::root(drain.filter(|_| false).fuse(), o!()) - } - } - - #[allow(unused)] - struct TestRig { - log: slog::Logger, - /// To check what does sync send to the beacon processor. - beacon_processor_rx: mpsc::Receiver>, - /// To set up different scenarios where sync is told about known/unknown blocks. - chain: Arc, - /// Needed by range to handle communication with the network. - cx: SyncNetworkContext, - /// To check what the network receives from Range. - network_rx: mpsc::UnboundedReceiver>, - /// To modify what the network declares about various global variables, in particular about - /// the sync state of a peer. - globals: Arc>, - } - - impl RangeSync { - fn assert_state(&self, expected_state: RangeSyncType) { - assert_eq!( - self.state() - .expect("State is ok") - .expect("Range is syncing") - .0, - expected_state - ) - } - - #[allow(dead_code)] - fn assert_not_syncing(&self) { - assert!( - self.state().expect("State is ok").is_none(), - "Range should not be syncing." - ); - } - } - - impl TestRig { - fn local_info(&self) -> SyncInfo { - let StatusMessage { - fork_digest: _, - finalized_root, - finalized_epoch, - head_root, - head_slot, - } = self.chain.status.read().clone(); - SyncInfo { - head_slot, - head_root, - finalized_epoch, - finalized_root, - } - } - - /// Reads an BlocksByRange request to a given peer from the network receiver channel. 
- #[track_caller] - fn grab_request( - &mut self, - expected_peer: &PeerId, - fork_name: ForkName, - ) -> (AppRequestId, Option) { - let block_req_id = if let Ok(NetworkMessage::SendRequest { - peer_id, - request: _, - request_id, - }) = self.network_rx.try_recv() - { - assert_eq!(&peer_id, expected_peer); - request_id - } else { - panic!("Should have sent a batch request to the peer") - }; - let blob_req_id = if fork_name.deneb_enabled() { - if let Ok(NetworkMessage::SendRequest { - peer_id, - request: _, - request_id, - }) = self.network_rx.try_recv() - { - assert_eq!(&peer_id, expected_peer); - Some(request_id) - } else { - panic!("Should have sent a batch request to the peer") - } - } else { - None - }; - (block_req_id, blob_req_id) - } - - fn complete_range_block_and_blobs_response( - &mut self, - block_req: AppRequestId, - blob_req_opt: Option, - ) -> (ChainId, BatchId, Id) { - if blob_req_opt.is_some() { - match block_req { - AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }) => { - let _ = self - .cx - .range_block_and_blob_response(id, BlockOrBlob::Block(None)); - let response = self - .cx - .range_block_and_blob_response(id, BlockOrBlob::Blob(None)) - .unwrap(); - let (chain_id, batch_id) = - TestRig::unwrap_range_request_id(response.sender_id); - (chain_id, batch_id, id) - } - other => panic!("unexpected request {:?}", other), - } - } else { - match block_req { - AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }) => { - let response = self - .cx - .range_block_and_blob_response(id, BlockOrBlob::Block(None)) - .unwrap(); - let (chain_id, batch_id) = - TestRig::unwrap_range_request_id(response.sender_id); - (chain_id, batch_id, id) - } - other => panic!("unexpected request {:?}", other), - } - } - } - - fn unwrap_range_request_id(sender_id: RangeRequestId) -> (ChainId, BatchId) { - if let RangeRequestId::RangeSync { chain_id, batch_id } = sender_id { - (chain_id, batch_id) - } else { - panic!("expected RangeSync request: {:?}", sender_id) - } - } - - /// Produce a head peer - fn head_peer( - &self, - ) -> ( - PeerId, - SyncInfo, /* Local info */ - SyncInfo, /* Remote info */ - ) { - let local_info = self.local_info(); - - // Get a peer with an advanced head - let head_root = Hash256::random(); - let head_slot = local_info.head_slot + 1; - let remote_info = SyncInfo { - head_root, - head_slot, - ..local_info - }; - let peer_id = PeerId::random(); - (peer_id, local_info, remote_info) - } - - fn finalized_peer( - &self, - ) -> ( - PeerId, - SyncInfo, /* Local info */ - SyncInfo, /* Remote info */ - ) { - let local_info = self.local_info(); - - let finalized_root = Hash256::random(); - let finalized_epoch = local_info.finalized_epoch + 2; - let head_slot = finalized_epoch.start_slot(E::slots_per_epoch()); - let head_root = Hash256::random(); - let remote_info = SyncInfo { - finalized_epoch, - finalized_root, - head_slot, - head_root, - }; - - let peer_id = PeerId::random(); - (peer_id, local_info, remote_info) - } - - #[track_caller] - fn expect_empty_processor(&mut self) { - match self.beacon_processor_rx.try_recv() { - Ok(work) => { - panic!( - "Expected empty processor. 
Instead got {}", - work.work_type_str() - ); - } - Err(e) => match e { - mpsc::error::TryRecvError::Empty => {} - mpsc::error::TryRecvError::Disconnected => unreachable!("bad coded test?"), - }, - } - } - - #[track_caller] - fn expect_chain_segment(&mut self) { - match self.beacon_processor_rx.try_recv() { - Ok(work) => { - assert_eq!(work.work_type(), beacon_processor::WorkType::ChainSegment); - } - other => panic!("Expected chain segment process, found {:?}", other), - } - } - } - - fn range(log_enabled: bool) -> (TestRig, RangeSync) { - let log = build_log(slog::Level::Trace, log_enabled); - // Initialise a new beacon chain - let harness = BeaconChainHarness::>::builder(E) - .default_spec() - .logger(log.clone()) - .deterministic_keypairs(1) - .fresh_ephemeral_store() - .build(); - let chain = harness.chain; - - let fake_store = Arc::new(FakeStorage::default()); - let range_sync = RangeSync::::new( - fake_store.clone(), - log.new(o!("component" => "range")), - ); - let (network_tx, network_rx) = mpsc::unbounded_channel(); - let (sync_tx, _sync_rx) = mpsc::unbounded_channel::>(); - let network_config = Arc::new(NetworkConfig::default()); - let globals = Arc::new(NetworkGlobals::new_test_globals( - Vec::new(), - &log, - network_config, - chain.spec.clone(), - )); - let (network_beacon_processor, beacon_processor_rx) = - NetworkBeaconProcessor::null_for_testing( - globals.clone(), - sync_tx, - chain.clone(), - harness.runtime.task_executor.clone(), - log.clone(), - ); - let cx = SyncNetworkContext::new( - network_tx, - Arc::new(network_beacon_processor), - chain, - log.new(o!("component" => "network_context")), - ); - let test_rig = TestRig { - log, - beacon_processor_rx, - chain: fake_store, - cx, - network_rx, - globals, - }; - (test_rig, range_sync) - } - - #[test] - fn head_chain_removed_while_finalized_syncing() { - // NOTE: this is a regression test. - let (mut rig, mut range) = range(false); - - // Get a peer with an advanced head - let (head_peer, local_info, remote_info) = rig.head_peer(); - range.add_peer(&mut rig.cx, local_info, head_peer, remote_info); - range.assert_state(RangeSyncType::Head); - - let fork = rig - .cx - .chain - .spec - .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); - - // Sync should have requested a batch, grab the request. - let _ = rig.grab_request(&head_peer, fork); - - // Now get a peer with an advanced finalized epoch. - let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); - range.add_peer(&mut rig.cx, local_info, finalized_peer, remote_info); - range.assert_state(RangeSyncType::Finalized); - - // Sync should have requested a batch, grab the request - let _ = rig.grab_request(&finalized_peer, fork); - - // Fail the head chain by disconnecting the peer. - range.remove_peer(&mut rig.cx, &head_peer); - range.assert_state(RangeSyncType::Finalized); - } - - #[test] - fn state_update_while_purging() { - // NOTE: this is a regression test. - let (mut rig, mut range) = range(true); - - // Get a peer with an advanced head - let (head_peer, local_info, head_info) = rig.head_peer(); - let head_peer_root = head_info.head_root; - range.add_peer(&mut rig.cx, local_info, head_peer, head_info); - range.assert_state(RangeSyncType::Head); - - let fork = rig - .cx - .chain - .spec - .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); - - // Sync should have requested a batch, grab the request. - let _ = rig.grab_request(&head_peer, fork); - - // Now get a peer with an advanced finalized epoch. 
- let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); - let finalized_peer_root = remote_info.finalized_root; - range.add_peer(&mut rig.cx, local_info, finalized_peer, remote_info); - range.assert_state(RangeSyncType::Finalized); - - // Sync should have requested a batch, grab the request - let _ = rig.grab_request(&finalized_peer, fork); - - // Now the chain knows both chains target roots. - rig.chain.remember_block(head_peer_root); - rig.chain.remember_block(finalized_peer_root); - - // Add an additional peer to the second chain to make range update it's status - let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); - range.add_peer(&mut rig.cx, local_info, finalized_peer, remote_info); - } - - #[test] - fn pause_and_resume_on_ee_offline() { - let (mut rig, mut range) = range(true); - let fork = rig - .cx - .chain - .spec - .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); - - // add some peers - let (peer1, local_info, head_info) = rig.head_peer(); - range.add_peer(&mut rig.cx, local_info, peer1, head_info); - let (block_req, blob_req_opt) = rig.grab_request(&peer1, fork); - - let (chain1, batch1, id1) = - rig.complete_range_block_and_blobs_response(block_req, blob_req_opt); - - // make the ee offline - rig.cx.update_execution_engine_state(EngineState::Offline); - - // send the response to the request - range.blocks_by_range_response(&mut rig.cx, peer1, chain1, batch1, id1, vec![]); - - // the beacon processor shouldn't have received any work - rig.expect_empty_processor(); - - // while the ee is offline, more peers might arrive. Add a new finalized peer. - let (peer2, local_info, finalized_info) = rig.finalized_peer(); - range.add_peer(&mut rig.cx, local_info, peer2, finalized_info); - let (block_req, blob_req_opt) = rig.grab_request(&peer2, fork); - - let (chain2, batch2, id2) = - rig.complete_range_block_and_blobs_response(block_req, blob_req_opt); - - // send the response to the request - range.blocks_by_range_response(&mut rig.cx, peer2, chain2, batch2, id2, vec![]); - - // the beacon processor shouldn't have received any work - rig.expect_empty_processor(); - - // make the beacon processor available again. - rig.cx.update_execution_engine_state(EngineState::Online); - - // now resume range, we should have two processing requests in the beacon processor. - range.resume(&mut rig.cx); - - rig.expect_chain_segment(); - rig.expect_chain_segment(); - } -} diff --git a/beacon_node/network/src/sync/range_sync/sync_type.rs b/beacon_node/network/src/sync/range_sync/sync_type.rs index d6ffd4a5df..4ff7e39310 100644 --- a/beacon_node/network/src/sync/range_sync/sync_type.rs +++ b/beacon_node/network/src/sync/range_sync/sync_type.rs @@ -1,10 +1,9 @@ //! Contains logic about identifying which Sync to perform given PeerSyncInfo of ourselves and //! of a remote. +use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::SyncInfo; -use super::block_storage::BlockStorage; - /// The type of Range sync that should be done relative to our current state. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum RangeSyncType { @@ -17,8 +16,8 @@ pub enum RangeSyncType { impl RangeSyncType { /// Determines the type of sync given our local `PeerSyncInfo` and the remote's /// `PeerSyncInfo`. - pub fn new( - chain: &C, + pub fn new( + chain: &BeaconChain, local_info: &SyncInfo, remote_info: &SyncInfo, ) -> RangeSyncType { @@ -29,7 +28,7 @@ impl RangeSyncType { // not seen the finalized hash before. 
if remote_info.finalized_epoch > local_info.finalized_epoch - && !chain.is_block_known(&remote_info.finalized_root) + && !chain.block_is_known_to_fork_choice(&remote_info.finalized_root) { RangeSyncType::Finalized } else { diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 9f2c9ef66f..94aacad3e8 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -83,6 +83,7 @@ impl TestRig { .logger(log.clone()) .deterministic_keypairs(1) .fresh_ephemeral_store() + .mock_execution_layer() .testing_slot_clock(TestingSlotClock::new( Slot::new(0), Duration::from_secs(0), @@ -144,7 +145,7 @@ impl TestRig { } } - fn test_setup() -> Self { + pub fn test_setup() -> Self { Self::test_setup_with_config(None) } @@ -168,11 +169,11 @@ impl TestRig { } } - fn log(&self, msg: &str) { + pub fn log(&self, msg: &str) { info!(self.log, "TEST_RIG"; "msg" => msg); } - fn after_deneb(&self) -> bool { + pub fn after_deneb(&self) -> bool { matches!(self.fork_name, ForkName::Deneb | ForkName::Electra) } @@ -238,7 +239,7 @@ impl TestRig { (parent, block, parent_root, block_root) } - fn send_sync_message(&mut self, sync_message: SyncMessage) { + pub fn send_sync_message(&mut self, sync_message: SyncMessage) { self.sync_manager.handle_message(sync_message); } @@ -369,7 +370,7 @@ impl TestRig { self.expect_empty_network(); } - fn new_connected_peer(&mut self) -> PeerId { + pub fn new_connected_peer(&mut self) -> PeerId { self.network_globals .peers .write() @@ -811,7 +812,7 @@ impl TestRig { } } - fn peer_disconnected(&mut self, peer_id: PeerId) { + pub fn peer_disconnected(&mut self, peer_id: PeerId) { self.send_sync_message(SyncMessage::Disconnect(peer_id)); } @@ -827,7 +828,7 @@ impl TestRig { } } - fn pop_received_network_event) -> Option>( + pub fn pop_received_network_event) -> Option>( &mut self, predicate_transform: F, ) -> Result { @@ -847,7 +848,7 @@ impl TestRig { } } - fn pop_received_processor_event) -> Option>( + pub fn pop_received_processor_event) -> Option>( &mut self, predicate_transform: F, ) -> Result { @@ -871,6 +872,16 @@ impl TestRig { } } + pub fn expect_empty_processor(&mut self) { + self.drain_processor_rx(); + if !self.beacon_processor_rx_queue.is_empty() { + panic!( + "Expected processor to be empty, but has events: {:?}", + self.beacon_processor_rx_queue + ); + } + } + fn find_block_lookup_request( &mut self, for_block: Hash256, @@ -2173,7 +2184,8 @@ fn custody_lookup_happy_path() { mod deneb_only { use super::*; use beacon_chain::{ - block_verification_types::RpcBlock, data_availability_checker::AvailabilityCheckError, + block_verification_types::{AsBlock, RpcBlock}, + data_availability_checker::AvailabilityCheckError, }; use ssz_types::VariableList; use std::collections::VecDeque; diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index 8b13789179..6faa8b7247 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -1 +1,273 @@ +use super::*; +use crate::status::ToStatusMessage; +use crate::sync::manager::SLOT_IMPORT_TOLERANCE; +use crate::sync::range_sync::RangeSyncType; +use crate::sync::SyncMessage; +use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; +use beacon_chain::EngineState; +use lighthouse_network::rpc::{RequestType, StatusMessage}; +use lighthouse_network::service::api_types::{AppRequestId, Id, SyncRequestId}; +use lighthouse_network::{PeerId, 
SyncInfo}; +use std::time::Duration; +use types::{EthSpec, Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot}; +const D: Duration = Duration::new(0, 0); + +impl TestRig { + /// Produce a head peer with an advanced head + fn add_head_peer(&mut self) -> PeerId { + self.add_head_peer_with_root(Hash256::random()) + } + + /// Produce a head peer with an advanced head + fn add_head_peer_with_root(&mut self, head_root: Hash256) -> PeerId { + let local_info = self.local_info(); + self.add_peer(SyncInfo { + head_root, + head_slot: local_info.head_slot + 1 + Slot::new(SLOT_IMPORT_TOLERANCE as u64), + ..local_info + }) + } + + // Produce a finalized peer with an advanced finalized epoch + fn add_finalized_peer(&mut self) -> PeerId { + self.add_finalized_peer_with_root(Hash256::random()) + } + + // Produce a finalized peer with an advanced finalized epoch + fn add_finalized_peer_with_root(&mut self, finalized_root: Hash256) -> PeerId { + let local_info = self.local_info(); + let finalized_epoch = local_info.finalized_epoch + 2; + self.add_peer(SyncInfo { + finalized_epoch, + finalized_root, + head_slot: finalized_epoch.start_slot(E::slots_per_epoch()), + head_root: Hash256::random(), + }) + } + + fn local_info(&self) -> SyncInfo { + let StatusMessage { + fork_digest: _, + finalized_root, + finalized_epoch, + head_root, + head_slot, + } = self.harness.chain.status_message(); + SyncInfo { + head_slot, + head_root, + finalized_epoch, + finalized_root, + } + } + + fn add_peer(&mut self, remote_info: SyncInfo) -> PeerId { + // Create valid peer known to network globals + let peer_id = self.new_connected_peer(); + // Send peer to sync + self.send_sync_message(SyncMessage::AddPeer(peer_id, remote_info.clone())); + peer_id + } + + fn assert_state(&self, state: RangeSyncType) { + assert_eq!( + self.sync_manager + .range_sync_state() + .expect("State is ok") + .expect("Range should be syncing") + .0, + state, + "not expected range sync state" + ); + } + + #[track_caller] + fn expect_chain_segment(&mut self) { + self.pop_received_processor_event(|ev| { + (ev.work_type() == beacon_processor::WorkType::ChainSegment).then_some(()) + }) + .unwrap_or_else(|e| panic!("Expect ChainSegment work event: {e:?}")); + } + + fn update_execution_engine_state(&mut self, state: EngineState) { + self.log(&format!("execution engine state updated: {state:?}")); + self.sync_manager.update_execution_engine_state(state); + } + + fn find_blocks_by_range_request(&mut self, target_peer_id: &PeerId) -> (Id, Option) { + let block_req_id = self + .pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + peer_id, + request: RequestType::BlocksByRange(_), + request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + } if peer_id == target_peer_id => Some(*id), + _ => None, + }) + .expect("Should have a blocks by range request"); + + let blob_req_id = if self.after_deneb() { + Some( + self.pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + peer_id, + request: RequestType::BlobsByRange(_), + request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + } if peer_id == target_peer_id => Some(*id), + _ => None, + }) + .expect("Should have a blobs by range request"), + ) + } else { + None + }; + + (block_req_id, blob_req_id) + } + + fn find_and_complete_blocks_by_range_request(&mut self, target_peer_id: PeerId) { + let (blocks_req_id, blobs_req_id) = self.find_blocks_by_range_request(&target_peer_id); + + // Complete the request with a single stream termination + 
self.log(&format!( + "Completing BlocksByRange request {blocks_req_id} with empty stream" + )); + self.send_sync_message(SyncMessage::RpcBlock { + request_id: SyncRequestId::RangeBlockAndBlobs { id: blocks_req_id }, + peer_id: target_peer_id, + beacon_block: None, + seen_timestamp: D, + }); + + if let Some(blobs_req_id) = blobs_req_id { + // Complete the request with a single stream termination + self.log(&format!( + "Completing BlobsByRange request {blobs_req_id} with empty stream" + )); + self.send_sync_message(SyncMessage::RpcBlob { + request_id: SyncRequestId::RangeBlockAndBlobs { id: blobs_req_id }, + peer_id: target_peer_id, + blob_sidecar: None, + seen_timestamp: D, + }); + } + } + + async fn create_canonical_block(&mut self) -> SignedBeaconBlock { + self.harness.advance_slot(); + + let block_root = self + .harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness + .chain + .store + .get_full_block(&block_root) + .unwrap() + .unwrap() + } + + async fn remember_block(&mut self, block: SignedBeaconBlock) { + self.harness + .process_block(block.slot(), block.canonical_root(), (block.into(), None)) + .await + .unwrap(); + } +} + +#[test] +fn head_chain_removed_while_finalized_syncing() { + // NOTE: this is a regression test. + // Added in PR https://github.com/sigp/lighthouse/pull/2821 + let mut rig = TestRig::test_setup(); + + // Get a peer with an advanced head + let head_peer = rig.add_head_peer(); + rig.assert_state(RangeSyncType::Head); + + // Sync should have requested a batch, grab the request. + let _ = rig.find_blocks_by_range_request(&head_peer); + + // Now get a peer with an advanced finalized epoch. + let finalized_peer = rig.add_finalized_peer(); + rig.assert_state(RangeSyncType::Finalized); + + // Sync should have requested a batch, grab the request + let _ = rig.find_blocks_by_range_request(&finalized_peer); + + // Fail the head chain by disconnecting the peer. + rig.peer_disconnected(head_peer); + rig.assert_state(RangeSyncType::Finalized); +} + +#[tokio::test] +async fn state_update_while_purging() { + // NOTE: this is a regression test. + // Added in PR https://github.com/sigp/lighthouse/pull/2827 + let mut rig = TestRig::test_setup(); + + // Create blocks on a separate harness + let mut rig_2 = TestRig::test_setup(); + // Need to create blocks that can be inserted into the fork-choice and fit the "known + // conditions" below. + let head_peer_block = rig_2.create_canonical_block().await; + let head_peer_root = head_peer_block.canonical_root(); + let finalized_peer_block = rig_2.create_canonical_block().await; + let finalized_peer_root = finalized_peer_block.canonical_root(); + + // Get a peer with an advanced head + let head_peer = rig.add_head_peer_with_root(head_peer_root); + rig.assert_state(RangeSyncType::Head); + + // Sync should have requested a batch, grab the request. + let _ = rig.find_blocks_by_range_request(&head_peer); + + // Now get a peer with an advanced finalized epoch. + let finalized_peer = rig.add_finalized_peer_with_root(finalized_peer_root); + rig.assert_state(RangeSyncType::Finalized); + + // Sync should have requested a batch, grab the request + let _ = rig.find_blocks_by_range_request(&finalized_peer); + + // Now the chain knows both chains target roots. 
+ rig.remember_block(head_peer_block).await;
+ rig.remember_block(finalized_peer_block).await;
+
+ // Add an additional peer to the second chain to make range update its status
+ rig.add_finalized_peer();
+}
+
+#[test]
+fn pause_and_resume_on_ee_offline() {
+ let mut rig = TestRig::test_setup();
+
+ // add some peers
+ let peer1 = rig.add_head_peer();
+ // make the ee offline
+ rig.update_execution_engine_state(EngineState::Offline);
+ // send the response to the request
+ rig.find_and_complete_blocks_by_range_request(peer1);
+ // the beacon processor shouldn't have received any work
+ rig.expect_empty_processor();
+
+ // while the ee is offline, more peers might arrive. Add a new finalized peer.
+ let peer2 = rig.add_finalized_peer();
+
+ // send the response to the request
+ rig.find_and_complete_blocks_by_range_request(peer2);
+ // the beacon processor shouldn't have received any work
+ rig.expect_empty_processor();
+ // make the beacon processor available again.
+ // update_execution_engine_state implicitly calls resume
+ // now resume range, we should have two processing requests in the beacon processor.
+ rig.update_execution_engine_state(EngineState::Online);
+
+ rig.expect_chain_segment();
+ rig.expect_chain_segment();
+}

From 847c8019c7867e3eaf65168e5259ea33e7e0eb5a Mon Sep 17 00:00:00 2001
From: Jimmy Chen
Date: Mon, 16 Dec 2024 16:44:14 +1100
Subject: [PATCH 15/25] Fix peer down-scoring behaviour when gossip blobs/columns are received after `getBlobs` or reconstruction (#6686)

* Fix peer disconnection when gossip blobs/columns are received after they have already been received from the EL or made available via column reconstruction.

---
 .../gossip_methods.rs | 26 +++++++++++++++++--
 1 file changed, 24 insertions(+), 2 deletions(-)

diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs
index 4fc83b0923..f3c48e42f0 100644
--- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs
+++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs
@@ -710,8 +710,19 @@ impl NetworkBeaconProcessor {
 MessageAcceptance::Reject,
 );
 }
+ GossipDataColumnError::PriorKnown { .. } => {
+ // Data column is available via either the EL or reconstruction.
+ // Do not penalise the peer.
+ // Gossip filter should filter any duplicates received after this.
+ debug!(
+ self.log,
+ "Received already available column sidecar. Ignoring the column sidecar";
+ "slot" => %slot,
+ "block_root" => %block_root,
+ "index" => %index,
+ )
+ }
 GossipDataColumnError::FutureSlot { .. }
- | GossipDataColumnError::PriorKnown { .. }
 | GossipDataColumnError::PastFinalizedSlot { .. } => {
 debug!(
 self.log,
@@ -852,7 +863,18 @@ impl NetworkBeaconProcessor {
 MessageAcceptance::Reject,
 );
 }
- GossipBlobError::FutureSlot { .. } | GossipBlobError::RepeatBlob { .. } => {
+ GossipBlobError::RepeatBlob { .. } => {
+ // We may have received the blob from the EL. Do not penalise the peer.
+ // Gossip filter should filter any duplicates received after this.
+ debug!(
+ self.log,
+ "Received already available blob sidecar. Ignoring the blob sidecar";
+ "slot" => %slot,
+ "root" => %root,
+ "index" => %index,
+ )
+ }
+ GossipBlobError::FutureSlot { .. } => {
 debug!(
 self.log,
 "Could not verify blob sidecar for gossip. Ignoring the blob sidecar";

From 02cb2d68ff6f5bc3a4bc34baff9926d6b449e144 Mon Sep 17 00:00:00 2001
From: Daniel Knopik <107140945+dknopik@users.noreply.github.com>
Date: Tue, 17 Dec 2024 01:40:35 +0100
Subject: [PATCH 16/25] Enable lints for tests only running optimized (#6664)

* enable linting optimized-only tests
* fix automatically fixable or obvious lints
* fix suspicious_open_options by removing manual options
* fix `await_holding_lock`s
* avoid failing lint due to the now-disabled `#[cfg(debug_assertions)]`
* reduce future sizes in tests
* fix accidentally flipped assert logic
* restore holding lock for web3signer download
* Merge branch 'unstable' into lint-opt-tests

---
 Makefile | 2 +-
 account_manager/src/validator/exit.rs | 2 +-
 .../beacon_chain/src/shuffling_cache.rs | 2 +-
 .../tests/attestation_production.rs | 4 +-
 .../tests/attestation_verification.rs | 42 +-
 beacon_node/beacon_chain/tests/bellatrix.rs | 14 +-
 beacon_node/beacon_chain/tests/capella.rs | 8 +-
 .../tests/payload_invalidation.rs | 4 +-
 beacon_node/beacon_chain/tests/store_tests.rs | 93 ++--
 .../tests/sync_committee_verification.rs | 4 +-
 beacon_node/beacon_chain/tests/tests.rs | 5 +-
 .../tests/broadcast_validation_tests.rs | 18 +-
 .../http_api/tests/interactive_tests.rs | 2 +-
 beacon_node/http_api/tests/status_tests.rs | 30 +-
 beacon_node/http_api/tests/tests.rs | 129 +++---
 .../src/service/gossip_cache.rs | 23 +-
 .../src/network_beacon_processor/tests.rs | 2 +-
 beacon_node/network/src/service/tests.rs | 402 +++++++++---------
 beacon_node/operation_pool/src/lib.rs | 62 ++-
 .../eth2_wallet_manager/src/wallet_manager.rs | 32 +-
 consensus/fork_choice/tests/tests.rs | 26 +-
 crypto/eth2_keystore/tests/eip2335_vectors.rs | 4 +-
 crypto/eth2_keystore/tests/tests.rs | 14 +-
 crypto/eth2_wallet/tests/tests.rs | 13 +-
 lighthouse/tests/account_manager.rs | 28 +-
 lighthouse/tests/beacon_node.rs | 51 ++-
 lighthouse/tests/boot_node.rs | 4 +-
 lighthouse/tests/validator_client.rs | 8 +-
 lighthouse/tests/validator_manager.rs | 16 +-
 testing/web3signer_tests/src/lib.rs | 4 +-
 validator_client/http_api/src/tests.rs | 15 +-
 .../http_api/src/tests/keystores.rs | 65 +--
 validator_manager/src/import_validators.rs | 4 +-
 validator_manager/src/move_validators.rs | 14 +-
 34 files changed, 572 insertions(+), 574 deletions(-)

diff --git a/Makefile b/Makefile
index ab239c94d3..958abf8705 100644
--- a/Makefile
+++ b/Makefile
@@ -204,7 +204,7 @@ test-full: cargo-fmt test-release test-debug test-ef test-exec-engine

 # Lints the code for bad style and potentially unsafe arithmetic using Clippy.
 # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints.
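The `lint` recipe below turns debug assertions off while linting, so clippy also sees code that is compiled out of debug builds. A minimal illustration (a hypothetical function, not taken from this diff):

    // With `-C debug-assertions=no`, cfg(debug_assertions) is false, so
    // clippy checks the not(debug_assertions) arm below, which a default
    // debug-profile `cargo clippy` run would skip entirely.
    fn double(x: u64) -> u64 {
        #[cfg(debug_assertions)]
        return x * 2;
        #[cfg(not(debug_assertions))]
        return x.saturating_mul(2);
    }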
lint: - cargo clippy --workspace --benches --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \ + RUSTFLAGS="-C debug-assertions=no $(RUSTFLAGS)" cargo clippy --workspace --benches --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \ -D clippy::fn_to_numeric_cast_any \ -D clippy::manual_let_else \ -D clippy::large_stack_frames \ diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 3fb0e50d22..ea1a24da1f 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -409,6 +409,6 @@ mod tests { ) .unwrap(); - assert_eq!(expected_pk, kp.pk.into()); + assert_eq!(expected_pk, kp.pk); } } diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index a662cc49c9..da1d60db17 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -512,7 +512,7 @@ mod test { } assert!( - !cache.contains(&shuffling_id_and_committee_caches.get(0).unwrap().0), + !cache.contains(&shuffling_id_and_committee_caches.first().unwrap().0), "should not contain oldest epoch shuffling id" ); assert_eq!( diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 0b121356b9..87fefe7114 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -70,12 +70,12 @@ async fn produces_attestations_from_attestation_simulator_service() { } // Compare the prometheus metrics that evaluates the performance of the unaggregated attestations - let hit_prometheus_metrics = vec![ + let hit_prometheus_metrics = [ metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL, metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL, metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL, ]; - let miss_prometheus_metrics = vec![ + let miss_prometheus_metrics = [ metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL, metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL, metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL, diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index e168cbb6f4..dcc63ddf62 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -431,10 +431,12 @@ impl GossipTester { .chain .verify_aggregated_attestation_for_gossip(&aggregate) .err() - .expect(&format!( - "{} should error during verify_aggregated_attestation_for_gossip", - desc - )); + .unwrap_or_else(|| { + panic!( + "{} should error during verify_aggregated_attestation_for_gossip", + desc + ) + }); inspect_err(&self, err); /* @@ -449,10 +451,12 @@ impl GossipTester { .unwrap(); assert_eq!(results.len(), 2); - let batch_err = results.pop().unwrap().err().expect(&format!( - "{} should error during batch_verify_aggregated_attestations_for_gossip", - desc - )); + let batch_err = results.pop().unwrap().err().unwrap_or_else(|| { + panic!( + "{} should error during batch_verify_aggregated_attestations_for_gossip", + desc + ) + }); inspect_err(&self, batch_err); self @@ -475,10 +479,12 @@ impl GossipTester { .chain .verify_unaggregated_attestation_for_gossip(&attn, Some(subnet_id)) .err() - .expect(&format!( - "{} should error during 
verify_unaggregated_attestation_for_gossip", - desc - )); + .unwrap_or_else(|| { + panic!( + "{} should error during verify_unaggregated_attestation_for_gossip", + desc + ) + }); inspect_err(&self, err); /* @@ -496,10 +502,12 @@ impl GossipTester { ) .unwrap(); assert_eq!(results.len(), 2); - let batch_err = results.pop().unwrap().err().expect(&format!( - "{} should error during batch_verify_unaggregated_attestations_for_gossip", - desc - )); + let batch_err = results.pop().unwrap().err().unwrap_or_else(|| { + panic!( + "{} should error during batch_verify_unaggregated_attestations_for_gossip", + desc + ) + }); inspect_err(&self, batch_err); self @@ -816,7 +824,7 @@ async fn aggregated_gossip_verification() { let (index, sk) = tester.non_aggregator(); *a = SignedAggregateAndProof::from_aggregate( index as u64, - tester.valid_aggregate.message().aggregate().clone(), + tester.valid_aggregate.message().aggregate(), None, &sk, &chain.canonical_head.cached_head().head_fork(), diff --git a/beacon_node/beacon_chain/tests/bellatrix.rs b/beacon_node/beacon_chain/tests/bellatrix.rs index 5bd3452623..5080b0890b 100644 --- a/beacon_node/beacon_chain/tests/bellatrix.rs +++ b/beacon_node/beacon_chain/tests/bellatrix.rs @@ -82,7 +82,7 @@ async fn merge_with_terminal_block_hash_override() { let block = &harness.chain.head_snapshot().beacon_block; - let execution_payload = block.message().body().execution_payload().unwrap().clone(); + let execution_payload = block.message().body().execution_payload().unwrap(); if i == 0 { assert_eq!(execution_payload.block_hash(), genesis_pow_block_hash); } @@ -133,7 +133,7 @@ async fn base_altair_bellatrix_with_terminal_block_after_fork() { * Do the Bellatrix fork, without a terminal PoW block. */ - harness.extend_to_slot(bellatrix_fork_slot).await; + Box::pin(harness.extend_to_slot(bellatrix_fork_slot)).await; let bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!(bellatrix_head.as_bellatrix().is_ok()); @@ -207,15 +207,7 @@ async fn base_altair_bellatrix_with_terminal_block_after_fork() { harness.extend_slots(1).await; let block = &harness.chain.head_snapshot().beacon_block; - execution_payloads.push( - block - .message() - .body() - .execution_payload() - .unwrap() - .clone() - .into(), - ); + execution_payloads.push(block.message().body().execution_payload().unwrap().into()); } verify_execution_payload_chain(execution_payloads.as_slice()); diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs index ac97a95721..3ce5702f2e 100644 --- a/beacon_node/beacon_chain/tests/capella.rs +++ b/beacon_node/beacon_chain/tests/capella.rs @@ -54,7 +54,7 @@ async fn base_altair_bellatrix_capella() { /* * Do the Altair fork. */ - harness.extend_to_slot(altair_fork_slot).await; + Box::pin(harness.extend_to_slot(altair_fork_slot)).await; let altair_head = &harness.chain.head_snapshot().beacon_block; assert!(altair_head.as_altair().is_ok()); @@ -63,7 +63,7 @@ async fn base_altair_bellatrix_capella() { /* * Do the Bellatrix fork, without a terminal PoW block. */ - harness.extend_to_slot(bellatrix_fork_slot).await; + Box::pin(harness.extend_to_slot(bellatrix_fork_slot)).await; let bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!(bellatrix_head.as_bellatrix().is_ok()); @@ -81,7 +81,7 @@ async fn base_altair_bellatrix_capella() { /* * Next Bellatrix block shouldn't include an exec payload. 
*/ - harness.extend_slots(1).await; + Box::pin(harness.extend_slots(1)).await; let one_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!( @@ -112,7 +112,7 @@ async fn base_altair_bellatrix_capella() { terminal_block.timestamp = timestamp; } }); - harness.extend_slots(1).await; + Box::pin(harness.extend_slots(1)).await; let two_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!( diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 729d88450f..01b790bb25 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -413,7 +413,7 @@ async fn invalid_payload_invalidates_parent() { rig.import_block(Payload::Valid).await; // Import a valid transition block. rig.move_to_first_justification(Payload::Syncing).await; - let roots = vec![ + let roots = [ rig.import_block(Payload::Syncing).await, rig.import_block(Payload::Syncing).await, rig.import_block(Payload::Syncing).await, @@ -1052,7 +1052,7 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for gossip. assert!(matches!( - rig.harness.chain.clone().verify_block_for_gossip(block.clone().into()).await, + rig.harness.chain.clone().verify_block_for_gossip(block.clone()).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 522020e476..73805a8525 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -330,7 +330,7 @@ async fn long_skip() { final_blocks as usize, BlockStrategy::ForkCanonicalChainAt { previous_slot: Slot::new(initial_blocks), - first_slot: Slot::new(initial_blocks + skip_slots as u64 + 1), + first_slot: Slot::new(initial_blocks + skip_slots + 1), }, AttestationStrategy::AllValidators, ) @@ -381,8 +381,7 @@ async fn randao_genesis_storage() { .beacon_state .randao_mixes() .iter() - .find(|x| **x == genesis_value) - .is_some()); + .any(|x| *x == genesis_value)); // Then upon adding one more block, it isn't harness.advance_slot(); @@ -393,14 +392,13 @@ async fn randao_genesis_storage() { AttestationStrategy::AllValidators, ) .await; - assert!(harness + assert!(!harness .chain .head_snapshot() .beacon_state .randao_mixes() .iter() - .find(|x| **x == genesis_value) - .is_none()); + .any(|x| *x == genesis_value)); check_finalization(&harness, num_slots); check_split_slot(&harness, store); @@ -1062,7 +1060,7 @@ fn check_shuffling_compatible( let current_epoch_shuffling_is_compatible = harness.chain.shuffling_is_compatible( &block_root, head_state.current_epoch(), - &head_state, + head_state, ); // Check for consistency with the more expensive shuffling lookup. 
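
The randao hunks above collapse an `iter().find(p).is_some()` / `.is_none()` chain into `iter().any(p)`, which is what clippy's `search_is_some` lint asks for. A minimal standalone sketch of the before/after, using hypothetical values in place of the beacon state's randao mixes:

fn main() {
    let mixes = [1u64, 2, 3];
    let genesis_value = 2u64;

    // Before: `find(...).is_some()` is flagged by clippy::search_is_some.
    let found_old = mixes.iter().find(|x| **x == genesis_value).is_some();
    // After: `any(...)` expresses the same check without the intermediate Option.
    let found_new = mixes.iter().any(|x| *x == genesis_value);

    assert_eq!(found_old, found_new);
    assert!(found_new);
}

The negated form works the same way: `.find(p).is_none()` becomes `!iter.any(p)`, which is how the second randao assertion above is rewritten.
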
@@ -1102,7 +1100,7 @@ fn check_shuffling_compatible( let previous_epoch_shuffling_is_compatible = harness.chain.shuffling_is_compatible( &block_root, head_state.previous_epoch(), - &head_state, + head_state, ); harness .chain @@ -1130,14 +1128,11 @@ fn check_shuffling_compatible( // Targeting two epochs before the current epoch should always return false if head_state.current_epoch() >= 2 { - assert_eq!( - harness.chain.shuffling_is_compatible( - &block_root, - head_state.current_epoch() - 2, - &head_state - ), - false - ); + assert!(!harness.chain.shuffling_is_compatible( + &block_root, + head_state.current_epoch() - 2, + head_state + )); } } } @@ -1559,14 +1554,13 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (canonical_blocks, _, _, _) = rig - .add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &canonical_slots, - &honest_validators, - ) - .await; + let (canonical_blocks, _, _, _) = Box::pin(rig.add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &canonical_slots, + &honest_validators, + )) + .await; // Postconditions let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch @@ -1939,7 +1933,7 @@ async fn prune_single_block_long_skip() { 2 * slots_per_epoch, 1, 2 * slots_per_epoch, - 2 * slots_per_epoch as u64, + 2 * slots_per_epoch, 1, ) .await; @@ -1961,31 +1955,45 @@ async fn prune_shared_skip_states_mid_epoch() { #[tokio::test] async fn prune_shared_skip_states_epoch_boundaries() { let slots_per_epoch = E::slots_per_epoch(); - pruning_test(slots_per_epoch - 1, 1, slots_per_epoch, 2, slots_per_epoch).await; - pruning_test(slots_per_epoch - 1, 2, slots_per_epoch, 1, slots_per_epoch).await; - pruning_test( - 2 * slots_per_epoch + slots_per_epoch / 2, - slots_per_epoch as u64 / 2, + Box::pin(pruning_test( + slots_per_epoch - 1, + 1, slots_per_epoch, - slots_per_epoch as u64 / 2 + 1, + 2, slots_per_epoch, - ) + )) .await; - pruning_test( - 2 * slots_per_epoch + slots_per_epoch / 2, - slots_per_epoch as u64 / 2, + Box::pin(pruning_test( + slots_per_epoch - 1, + 2, slots_per_epoch, - slots_per_epoch as u64 / 2 + 1, + 1, slots_per_epoch, - ) + )) .await; - pruning_test( + Box::pin(pruning_test( + 2 * slots_per_epoch + slots_per_epoch / 2, + slots_per_epoch / 2, + slots_per_epoch, + slots_per_epoch / 2 + 1, + slots_per_epoch, + )) + .await; + Box::pin(pruning_test( + 2 * slots_per_epoch + slots_per_epoch / 2, + slots_per_epoch / 2, + slots_per_epoch, + slots_per_epoch / 2 + 1, + slots_per_epoch, + )) + .await; + Box::pin(pruning_test( 2 * slots_per_epoch - 1, - slots_per_epoch as u64, + slots_per_epoch, 1, 0, 2 * slots_per_epoch, - ) + )) .await; } @@ -2094,7 +2102,7 @@ async fn pruning_test( ); check_chain_dump( &harness, - (num_initial_blocks + num_canonical_middle_blocks + num_finalization_blocks + 1) as u64, + num_initial_blocks + num_canonical_middle_blocks + num_finalization_blocks + 1, ); let all_canonical_states = harness @@ -2613,8 +2621,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { harness.advance_slot(); } harness.extend_to_slot(finalizing_slot - 1).await; - harness - .add_block_at_slot(finalizing_slot, harness.get_current_state()) + Box::pin(harness.add_block_at_slot(finalizing_slot, harness.get_current_state())) .await .unwrap(); diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs 
b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index d1b3139d42..6d30b8a4e3 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -73,7 +73,7 @@ fn get_valid_sync_committee_message_for_block( let head_state = harness.chain.head_beacon_state_cloned(); let (signature, _) = harness .make_sync_committee_messages(&head_state, block_root, slot, relative_sync_committee) - .get(0) + .first() .expect("sync messages should exist") .get(message_index) .expect("first sync message should exist") @@ -104,7 +104,7 @@ fn get_valid_sync_contribution( ); let (_, contribution_opt) = sync_contributions - .get(0) + .first() .expect("sync contributions should exist"); let contribution = contribution_opt .as_ref() diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 7ae34ccf38..c641f32b82 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -170,7 +170,7 @@ async fn find_reorgs() { harness .extend_chain( - num_blocks_produced as usize, + num_blocks_produced, BlockStrategy::OnCanonicalHead, // No need to produce attestations for this test. AttestationStrategy::SomeValidators(vec![]), @@ -203,7 +203,7 @@ async fn find_reorgs() { assert_eq!( find_reorg_slot( &harness.chain, - &head_state, + head_state, harness.chain.head_beacon_block().canonical_root() ), head_slot @@ -503,7 +503,6 @@ async fn unaggregated_attestations_added_to_fork_choice_some_none() { .unwrap(); let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT) - .into_iter() .map(|validator_index| { let slot = state .get_attestation_duties(validator_index, RelativeEpoch::Current) diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 1338f4f180..e1ecf2d4fc 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -322,7 +322,7 @@ pub async fn consensus_gossip() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn consensus_partial_pass_only_consensus() { /* this test targets gossip-level validation */ - let validation_level: Option = Some(BroadcastValidation::Consensus); + let validation_level = BroadcastValidation::Consensus; // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. @@ -378,7 +378,7 @@ pub async fn consensus_partial_pass_only_consensus() { tester.harness.chain.clone(), &channel.0, test_logger, - validation_level.unwrap(), + validation_level, StatusCode::ACCEPTED, network_globals, ) @@ -615,8 +615,7 @@ pub async fn equivocation_gossip() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn equivocation_consensus_late_equivocation() { /* this test targets gossip-level validation */ - let validation_level: Option = - Some(BroadcastValidation::ConsensusAndEquivocation); + let validation_level = BroadcastValidation::ConsensusAndEquivocation; // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. 
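
The status_tests.rs hunks above and below replace `assert_eq!(flag, true)` / `assert_eq!(flag, false)` with `assert!(flag)` / `assert!(!flag)`, per clippy's `bool_assert_comparison` lint. A self-contained sketch, with the API response fields stood in by plain booleans:

fn main() {
    let el_offline = false;
    let is_syncing = false;

    // Before: comparing against a bool literal trips clippy::bool_assert_comparison.
    assert_eq!(el_offline, false);
    // After: assert directly on the (negated) boolean.
    assert!(!el_offline);
    assert!(!is_syncing);
}
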
@@ -671,7 +670,7 @@ pub async fn equivocation_consensus_late_equivocation() { tester.harness.chain, &channel.0, test_logger, - validation_level.unwrap(), + validation_level, StatusCode::ACCEPTED, network_globals, ) @@ -1228,8 +1227,7 @@ pub async fn blinded_equivocation_gossip() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn blinded_equivocation_consensus_late_equivocation() { /* this test targets gossip-level validation */ - let validation_level: Option = - Some(BroadcastValidation::ConsensusAndEquivocation); + let validation_level = BroadcastValidation::ConsensusAndEquivocation; // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. @@ -1311,7 +1309,7 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { tester.harness.chain, &channel.0, test_logger, - validation_level.unwrap(), + validation_level, StatusCode::ACCEPTED, network_globals, ) @@ -1465,8 +1463,8 @@ pub async fn block_seen_on_gossip_with_some_blobs() { "need at least 2 blobs for partial reveal" ); - let partial_kzg_proofs = vec![blobs.0.get(0).unwrap().clone()]; - let partial_blobs = vec![blobs.1.get(0).unwrap().clone()]; + let partial_kzg_proofs = vec![*blobs.0.first().unwrap()]; + let partial_blobs = vec![blobs.1.first().unwrap().clone()]; // Simulate the block being seen on gossip. block diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 627b0d0b17..e45dcf221c 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -139,7 +139,7 @@ impl ForkChoiceUpdates { fn insert(&mut self, update: ForkChoiceUpdateMetadata) { self.updates .entry(update.state.head_block_hash) - .or_insert_with(Vec::new) + .or_default() .push(update); } diff --git a/beacon_node/http_api/tests/status_tests.rs b/beacon_node/http_api/tests/status_tests.rs index 01731530d3..dd481f23ba 100644 --- a/beacon_node/http_api/tests/status_tests.rs +++ b/beacon_node/http_api/tests/status_tests.rs @@ -57,18 +57,18 @@ async fn el_syncing_then_synced() { mock_el.el.upcheck().await; let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, false); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(!api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); // EL synced mock_el.server.set_syncing_response(Ok(false)); mock_el.el.upcheck().await; let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, false); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(!api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); } /// Check `syncing` endpoint when the EL is offline (errors on upcheck). @@ -85,9 +85,9 @@ async fn el_offline() { mock_el.el.upcheck().await; let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, true); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); } /// Check `syncing` endpoint when the EL errors on newPaylod but is not fully offline. 
@@ -128,9 +128,9 @@ async fn el_error_on_new_payload() { // The EL should now be *offline* according to the API. let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, true); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); // Processing a block successfully should remove the status. mock_el.server.set_new_payload_status( @@ -144,9 +144,9 @@ async fn el_error_on_new_payload() { harness.process_block_result((block, blobs)).await.unwrap(); let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, false); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(!api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); } /// Check `node health` endpoint when the EL is offline. diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 080a393b4d..7007a14466 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -274,10 +274,10 @@ impl ApiTester { let mock_builder_server = harness.set_mock_builder(beacon_url.clone()); // Start the mock builder service prior to building the chain out. - harness.runtime.task_executor.spawn( - async move { mock_builder_server.await }, - "mock_builder_server", - ); + harness + .runtime + .task_executor + .spawn(mock_builder_server, "mock_builder_server"); let mock_builder = harness.mock_builder.clone(); @@ -641,7 +641,7 @@ impl ApiTester { self } - pub async fn test_beacon_blocks_finalized(self) -> Self { + pub async fn test_beacon_blocks_finalized(self) -> Self { for block_id in self.interesting_block_ids() { let block_root = block_id.root(&self.chain); let block = block_id.full_block(&self.chain).await; @@ -678,7 +678,7 @@ impl ApiTester { self } - pub async fn test_beacon_blinded_blocks_finalized(self) -> Self { + pub async fn test_beacon_blinded_blocks_finalized(self) -> Self { for block_id in self.interesting_block_ids() { let block_root = block_id.root(&self.chain); let block = block_id.full_block(&self.chain).await; @@ -819,7 +819,7 @@ impl ApiTester { let validator_index_ids = validator_indices .iter() .cloned() - .map(|i| ValidatorId::Index(i)) + .map(ValidatorId::Index) .collect::>(); let unsupported_media_response = self @@ -859,7 +859,7 @@ impl ApiTester { let validator_index_ids = validator_indices .iter() .cloned() - .map(|i| ValidatorId::Index(i)) + .map(ValidatorId::Index) .collect::>(); let validator_pubkey_ids = validator_indices .iter() @@ -910,7 +910,7 @@ impl ApiTester { for i in validator_indices { if i < state.balances().len() as u64 { validators.push(ValidatorBalanceData { - index: i as u64, + index: i, balance: *state.balances().get(i as usize).unwrap(), }); } @@ -944,7 +944,7 @@ impl ApiTester { let validator_index_ids = validator_indices .iter() .cloned() - .map(|i| ValidatorId::Index(i)) + .map(ValidatorId::Index) .collect::>(); let validator_pubkey_ids = validator_indices .iter() @@ -1012,7 +1012,7 @@ impl ApiTester { || statuses.contains(&status.superstatus()) { validators.push(ValidatorData { - index: i as u64, + index: i, balance: *state.balances().get(i as usize).unwrap(), status, validator, @@ -1641,11 +1641,7 @@ impl ApiTester { let (block, _, _) = block_id.full_block(&self.chain).await.unwrap(); let 
num_blobs = block.num_expected_blobs(); let blob_indices = if use_indices { - Some( - (0..num_blobs.saturating_sub(1) as u64) - .into_iter() - .collect::>(), - ) + Some((0..num_blobs.saturating_sub(1) as u64).collect::>()) } else { None }; @@ -1663,7 +1659,7 @@ impl ApiTester { blob_indices.map_or(num_blobs, |indices| indices.len()) ); let expected = block.slot(); - assert_eq!(result.get(0).unwrap().slot(), expected); + assert_eq!(result.first().unwrap().slot(), expected); self } @@ -1701,9 +1697,9 @@ impl ApiTester { break; } } - let test_slot = test_slot.expect(&format!( - "should be able to find a block matching zero_blobs={zero_blobs}" - )); + let test_slot = test_slot.unwrap_or_else(|| { + panic!("should be able to find a block matching zero_blobs={zero_blobs}") + }); match self .client @@ -1772,7 +1768,6 @@ impl ApiTester { .attestations() .map(|att| att.clone_as_attestation()) .collect::>() - .into() }, ); @@ -1909,7 +1904,7 @@ impl ApiTester { let result = match self .client - .get_beacon_light_client_updates::(current_sync_committee_period as u64, 1) + .get_beacon_light_client_updates::(current_sync_committee_period, 1) .await { Ok(result) => result, @@ -1921,7 +1916,7 @@ impl ApiTester { .light_client_server_cache .get_light_client_updates( &self.chain.store, - current_sync_committee_period as u64, + current_sync_committee_period, 1, &self.chain.spec, ) @@ -2314,7 +2309,7 @@ impl ApiTester { .unwrap() .data .is_syncing; - assert_eq!(is_syncing, true); + assert!(is_syncing); // Reset sync state. *self @@ -2364,7 +2359,7 @@ impl ApiTester { pub async fn test_get_node_peers_by_id(self) -> Self { let result = self .client - .get_node_peers_by_id(self.external_peer_id.clone()) + .get_node_peers_by_id(self.external_peer_id) .await .unwrap() .data; @@ -3514,6 +3509,7 @@ impl ApiTester { self } + #[allow(clippy::await_holding_lock)] // This is a test, so it should be fine. pub async fn test_get_validator_aggregate_attestation(self) -> Self { if self .chain @@ -4058,7 +4054,7 @@ impl ApiTester { ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); @@ -4085,7 +4081,7 @@ impl ApiTester { ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); // This is the graffiti of the mock execution layer, not the builder. 
assert_eq!(payload.extra_data(), mock_el_extra_data::()); @@ -4113,7 +4109,7 @@ impl ApiTester { ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); @@ -4137,7 +4133,7 @@ impl ApiTester { .unwrap() .into(); - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); @@ -4183,7 +4179,7 @@ impl ApiTester { .unwrap() .into(); - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); assert_eq!(payload.gas_limit(), builder_limit); @@ -4267,7 +4263,7 @@ impl ApiTester { ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); assert_eq!(payload.gas_limit(), 30_000_000); @@ -5140,9 +5136,8 @@ impl ApiTester { pub async fn test_builder_chain_health_optimistic_head(self) -> Self { // Make sure the next payload verification will return optimistic before advancing the chain. - self.harness.mock_execution_layer.as_ref().map(|el| { + self.harness.mock_execution_layer.as_ref().inspect(|el| { el.server.all_payloads_syncing(true); - el }); self.harness .extend_chain( @@ -5169,7 +5164,7 @@ impl ApiTester { .unwrap() .into(); - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); // If this cache is populated, it indicates fallback to the local EE was correctly used. @@ -5188,9 +5183,8 @@ impl ApiTester { pub async fn test_builder_v3_chain_health_optimistic_head(self) -> Self { // Make sure the next payload verification will return optimistic before advancing the chain. - self.harness.mock_execution_layer.as_ref().map(|el| { + self.harness.mock_execution_layer.as_ref().inspect(|el| { el.server.all_payloads_syncing(true); - el }); self.harness .extend_chain( @@ -5220,7 +5214,7 @@ impl ApiTester { ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); self @@ -6101,16 +6095,17 @@ impl ApiTester { assert_eq!(result.execution_optimistic, Some(false)); // Change head to be optimistic. - self.chain + if let Some(head_node) = self + .chain .canonical_head .fork_choice_write_lock() .proto_array_mut() .core_proto_array_mut() .nodes .last_mut() - .map(|head_node| { - head_node.execution_status = ExecutionStatus::Optimistic(ExecutionBlockHash::zero()) - }); + { + head_node.execution_status = ExecutionStatus::Optimistic(ExecutionBlockHash::zero()) + } // Check responses are now optimistic. 
let result = self @@ -6143,8 +6138,8 @@ async fn poll_events, eth2::Error>> + Unpin }; tokio::select! { - _ = collect_stream_fut => {events} - _ = tokio::time::sleep(timeout) => { return events; } + _ = collect_stream_fut => { events } + _ = tokio::time::sleep(timeout) => { events } } } @@ -6180,31 +6175,31 @@ async fn test_unsupported_media_response() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn beacon_get() { +async fn beacon_get_state_hashes() { + ApiTester::new() + .await + .test_beacon_states_root_finalized() + .await + .test_beacon_states_finality_checkpoints_finalized() + .await + .test_beacon_states_root() + .await + .test_beacon_states_finality_checkpoints() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_get_state_info() { ApiTester::new() .await .test_beacon_genesis() .await - .test_beacon_states_root_finalized() - .await .test_beacon_states_fork_finalized() .await - .test_beacon_states_finality_checkpoints_finalized() - .await - .test_beacon_headers_block_id_finalized() - .await - .test_beacon_blocks_finalized::() - .await - .test_beacon_blinded_blocks_finalized::() - .await .test_debug_beacon_states_finalized() .await - .test_beacon_states_root() - .await .test_beacon_states_fork() .await - .test_beacon_states_finality_checkpoints() - .await .test_beacon_states_validators() .await .test_beacon_states_validator_balances() @@ -6214,6 +6209,18 @@ async fn beacon_get() { .test_beacon_states_validator_id() .await .test_beacon_states_randao() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_get_blocks() { + ApiTester::new() + .await + .test_beacon_headers_block_id_finalized() + .await + .test_beacon_blocks_finalized() + .await + .test_beacon_blinded_blocks_finalized() .await .test_beacon_headers_all_slots() .await @@ -6228,6 +6235,12 @@ async fn beacon_get() { .test_beacon_blocks_attestations() .await .test_beacon_blocks_root() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_get_pools() { + ApiTester::new() .await .test_get_beacon_pool_attestations() .await diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index 0ad31ff2e8..e46c69dc71 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -250,18 +250,17 @@ impl futures::stream::Stream for GossipCache { Poll::Ready(Some(expired)) => { let expected_key = expired.key(); let (topic, data) = expired.into_inner(); - match self.topic_msgs.get_mut(&topic) { - Some(msgs) => { - let key = msgs.remove(&data); - debug_assert_eq!(key, Some(expected_key)); - if msgs.is_empty() { - // no more messages for this topic. - self.topic_msgs.remove(&topic); - } - } - None => { - #[cfg(debug_assertions)] - panic!("Topic for registered message is not present.") + let topic_msg = self.topic_msgs.get_mut(&topic); + debug_assert!( + topic_msg.is_some(), + "Topic for registered message is not present." + ); + if let Some(msgs) = topic_msg { + let key = msgs.remove(&data); + debug_assert_eq!(key, Some(expected_key)); + if msgs.is_empty() { + // no more messages for this topic. 
+ self.topic_msgs.remove(&topic); } } Poll::Ready(Some(Ok(topic))) diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 9d774d97c1..7e27a91bd6 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -527,7 +527,7 @@ impl TestRig { self.assert_event_journal( &expected .iter() - .map(|ev| Into::<&'static str>::into(ev)) + .map(Into::<&'static str>::into) .chain(std::iter::once(WORKER_FREED)) .chain(std::iter::once(NOTHING_TO_DO)) .collect::>(), diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index c46e46e0fa..32bbfcbcaa 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -1,235 +1,229 @@ -#[cfg(not(debug_assertions))] -#[cfg(test)] -mod tests { - use crate::persisted_dht::load_dht; - use crate::{NetworkConfig, NetworkService}; - use beacon_chain::test_utils::BeaconChainHarness; - use beacon_chain::BeaconChainTypes; - use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig}; - use futures::StreamExt; - use lighthouse_network::types::{GossipEncoding, GossipKind}; - use lighthouse_network::{Enr, GossipTopic}; - use slog::{o, Drain, Level, Logger}; - use sloggers::{null::NullLoggerBuilder, Build}; - use std::str::FromStr; - use std::sync::Arc; - use tokio::runtime::Runtime; - use types::{Epoch, EthSpec, ForkName, MinimalEthSpec, SubnetId}; +#![cfg(not(debug_assertions))] +#![cfg(test)] +use crate::persisted_dht::load_dht; +use crate::{NetworkConfig, NetworkService}; +use beacon_chain::test_utils::BeaconChainHarness; +use beacon_chain::BeaconChainTypes; +use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig}; +use futures::StreamExt; +use lighthouse_network::types::{GossipEncoding, GossipKind}; +use lighthouse_network::{Enr, GossipTopic}; +use slog::{o, Drain, Level, Logger}; +use sloggers::{null::NullLoggerBuilder, Build}; +use std::str::FromStr; +use std::sync::Arc; +use tokio::runtime::Runtime; +use types::{Epoch, EthSpec, ForkName, MinimalEthSpec, SubnetId}; - impl NetworkService { - fn get_topic_params(&self, topic: GossipTopic) -> Option<&gossipsub::TopicScoreParams> { - self.libp2p.get_topic_params(topic) - } +impl NetworkService { + fn get_topic_params(&self, topic: GossipTopic) -> Option<&gossipsub::TopicScoreParams> { + self.libp2p.get_topic_params(topic) } +} - fn get_logger(actual_log: bool) -> Logger { - if actual_log { - let drain = { - let decorator = slog_term::TermDecorator::new().build(); - let decorator = - logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).chan_size(2048).build(); - drain.filter_level(Level::Debug) - }; +fn get_logger(actual_log: bool) -> Logger { + if actual_log { + let drain = { + let decorator = slog_term::TermDecorator::new().build(); + let decorator = + logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); + let drain = slog_term::FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain).chan_size(2048).build(); + drain.filter_level(Level::Debug) + }; - Logger::root(drain.fuse(), o!()) - } else { - let builder = NullLoggerBuilder; - builder.build().expect("should build logger") - } + Logger::root(drain.fuse(), o!()) + } else { + let builder = NullLoggerBuilder; + 
builder.build().expect("should build logger") } +} - #[test] - fn test_dht_persistence() { - let log = get_logger(false); +#[test] +fn test_dht_persistence() { + let log = get_logger(false); - let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .deterministic_keypairs(8) - .fresh_ephemeral_store() - .build() - .chain; + let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) + .default_spec() + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .build() + .chain; - let store = beacon_chain.store.clone(); + let store = beacon_chain.store.clone(); - let enr1 = Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap(); - let enr2 = Enr::from_str("enr:-IS4QJ2d11eu6dC7E7LoXeLMgMP3kom1u3SE8esFSWvaHoo0dP1jg8O3-nx9ht-EO3CmG7L6OkHcMmoIh00IYWB92QABgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQIB_c-jQMOXsbjWkbN-Oj99H57gfId5pfb4wa1qxwV4CIN1ZHCCIyk").unwrap(); - let enrs = vec![enr1, enr2]; + let enr1 = Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap(); + let enr2 = Enr::from_str("enr:-IS4QJ2d11eu6dC7E7LoXeLMgMP3kom1u3SE8esFSWvaHoo0dP1jg8O3-nx9ht-EO3CmG7L6OkHcMmoIh00IYWB92QABgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQIB_c-jQMOXsbjWkbN-Oj99H57gfId5pfb4wa1qxwV4CIN1ZHCCIyk").unwrap(); + let enrs = vec![enr1, enr2]; - let runtime = Arc::new(Runtime::new().unwrap()); + let runtime = Arc::new(Runtime::new().unwrap()); - let (signal, exit) = async_channel::bounded(1); + let (signal, exit) = async_channel::bounded(1); + let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let executor = + task_executor::TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); + + let mut config = NetworkConfig::default(); + config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21212, 21212, 21213); + config.discv5_config.table_filter = |_| true; // Do not ignore local IPs + config.upnp_enabled = false; + config.boot_nodes_enr = enrs.clone(); + let config = Arc::new(config); + runtime.block_on(async move { + // Create a new network service which implicitly gets dropped at the + // end of the block. + + let BeaconProcessorChannels { + beacon_processor_tx, + beacon_processor_rx: _beacon_processor_rx, + work_reprocessing_tx, + work_reprocessing_rx: _work_reprocessing_rx, + } = <_>::default(); + + let _network_service = NetworkService::start( + beacon_chain.clone(), + config, + executor, + None, + beacon_processor_tx, + work_reprocessing_tx, + ) + .await + .unwrap(); + drop(signal); + }); + + let raw_runtime = Arc::try_unwrap(runtime).unwrap(); + raw_runtime.shutdown_timeout(tokio::time::Duration::from_secs(300)); + + // Load the persisted dht from the store + let persisted_enrs = load_dht(store); + assert!( + persisted_enrs.contains(&enrs[0]), + "should have persisted the first ENR to store" + ); + assert!( + persisted_enrs.contains(&enrs[1]), + "should have persisted the second ENR to store" + ); +} + +// Test removing topic weight on old topics when a fork happens. 
+#[test] +fn test_removing_topic_weight_on_old_topics() { + let runtime = Arc::new(Runtime::new().unwrap()); + + // Capella spec + let mut spec = MinimalEthSpec::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(1)); + + // Build beacon chain. + let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) + .spec(spec.clone().into()) + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .mock_execution_layer() + .build() + .chain; + let (next_fork_name, _) = beacon_chain.duration_to_next_fork().expect("next fork"); + assert_eq!(next_fork_name, ForkName::Capella); + + // Build network service. + let (mut network_service, network_globals, _network_senders) = runtime.block_on(async { + let (_, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new( Arc::downgrade(&runtime), exit, - log.clone(), + get_logger(false), shutdown_tx, ); let mut config = NetworkConfig::default(); - config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21212, 21212, 21213); + config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21214, 21214, 21215); config.discv5_config.table_filter = |_| true; // Do not ignore local IPs config.upnp_enabled = false; - config.boot_nodes_enr = enrs.clone(); let config = Arc::new(config); - runtime.block_on(async move { - // Create a new network service which implicitly gets dropped at the - // end of the block. - let BeaconProcessorChannels { - beacon_processor_tx, - beacon_processor_rx: _beacon_processor_rx, - work_reprocessing_tx, - work_reprocessing_rx: _work_reprocessing_rx, - } = <_>::default(); + let beacon_processor_channels = + BeaconProcessorChannels::new(&BeaconProcessorConfig::default()); + NetworkService::build( + beacon_chain.clone(), + config, + executor.clone(), + None, + beacon_processor_channels.beacon_processor_tx, + beacon_processor_channels.work_reprocessing_tx, + ) + .await + .unwrap() + }); - let _network_service = NetworkService::start( - beacon_chain.clone(), - config, - executor, - None, - beacon_processor_tx, - work_reprocessing_tx, - ) - .await - .unwrap(); - drop(signal); - }); - - let raw_runtime = Arc::try_unwrap(runtime).unwrap(); - raw_runtime.shutdown_timeout(tokio::time::Duration::from_secs(300)); - - // Load the persisted dht from the store - let persisted_enrs = load_dht(store); - assert!( - persisted_enrs.contains(&enrs[0]), - "should have persisted the first ENR to store" - ); - assert!( - persisted_enrs.contains(&enrs[1]), - "should have persisted the second ENR to store" - ); - } - - // Test removing topic weight on old topics when a fork happens. - #[test] - fn test_removing_topic_weight_on_old_topics() { - let runtime = Arc::new(Runtime::new().unwrap()); - - // Capella spec - let mut spec = MinimalEthSpec::default_spec(); - spec.altair_fork_epoch = Some(Epoch::new(0)); - spec.bellatrix_fork_epoch = Some(Epoch::new(0)); - spec.capella_fork_epoch = Some(Epoch::new(1)); - - // Build beacon chain. - let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) - .spec(spec.clone().into()) - .deterministic_keypairs(8) - .fresh_ephemeral_store() - .mock_execution_layer() - .build() - .chain; - let (next_fork_name, _) = beacon_chain.duration_to_next_fork().expect("next fork"); - assert_eq!(next_fork_name, ForkName::Capella); - - // Build network service. 
- let (mut network_service, network_globals, _network_senders) = runtime.block_on(async { - let (_, exit) = async_channel::bounded(1); - let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - let executor = task_executor::TaskExecutor::new( - Arc::downgrade(&runtime), - exit, - get_logger(false), - shutdown_tx, - ); - - let mut config = NetworkConfig::default(); - config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21214, 21214, 21215); - config.discv5_config.table_filter = |_| true; // Do not ignore local IPs - config.upnp_enabled = false; - let config = Arc::new(config); - - let beacon_processor_channels = - BeaconProcessorChannels::new(&BeaconProcessorConfig::default()); - NetworkService::build( - beacon_chain.clone(), - config, - executor.clone(), - None, - beacon_processor_channels.beacon_processor_tx, - beacon_processor_channels.work_reprocessing_tx, - ) - .await - .unwrap() - }); - - // Subscribe to the topics. - runtime.block_on(async { - while network_globals.gossipsub_subscriptions.read().len() < 2 { - if let Some(msg) = network_service.subnet_service.next().await { - network_service.on_subnet_service_msg(msg); - } + // Subscribe to the topics. + runtime.block_on(async { + while network_globals.gossipsub_subscriptions.read().len() < 2 { + if let Some(msg) = network_service.subnet_service.next().await { + network_service.on_subnet_service_msg(msg); } - }); - - // Make sure the service is subscribed to the topics. - let (old_topic1, old_topic2) = { - let mut subnets = SubnetId::compute_attestation_subnets( - network_globals.local_enr().node_id().raw(), - &spec, - ) - .collect::>(); - assert_eq!(2, subnets.len()); - - let old_fork_digest = beacon_chain.enr_fork_id().fork_digest; - let old_topic1 = GossipTopic::new( - GossipKind::Attestation(subnets.pop().unwrap()), - GossipEncoding::SSZSnappy, - old_fork_digest, - ); - let old_topic2 = GossipTopic::new( - GossipKind::Attestation(subnets.pop().unwrap()), - GossipEncoding::SSZSnappy, - old_fork_digest, - ); - - (old_topic1, old_topic2) - }; - let subscriptions = network_globals.gossipsub_subscriptions.read().clone(); - assert_eq!(2, subscriptions.len()); - assert!(subscriptions.contains(&old_topic1)); - assert!(subscriptions.contains(&old_topic2)); - let old_topic_params1 = network_service - .get_topic_params(old_topic1.clone()) - .expect("topic score params"); - assert!(old_topic_params1.topic_weight > 0.0); - let old_topic_params2 = network_service - .get_topic_params(old_topic2.clone()) - .expect("topic score params"); - assert!(old_topic_params2.topic_weight > 0.0); - - // Advance slot to the next fork - for _ in 0..MinimalEthSpec::slots_per_epoch() { - beacon_chain.slot_clock.advance_slot(); } + }); - // Run `NetworkService::update_next_fork()`. - runtime.block_on(async { - network_service.update_next_fork(); - }); + // Make sure the service is subscribed to the topics. + let (old_topic1, old_topic2) = { + let mut subnets = SubnetId::compute_attestation_subnets( + network_globals.local_enr().node_id().raw(), + &spec, + ) + .collect::>(); + assert_eq!(2, subnets.len()); - // Check that topic_weight on the old topics has been zeroed. 
- let old_topic_params1 = network_service - .get_topic_params(old_topic1) - .expect("topic score params"); - assert_eq!(0.0, old_topic_params1.topic_weight); + let old_fork_digest = beacon_chain.enr_fork_id().fork_digest; + let old_topic1 = GossipTopic::new( + GossipKind::Attestation(subnets.pop().unwrap()), + GossipEncoding::SSZSnappy, + old_fork_digest, + ); + let old_topic2 = GossipTopic::new( + GossipKind::Attestation(subnets.pop().unwrap()), + GossipEncoding::SSZSnappy, + old_fork_digest, + ); - let old_topic_params2 = network_service - .get_topic_params(old_topic2) - .expect("topic score params"); - assert_eq!(0.0, old_topic_params2.topic_weight); + (old_topic1, old_topic2) + }; + let subscriptions = network_globals.gossipsub_subscriptions.read().clone(); + assert_eq!(2, subscriptions.len()); + assert!(subscriptions.contains(&old_topic1)); + assert!(subscriptions.contains(&old_topic2)); + let old_topic_params1 = network_service + .get_topic_params(old_topic1.clone()) + .expect("topic score params"); + assert!(old_topic_params1.topic_weight > 0.0); + let old_topic_params2 = network_service + .get_topic_params(old_topic2.clone()) + .expect("topic score params"); + assert!(old_topic_params2.topic_weight > 0.0); + + // Advance slot to the next fork + for _ in 0..MinimalEthSpec::slots_per_epoch() { + beacon_chain.slot_clock.advance_slot(); } + + // Run `NetworkService::update_next_fork()`. + runtime.block_on(async { + network_service.update_next_fork(); + }); + + // Check that topic_weight on the old topics has been zeroed. + let old_topic_params1 = network_service + .get_topic_params(old_topic1) + .expect("topic score params"); + assert_eq!(0.0, old_topic_params1.topic_weight); + + let old_topic_params2 = network_service + .get_topic_params(old_topic2) + .expect("topic score params"); + assert_eq!(0.0, old_topic_params2.topic_weight); } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 3a002bf870..d01c73118c 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -877,11 +877,11 @@ mod release_tests { let (harness, ref spec) = attestation_test_state::(1); // Only run this test on the phase0 hard-fork. 
- if spec.altair_fork_epoch != None { + if spec.altair_fork_epoch.is_some() { return; } - let mut state = get_current_state_initialize_epoch_cache(&harness, &spec); + let mut state = get_current_state_initialize_epoch_cache(&harness, spec); let slot = state.slot(); let committees = state .get_beacon_committees_at_slot(slot) @@ -902,10 +902,10 @@ mod release_tests { ); for (atts, aggregate) in &attestations { - let att2 = aggregate.as_ref().unwrap().message().aggregate().clone(); + let att2 = aggregate.as_ref().unwrap().message().aggregate(); let att1 = atts - .into_iter() + .iter() .map(|(att, _)| att) .take(2) .fold::>, _>(None, |att, new_att| { @@ -946,7 +946,7 @@ mod release_tests { .unwrap(); assert_eq!( - committees.get(0).unwrap().committee.len() - 2, + committees.first().unwrap().committee.len() - 2, earliest_attestation_validators( &att2_split.as_ref(), &state, @@ -963,7 +963,7 @@ mod release_tests { let (harness, ref spec) = attestation_test_state::(1); let op_pool = OperationPool::::new(); - let mut state = get_current_state_initialize_epoch_cache(&harness, &spec); + let mut state = get_current_state_initialize_epoch_cache(&harness, spec); let slot = state.slot(); let committees = state @@ -1020,7 +1020,7 @@ mod release_tests { let agg_att = &block_attestations[0]; assert_eq!( agg_att.num_set_aggregation_bits(), - spec.target_committee_size as usize + spec.target_committee_size ); // Prune attestations shouldn't do anything at this point. @@ -1039,7 +1039,7 @@ mod release_tests { fn attestation_duplicate() { let (harness, ref spec) = attestation_test_state::(1); - let state = get_current_state_initialize_epoch_cache(&harness, &spec); + let state = get_current_state_initialize_epoch_cache(&harness, spec); let op_pool = OperationPool::::new(); @@ -1082,7 +1082,7 @@ mod release_tests { fn attestation_pairwise_overlapping() { let (harness, ref spec) = attestation_test_state::(1); - let state = get_current_state_initialize_epoch_cache(&harness, &spec); + let state = get_current_state_initialize_epoch_cache(&harness, spec); let op_pool = OperationPool::::new(); @@ -1113,19 +1113,17 @@ mod release_tests { let aggs1 = atts1 .chunks_exact(step_size * 2) .map(|chunk| { - let agg = chunk.into_iter().map(|(att, _)| att).fold::, - >, _>( - None, - |att, new_att| { + let agg = chunk + .iter() + .map(|(att, _)| att) + .fold::>, _>(None, |att, new_att| { if let Some(mut a) = att { a.aggregate(new_att.to_ref()); Some(a) } else { Some(new_att.clone()) } - }, - ); + }); agg.unwrap() }) .collect::>(); @@ -1136,19 +1134,17 @@ mod release_tests { .as_slice() .chunks_exact(step_size * 2) .map(|chunk| { - let agg = chunk.into_iter().map(|(att, _)| att).fold::, - >, _>( - None, - |att, new_att| { + let agg = chunk + .iter() + .map(|(att, _)| att) + .fold::>, _>(None, |att, new_att| { if let Some(mut a) = att { a.aggregate(new_att.to_ref()); Some(a) } else { Some(new_att.clone()) } - }, - ); + }); agg.unwrap() }) .collect::>(); @@ -1181,7 +1177,7 @@ mod release_tests { let (harness, ref spec) = attestation_test_state::(num_committees); - let mut state = get_current_state_initialize_epoch_cache(&harness, &spec); + let mut state = get_current_state_initialize_epoch_cache(&harness, spec); let op_pool = OperationPool::::new(); @@ -1194,7 +1190,7 @@ mod release_tests { .collect::>(); let max_attestations = ::MaxAttestations::to_usize(); - let target_committee_size = spec.target_committee_size as usize; + let target_committee_size = spec.target_committee_size; let num_validators = num_committees * 
MainnetEthSpec::slots_per_epoch() as usize * spec.target_committee_size; @@ -1209,12 +1205,12 @@ mod release_tests { let insert_attestations = |attestations: Vec<(Attestation, SubnetId)>, step_size| { - let att_0 = attestations.get(0).unwrap().0.clone(); + let att_0 = attestations.first().unwrap().0.clone(); let aggs = attestations .chunks_exact(step_size) .map(|chunk| { chunk - .into_iter() + .iter() .map(|(att, _)| att) .fold::, _>( att_0.clone(), @@ -1296,7 +1292,7 @@ mod release_tests { let (harness, ref spec) = attestation_test_state::(num_committees); - let mut state = get_current_state_initialize_epoch_cache(&harness, &spec); + let mut state = get_current_state_initialize_epoch_cache(&harness, spec); let op_pool = OperationPool::::new(); let slot = state.slot(); @@ -1308,7 +1304,7 @@ mod release_tests { .collect::>(); let max_attestations = ::MaxAttestations::to_usize(); - let target_committee_size = spec.target_committee_size as usize; + let target_committee_size = spec.target_committee_size; // Each validator will have a multiple of 1_000_000_000 wei. // Safe from overflow unless there are about 18B validators (2^64 / 1_000_000_000). @@ -1329,12 +1325,12 @@ mod release_tests { let insert_attestations = |attestations: Vec<(Attestation, SubnetId)>, step_size| { - let att_0 = attestations.get(0).unwrap().0.clone(); + let att_0 = attestations.first().unwrap().0.clone(); let aggs = attestations .chunks_exact(step_size) .map(|chunk| { chunk - .into_iter() + .iter() .map(|(att, _)| att) .fold::, _>( att_0.clone(), @@ -1615,7 +1611,6 @@ mod release_tests { let block_root = *state .get_block_root(state.slot() - Slot::new(1)) - .ok() .expect("block root should exist at slot"); let contributions = harness.make_sync_contributions( &state, @@ -1674,7 +1669,6 @@ mod release_tests { let state = harness.get_current_state(); let block_root = *state .get_block_root(state.slot() - Slot::new(1)) - .ok() .expect("block root should exist at slot"); let contributions = harness.make_sync_contributions( &state, @@ -1711,7 +1705,6 @@ mod release_tests { let state = harness.get_current_state(); let block_root = *state .get_block_root(state.slot() - Slot::new(1)) - .ok() .expect("block root should exist at slot"); let contributions = harness.make_sync_contributions( &state, @@ -1791,7 +1784,6 @@ mod release_tests { let state = harness.get_current_state(); let block_root = *state .get_block_root(state.slot() - Slot::new(1)) - .ok() .expect("block root should exist at slot"); let contributions = harness.make_sync_contributions( &state, diff --git a/common/eth2_wallet_manager/src/wallet_manager.rs b/common/eth2_wallet_manager/src/wallet_manager.rs index 3dd419a48b..c988ca4135 100644 --- a/common/eth2_wallet_manager/src/wallet_manager.rs +++ b/common/eth2_wallet_manager/src/wallet_manager.rs @@ -296,10 +296,10 @@ mod tests { ) .expect("should create first wallet"); - let uuid = w.wallet().uuid().clone(); + let uuid = *w.wallet().uuid(); assert_eq!( - load_wallet_raw(&base_dir, &uuid).nextaccount(), + load_wallet_raw(base_dir, &uuid).nextaccount(), 0, "should start wallet with nextaccount 0" ); @@ -308,7 +308,7 @@ mod tests { w.next_validator(WALLET_PASSWORD, &[50; 32], &[51; 32]) .expect("should create validator"); assert_eq!( - load_wallet_raw(&base_dir, &uuid).nextaccount(), + load_wallet_raw(base_dir, &uuid).nextaccount(), i, "should update wallet with nextaccount {}", i @@ -333,54 +333,54 @@ mod tests { let base_dir = dir.path(); let mgr = WalletManager::open(base_dir).unwrap(); - let uuid_a = 
create_wallet(&mgr, 0).wallet().uuid().clone(); - let uuid_b = create_wallet(&mgr, 1).wallet().uuid().clone(); + let uuid_a = *create_wallet(&mgr, 0).wallet().uuid(); + let uuid_b = *create_wallet(&mgr, 1).wallet().uuid(); - let locked_a = LockedWallet::open(&base_dir, &uuid_a).expect("should open wallet a"); + let locked_a = LockedWallet::open(base_dir, &uuid_a).expect("should open wallet a"); assert!( - lockfile_path(&base_dir, &uuid_a).exists(), + lockfile_path(base_dir, &uuid_a).exists(), "lockfile should exist" ); drop(locked_a); assert!( - !lockfile_path(&base_dir, &uuid_a).exists(), + !lockfile_path(base_dir, &uuid_a).exists(), "lockfile have been cleaned up" ); - let locked_a = LockedWallet::open(&base_dir, &uuid_a).expect("should open wallet a"); - let locked_b = LockedWallet::open(&base_dir, &uuid_b).expect("should open wallet b"); + let locked_a = LockedWallet::open(base_dir, &uuid_a).expect("should open wallet a"); + let locked_b = LockedWallet::open(base_dir, &uuid_b).expect("should open wallet b"); assert!( - lockfile_path(&base_dir, &uuid_a).exists(), + lockfile_path(base_dir, &uuid_a).exists(), "lockfile a should exist" ); assert!( - lockfile_path(&base_dir, &uuid_b).exists(), + lockfile_path(base_dir, &uuid_b).exists(), "lockfile b should exist" ); - match LockedWallet::open(&base_dir, &uuid_a) { + match LockedWallet::open(base_dir, &uuid_a) { Err(Error::LockfileError(_)) => {} _ => panic!("did not get locked error"), }; drop(locked_a); - LockedWallet::open(&base_dir, &uuid_a) + LockedWallet::open(base_dir, &uuid_a) .expect("should open wallet a after previous instance is dropped"); - match LockedWallet::open(&base_dir, &uuid_b) { + match LockedWallet::open(base_dir, &uuid_b) { Err(Error::LockfileError(_)) => {} _ => panic!("did not get locked error"), }; drop(locked_b); - LockedWallet::open(&base_dir, &uuid_b) + LockedWallet::open(base_dir, &uuid_b) .expect("should open wallet a after previous instance is dropped"); } } diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 29265e34e4..ef017159a0 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -1156,18 +1156,20 @@ async fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { }; // recreate the chain exactly - ForkChoiceTest::new_with_chain_config(chain_config.clone()) - .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) - .await - .unwrap() - .skip_slots(E::slots_per_epoch() as usize) - .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) - .await - .unwrap() - .apply_blocks(1) - .await - .assert_finalized_epoch(5) - .assert_shutdown_signal_not_sent(); + Box::pin( + ForkChoiceTest::new_with_chain_config(chain_config.clone()) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await + .unwrap() + .skip_slots(E::slots_per_epoch() as usize) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) + .await + .unwrap() + .apply_blocks(1), + ) + .await + .assert_finalized_epoch(5) + .assert_shutdown_signal_not_sent(); } #[tokio::test] diff --git a/crypto/eth2_keystore/tests/eip2335_vectors.rs b/crypto/eth2_keystore/tests/eip2335_vectors.rs index 3702a21816..e6852cc608 100644 --- a/crypto/eth2_keystore/tests/eip2335_vectors.rs +++ b/crypto/eth2_keystore/tests/eip2335_vectors.rs @@ -58,7 +58,7 @@ fn eip2335_test_vector_scrypt() { } "#; - let keystore = decode_and_check_sk(&vector); + let keystore = decode_and_check_sk(vector); assert_eq!( *keystore.uuid(), 
Uuid::parse_str("1d85ae20-35c5-4611-98e8-aa14a633906f").unwrap(), @@ -102,7 +102,7 @@ fn eip2335_test_vector_pbkdf() { } "#; - let keystore = decode_and_check_sk(&vector); + let keystore = decode_and_check_sk(vector); assert_eq!( *keystore.uuid(), Uuid::parse_str("64625def-3331-4eea-ab6f-782f3ed16a83").unwrap(), diff --git a/crypto/eth2_keystore/tests/tests.rs b/crypto/eth2_keystore/tests/tests.rs index 0df884b8a2..20bf9f1653 100644 --- a/crypto/eth2_keystore/tests/tests.rs +++ b/crypto/eth2_keystore/tests/tests.rs @@ -54,25 +54,17 @@ fn file() { let dir = tempdir().unwrap(); let path = dir.path().join("keystore.json"); - let get_file = || { - File::options() - .write(true) - .read(true) - .create(true) - .open(path.clone()) - .expect("should create file") - }; - let keystore = KeystoreBuilder::new(&keypair, GOOD_PASSWORD, "".into()) .unwrap() .build() .unwrap(); keystore - .to_json_writer(&mut get_file()) + .to_json_writer(File::create_new(&path).unwrap()) .expect("should write to file"); - let decoded = Keystore::from_json_reader(&mut get_file()).expect("should read from file"); + let decoded = + Keystore::from_json_reader(File::open(&path).unwrap()).expect("should read from file"); assert_eq!( decoded.decrypt_keypair(BAD_PASSWORD).err().unwrap(), diff --git a/crypto/eth2_wallet/tests/tests.rs b/crypto/eth2_wallet/tests/tests.rs index fe4565e0db..3dc073f764 100644 --- a/crypto/eth2_wallet/tests/tests.rs +++ b/crypto/eth2_wallet/tests/tests.rs @@ -132,20 +132,11 @@ fn file_round_trip() { let dir = tempdir().unwrap(); let path = dir.path().join("keystore.json"); - let get_file = || { - File::options() - .write(true) - .read(true) - .create(true) - .open(path.clone()) - .expect("should create file") - }; - wallet - .to_json_writer(&mut get_file()) + .to_json_writer(File::create_new(&path).unwrap()) .expect("should write to file"); - let decoded = Wallet::from_json_reader(&mut get_file()).unwrap(); + let decoded = Wallet::from_json_reader(File::open(&path).unwrap()).unwrap(); assert_eq!( decoded.decrypt_seed(&[1, 2, 3]).err().unwrap(), diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index c7153f48ef..d53d042fa4 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -115,7 +115,7 @@ fn create_wallet>( .arg(base_dir.as_ref().as_os_str()) .arg(CREATE_CMD) .arg(format!("--{}", NAME_FLAG)) - .arg(&name) + .arg(name) .arg(format!("--{}", PASSWORD_FLAG)) .arg(password.as_ref().as_os_str()) .arg(format!("--{}", MNEMONIC_FLAG)) @@ -273,16 +273,16 @@ impl TestValidator { .expect("stdout is not utf8") .to_string(); - if stdout == "" { + if stdout.is_empty() { return Ok(vec![]); } let pubkeys = stdout[..stdout.len() - 1] .split("\n") - .filter_map(|line| { + .map(|line| { let tab = line.find("\t").expect("line must have tab"); let (_, pubkey) = line.split_at(tab + 1); - Some(pubkey.to_string()) + pubkey.to_string() }) .collect::>(); @@ -446,7 +446,9 @@ fn validator_import_launchpad() { } } - stdin.write(format!("{}\n", PASSWORD).as_bytes()).unwrap(); + stdin + .write_all(format!("{}\n", PASSWORD).as_bytes()) + .unwrap(); child.wait().unwrap(); @@ -504,7 +506,7 @@ fn validator_import_launchpad() { }; assert!( - defs.as_slice() == &[expected_def.clone()], + defs.as_slice() == [expected_def.clone()], "validator defs file should be accurate" ); @@ -525,7 +527,7 @@ fn validator_import_launchpad() { expected_def.enabled = true; assert!( - defs.as_slice() == &[expected_def.clone()], + defs.as_slice() == [expected_def.clone()], 
"validator defs file should be accurate" ); } @@ -582,7 +584,7 @@ fn validator_import_launchpad_no_password_then_add_password() { let mut child = validator_import_key_cmd(); wait_for_password_prompt(&mut child); let stdin = child.stdin.as_mut().unwrap(); - stdin.write("\n".as_bytes()).unwrap(); + stdin.write_all("\n".as_bytes()).unwrap(); child.wait().unwrap(); assert!( @@ -628,14 +630,16 @@ fn validator_import_launchpad_no_password_then_add_password() { }; assert!( - defs.as_slice() == &[expected_def.clone()], + defs.as_slice() == [expected_def.clone()], "validator defs file should be accurate" ); let mut child = validator_import_key_cmd(); wait_for_password_prompt(&mut child); let stdin = child.stdin.as_mut().unwrap(); - stdin.write(format!("{}\n", PASSWORD).as_bytes()).unwrap(); + stdin + .write_all(format!("{}\n", PASSWORD).as_bytes()) + .unwrap(); child.wait().unwrap(); let expected_def = ValidatorDefinition { @@ -657,7 +661,7 @@ fn validator_import_launchpad_no_password_then_add_password() { let defs = ValidatorDefinitions::open(&dst_dir).unwrap(); assert!( - defs.as_slice() == &[expected_def.clone()], + defs.as_slice() == [expected_def.clone()], "validator defs file should be accurate" ); } @@ -759,7 +763,7 @@ fn validator_import_launchpad_password_file() { }; assert!( - defs.as_slice() == &[expected_def], + defs.as_slice() == [expected_def], "validator defs file should be accurate" ); } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 80986653c1..88e05dfa12 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -9,7 +9,6 @@ use beacon_node::beacon_chain::graffiti_calculator::GraffitiOrigin; use beacon_processor::BeaconProcessorConfig; use eth1::Eth1Endpoint; use lighthouse_network::PeerId; -use lighthouse_version; use std::fs::File; use std::io::{Read, Write}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; @@ -128,7 +127,7 @@ fn allow_insecure_genesis_sync_default() { CommandLineTest::new() .run_with_zero_port_and_no_genesis_sync() .with_config(|config| { - assert_eq!(config.allow_insecure_genesis_sync, false); + assert!(!config.allow_insecure_genesis_sync); }); } @@ -146,7 +145,7 @@ fn allow_insecure_genesis_sync_enabled() { .flag("allow-insecure-genesis-sync", None) .run_with_zero_port_and_no_genesis_sync() .with_config(|config| { - assert_eq!(config.allow_insecure_genesis_sync, true); + assert!(config.allow_insecure_genesis_sync); }); } @@ -359,11 +358,11 @@ fn default_graffiti() { #[test] fn trusted_peers_flag() { - let peers = vec![PeerId::random(), PeerId::random()]; + let peers = [PeerId::random(), PeerId::random()]; CommandLineTest::new() .flag( "trusted-peers", - Some(format!("{},{}", peers[0].to_string(), peers[1].to_string()).as_str()), + Some(format!("{},{}", peers[0], peers[1]).as_str()), ) .run_with_zero_port() .with_config(|config| { @@ -383,7 +382,7 @@ fn genesis_backfill_flag() { CommandLineTest::new() .flag("genesis-backfill", None) .run_with_zero_port() - .with_config(|config| assert_eq!(config.chain.genesis_backfill, true)); + .with_config(|config| assert!(config.chain.genesis_backfill)); } /// The genesis backfill flag should be enabled if historic states flag is set. @@ -392,7 +391,7 @@ fn genesis_backfill_with_historic_flag() { CommandLineTest::new() .flag("reconstruct-historic-states", None) .run_with_zero_port() - .with_config(|config| assert_eq!(config.chain.genesis_backfill, true)); + .with_config(|config| assert!(config.chain.genesis_backfill)); } // Tests for Eth1 flags. 
@@ -448,7 +447,7 @@ fn eth1_cache_follow_distance_manual() { // Tests for Bellatrix flags. fn run_bellatrix_execution_endpoints_flag_test(flag: &str) { use sensitive_url::SensitiveUrl; - let urls = vec!["http://sigp.io/no-way:1337", "http://infura.not_real:4242"]; + let urls = ["http://sigp.io/no-way:1337", "http://infura.not_real:4242"]; // we don't support redundancy for execution-endpoints // only the first provided endpoint is parsed. @@ -480,10 +479,10 @@ fn run_bellatrix_execution_endpoints_flag_test(flag: &str) { .run_with_zero_port() .with_config(|config| { let config = config.execution_layer.as_ref().unwrap(); - assert_eq!(config.execution_endpoint.is_some(), true); + assert!(config.execution_endpoint.is_some()); assert_eq!( config.execution_endpoint.as_ref().unwrap().clone(), - SensitiveUrl::parse(&urls[0]).unwrap() + SensitiveUrl::parse(urls[0]).unwrap() ); // Only the first secret file should be used. assert_eq!( @@ -595,7 +594,7 @@ fn run_payload_builder_flag_test(flag: &str, builders: &str) { let config = config.execution_layer.as_ref().unwrap(); // Only first provided endpoint is parsed as we don't support // redundancy. - assert_eq!(config.builder_url, all_builders.get(0).cloned()); + assert_eq!(config.builder_url, all_builders.first().cloned()); }) } fn run_payload_builder_flag_test_with_config( @@ -661,7 +660,7 @@ fn builder_fallback_flags() { Some("builder-fallback-disable-checks"), None, |config| { - assert_eq!(config.chain.builder_fallback_disable_checks, true); + assert!(config.chain.builder_fallback_disable_checks); }, ); } @@ -1657,19 +1656,19 @@ fn http_enable_beacon_processor() { CommandLineTest::new() .flag("http", None) .run_with_zero_port() - .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true)); + .with_config(|config| assert!(config.http_api.enable_beacon_processor)); CommandLineTest::new() .flag("http", None) .flag("http-enable-beacon-processor", Some("true")) .run_with_zero_port() - .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true)); + .with_config(|config| assert!(config.http_api.enable_beacon_processor)); CommandLineTest::new() .flag("http", None) .flag("http-enable-beacon-processor", Some("false")) .run_with_zero_port() - .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, false)); + .with_config(|config| assert!(!config.http_api.enable_beacon_processor)); } #[test] fn http_tls_flags() { @@ -2221,7 +2220,7 @@ fn slasher_broadcast_flag_false() { }); } -#[cfg(all(feature = "slasher-lmdb"))] +#[cfg(feature = "slasher-lmdb")] #[test] fn slasher_backend_override_to_default() { // Hard to test this flag because all but one backend is disabled by default and the backend @@ -2429,7 +2428,7 @@ fn logfile_no_restricted_perms_flag() { .flag("logfile-no-restricted-perms", None) .run_with_zero_port() .with_config(|config| { - assert!(config.logger_config.is_restricted == false); + assert!(!config.logger_config.is_restricted); }); } #[test] @@ -2454,7 +2453,7 @@ fn logfile_format_flag() { fn sync_eth1_chain_default() { CommandLineTest::new() .run_with_zero_port() - .with_config(|config| assert_eq!(config.sync_eth1_chain, true)); + .with_config(|config| assert!(config.sync_eth1_chain)); } #[test] @@ -2467,7 +2466,7 @@ fn sync_eth1_chain_execution_endpoints_flag() { dir.path().join("jwt-file").as_os_str().to_str(), ) .run_with_zero_port() - .with_config(|config| assert_eq!(config.sync_eth1_chain, true)); + .with_config(|config| assert!(config.sync_eth1_chain)); } #[test] @@ -2481,7 +2480,7 @@ 
fn sync_eth1_chain_disable_deposit_contract_sync_flag() { dir.path().join("jwt-file").as_os_str().to_str(), ) .run_with_zero_port() - .with_config(|config| assert_eq!(config.sync_eth1_chain, false)); + .with_config(|config| assert!(!config.sync_eth1_chain)); } #[test] @@ -2504,9 +2503,9 @@ fn light_client_server_default() { CommandLineTest::new() .run_with_zero_port() .with_config(|config| { - assert_eq!(config.network.enable_light_client_server, false); - assert_eq!(config.chain.enable_light_client_server, false); - assert_eq!(config.http_api.enable_light_client_server, false); + assert!(!config.network.enable_light_client_server); + assert!(!config.chain.enable_light_client_server); + assert!(!config.http_api.enable_light_client_server); }); } @@ -2516,8 +2515,8 @@ fn light_client_server_enabled() { .flag("light-client-server", None) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.network.enable_light_client_server, true); - assert_eq!(config.chain.enable_light_client_server, true); + assert!(config.network.enable_light_client_server); + assert!(config.chain.enable_light_client_server); }); } @@ -2528,7 +2527,7 @@ fn light_client_http_server_enabled() { .flag("light-client-server", None) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.http_api.enable_light_client_server, true); + assert!(config.http_api.enable_light_client_server); }); } diff --git a/lighthouse/tests/boot_node.rs b/lighthouse/tests/boot_node.rs index 659dea468d..b243cd6001 100644 --- a/lighthouse/tests/boot_node.rs +++ b/lighthouse/tests/boot_node.rs @@ -149,7 +149,7 @@ fn disable_packet_filter_flag() { .flag("disable-packet-filter", None) .run_with_ip() .with_config(|config| { - assert_eq!(config.disable_packet_filter, true); + assert!(config.disable_packet_filter); }); } @@ -159,7 +159,7 @@ fn enable_enr_auto_update_flag() { .flag("enable-enr-auto-update", None) .run_with_ip() .with_config(|config| { - assert_eq!(config.enable_enr_auto_update, true); + assert!(config.enable_enr_auto_update); }); } diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 587001f77b..c5b303e4d1 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -136,7 +136,7 @@ fn beacon_nodes_tls_certs_flag() { .flag( "beacon-nodes-tls-certs", Some( - vec![ + [ dir.path().join("certificate.crt").to_str().unwrap(), dir.path().join("certificate2.crt").to_str().unwrap(), ] @@ -205,7 +205,7 @@ fn graffiti_file_with_pk_flag() { let mut file = File::create(dir.path().join("graffiti.txt")).expect("Unable to create file"); let new_key = Keypair::random(); let pubkeybytes = PublicKeyBytes::from(new_key.pk); - let contents = format!("{}:nice-graffiti", pubkeybytes.to_string()); + let contents = format!("{}:nice-graffiti", pubkeybytes); file.write_all(contents.as_bytes()) .expect("Unable to write to file"); CommandLineTest::new() @@ -419,13 +419,13 @@ pub fn malloc_tuning_flag() { CommandLineTest::new() .flag("disable-malloc-tuning", None) .run() - .with_config(|config| assert_eq!(config.http_metrics.allocator_metrics_enabled, false)); + .with_config(|config| assert!(!config.http_metrics.allocator_metrics_enabled)); } #[test] pub fn malloc_tuning_default() { CommandLineTest::new() .run() - .with_config(|config| assert_eq!(config.http_metrics.allocator_metrics_enabled, true)); + .with_config(|config| assert!(config.http_metrics.allocator_metrics_enabled)); } #[test] fn doppelganger_protection_flag() { diff --git 
a/lighthouse/tests/validator_manager.rs b/lighthouse/tests/validator_manager.rs index 999f3c3141..04e3eafe6e 100644 --- a/lighthouse/tests/validator_manager.rs +++ b/lighthouse/tests/validator_manager.rs @@ -136,7 +136,7 @@ pub fn validator_create_defaults() { count: 1, deposit_gwei: MainnetEthSpec::default_spec().max_effective_balance, mnemonic_path: None, - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), disable_deposits: false, specify_voting_keystore_password: false, eth1_withdrawal_address: None, @@ -201,7 +201,7 @@ pub fn validator_create_disable_deposits() { .flag("--disable-deposits", None) .flag("--builder-proposals", Some("false")) .assert_success(|config| { - assert_eq!(config.disable_deposits, true); + assert!(config.disable_deposits); assert_eq!(config.builder_proposals, Some(false)); }); } @@ -300,7 +300,7 @@ pub fn validator_move_defaults() { fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), }, }; assert_eq!(expected, config); @@ -350,7 +350,7 @@ pub fn validator_move_misc_flags_1() { .flag("--src-vc-token", Some("./1.json")) .flag("--dest-vc-url", Some("http://localhost:2")) .flag("--dest-vc-token", Some("./2.json")) - .flag("--validators", Some(&format!("{}", EXAMPLE_PUBKEY_0))) + .flag("--validators", Some(EXAMPLE_PUBKEY_0)) .flag("--builder-proposals", Some("false")) .flag("--prefer-builder-proposals", Some("false")) .assert_success(|config| { @@ -368,7 +368,7 @@ pub fn validator_move_misc_flags_1() { fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), }, }; assert_eq!(expected, config); @@ -382,7 +382,7 @@ pub fn validator_move_misc_flags_2() { .flag("--src-vc-token", Some("./1.json")) .flag("--dest-vc-url", Some("http://localhost:2")) .flag("--dest-vc-token", Some("./2.json")) - .flag("--validators", Some(&format!("{}", EXAMPLE_PUBKEY_0))) + .flag("--validators", Some(EXAMPLE_PUBKEY_0)) .flag("--builder-proposals", Some("false")) .flag("--builder-boost-factor", Some("100")) .assert_success(|config| { @@ -400,7 +400,7 @@ pub fn validator_move_misc_flags_2() { fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), }, }; assert_eq!(expected, config); @@ -428,7 +428,7 @@ pub fn validator_move_count() { fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), }, }; assert_eq!(expected, config); diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index bebc8fa13b..e0dee9ceb4 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -173,6 +173,8 @@ mod tests { } impl Web3SignerRig { + // We need to hold that lock as we want to get the binary only once + #[allow(clippy::await_holding_lock)] pub async fn new(network: &str, listen_address: &str, listen_port: u16) -> Self { GET_WEB3SIGNER_BIN .get_or_init(|| async { @@ -210,7 +212,7 @@ mod tests { keystore_password_file: keystore_password_filename.to_string(), }; let key_config_file = - File::create(&keystore_dir.path().join("key-config.yaml")).unwrap(); + File::create(keystore_dir.path().join("key-config.yaml")).unwrap(); serde_yaml::to_writer(key_config_file, &key_config).unwrap(); let tls_keystore_file = 
tls_dir().join("web3signer").join("key.p12"); diff --git a/validator_client/http_api/src/tests.rs b/validator_client/http_api/src/tests.rs index 027b10e246..7ea3d7ebaa 100644 --- a/validator_client/http_api/src/tests.rs +++ b/validator_client/http_api/src/tests.rs @@ -53,8 +53,10 @@ struct ApiTester { impl ApiTester { pub async fn new() -> Self { - let mut config = ValidatorStoreConfig::default(); - config.fee_recipient = Some(TEST_DEFAULT_FEE_RECIPIENT); + let config = ValidatorStoreConfig { + fee_recipient: Some(TEST_DEFAULT_FEE_RECIPIENT), + ..Default::default() + }; Self::new_with_config(config).await } @@ -139,7 +141,7 @@ impl ApiTester { let (listening_socket, server) = super::serve(ctx, test_runtime.task_executor.exit()).unwrap(); - tokio::spawn(async { server.await }); + tokio::spawn(server); let url = SensitiveUrl::parse(&format!( "http://{}:{}", @@ -345,22 +347,21 @@ impl ApiTester { .set_nextaccount(s.key_derivation_path_offset) .unwrap(); - for i in 0..s.count { + for validator in response.iter().take(s.count) { let keypairs = wallet .next_validator(PASSWORD_BYTES, PASSWORD_BYTES, PASSWORD_BYTES) .unwrap(); let voting_keypair = keypairs.voting.decrypt_keypair(PASSWORD_BYTES).unwrap(); assert_eq!( - response[i].voting_pubkey, + validator.voting_pubkey, voting_keypair.pk.clone().into(), "the locally generated voting pk should match the server response" ); let withdrawal_keypair = keypairs.withdrawal.decrypt_keypair(PASSWORD_BYTES).unwrap(); - let deposit_bytes = - serde_utils::hex::decode(&response[i].eth1_deposit_tx_data).unwrap(); + let deposit_bytes = serde_utils::hex::decode(&validator.eth1_deposit_tx_data).unwrap(); let (deposit_data, _) = decode_eth1_tx_data(&deposit_bytes, E::default_spec().max_effective_balance) diff --git a/validator_client/http_api/src/tests/keystores.rs b/validator_client/http_api/src/tests/keystores.rs index 2dde087a7f..6559a2bb9e 100644 --- a/validator_client/http_api/src/tests/keystores.rs +++ b/validator_client/http_api/src/tests/keystores.rs @@ -130,7 +130,7 @@ fn check_keystore_get_response<'a>( for (ks1, ks2) in response.data.iter().zip_eq(expected_keystores) { assert_eq!(ks1.validating_pubkey, keystore_pubkey(ks2)); assert_eq!(ks1.derivation_path, ks2.path()); - assert!(ks1.readonly == None || ks1.readonly == Some(false)); + assert!(ks1.readonly.is_none() || ks1.readonly == Some(false)); } } @@ -147,7 +147,7 @@ fn check_keystore_import_response( } } -fn check_keystore_delete_response<'a>( +fn check_keystore_delete_response( response: &DeleteKeystoresResponse, expected_statuses: impl IntoIterator, ) { @@ -634,7 +634,7 @@ async fn check_get_set_fee_recipient() { assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: TEST_DEFAULT_FEE_RECIPIENT, } ); @@ -654,7 +654,7 @@ async fn check_get_set_fee_recipient() { .post_fee_recipient( &all_pubkeys[1], &UpdateFeeRecipientRequest { - ethaddress: fee_recipient_public_key_1.clone(), + ethaddress: fee_recipient_public_key_1, }, ) .await @@ -667,14 +667,14 @@ async fn check_get_set_fee_recipient() { .await .expect("should get fee recipient"); let expected = if i == 1 { - fee_recipient_public_key_1.clone() + fee_recipient_public_key_1 } else { TEST_DEFAULT_FEE_RECIPIENT }; assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: expected, } ); @@ -686,7 +686,7 @@ async fn check_get_set_fee_recipient() { .post_fee_recipient( &all_pubkeys[2], &UpdateFeeRecipientRequest { - ethaddress: 
fee_recipient_public_key_2.clone(), + ethaddress: fee_recipient_public_key_2, }, ) .await @@ -699,16 +699,16 @@ async fn check_get_set_fee_recipient() { .await .expect("should get fee recipient"); let expected = if i == 1 { - fee_recipient_public_key_1.clone() + fee_recipient_public_key_1 } else if i == 2 { - fee_recipient_public_key_2.clone() + fee_recipient_public_key_2 } else { TEST_DEFAULT_FEE_RECIPIENT }; assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: expected, } ); @@ -720,7 +720,7 @@ async fn check_get_set_fee_recipient() { .post_fee_recipient( &all_pubkeys[1], &UpdateFeeRecipientRequest { - ethaddress: fee_recipient_override.clone(), + ethaddress: fee_recipient_override, }, ) .await @@ -732,16 +732,16 @@ async fn check_get_set_fee_recipient() { .await .expect("should get fee recipient"); let expected = if i == 1 { - fee_recipient_override.clone() + fee_recipient_override } else if i == 2 { - fee_recipient_public_key_2.clone() + fee_recipient_public_key_2 } else { TEST_DEFAULT_FEE_RECIPIENT }; assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: expected, } ); @@ -761,14 +761,14 @@ async fn check_get_set_fee_recipient() { .await .expect("should get fee recipient"); let expected = if i == 2 { - fee_recipient_public_key_2.clone() + fee_recipient_public_key_2 } else { TEST_DEFAULT_FEE_RECIPIENT }; assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: expected, } ); @@ -814,7 +814,7 @@ async fn check_get_set_gas_limit() { assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: DEFAULT_GAS_LIMIT, } ); @@ -843,14 +843,14 @@ async fn check_get_set_gas_limit() { .await .expect("should get gas limit"); let expected = if i == 1 { - gas_limit_public_key_1.clone() + gas_limit_public_key_1 } else { DEFAULT_GAS_LIMIT }; assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: expected, } ); @@ -884,7 +884,7 @@ async fn check_get_set_gas_limit() { assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: expected, } ); @@ -917,7 +917,7 @@ async fn check_get_set_gas_limit() { assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: expected, } ); @@ -944,7 +944,7 @@ async fn check_get_set_gas_limit() { assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: expected, } ); @@ -1305,7 +1305,7 @@ async fn delete_concurrent_with_signing() { let handle = handle.spawn(async move { for j in 0..num_attestations { let mut att = make_attestation(j, j + 1); - for (_validator_id, public_key) in thread_pubkeys.iter().enumerate() { + for public_key in thread_pubkeys.iter() { let _ = validator_store .sign_attestation(*public_key, 0, &mut att, Epoch::new(j + 1)) .await; @@ -2084,7 +2084,7 @@ async fn import_remotekey_web3signer_disabled() { web3signer_req.enable = false; // Import web3signers. - let _ = tester + tester .client .post_lighthouse_validators_web3signer(&vec![web3signer_req]) .await @@ -2148,8 +2148,11 @@ async fn import_remotekey_web3signer_enabled() { // 1 validator imported. 
assert_eq!(tester.vals_total(), 1); assert_eq!(tester.vals_enabled(), 1); - let vals = tester.initialized_validators.read(); - let web3_vals = vals.validator_definitions(); + let web3_vals = tester + .initialized_validators + .read() + .validator_definitions() + .to_vec(); // Import remotekeys. let import_res = tester @@ -2166,11 +2169,13 @@ async fn import_remotekey_web3signer_enabled() { assert_eq!(tester.vals_total(), 1); assert_eq!(tester.vals_enabled(), 1); - let vals = tester.initialized_validators.read(); - let remote_vals = vals.validator_definitions(); + { + let vals = tester.initialized_validators.read(); + let remote_vals = vals.validator_definitions(); - // Web3signer should not be overwritten since it is enabled. - assert!(web3_vals == remote_vals); + // Web3signer should not be overwritten since it is enabled. + assert!(web3_vals == remote_vals); + } // Remotekey should not be imported. let expected_responses = vec![SingleListRemotekeysResponse { diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs index 2e8821f0db..3cebc10bb3 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -520,7 +520,7 @@ pub mod tests { let local_validators: Vec = { let contents = - fs::read_to_string(&self.import_config.validators_file_path.unwrap()) + fs::read_to_string(self.import_config.validators_file_path.unwrap()) .unwrap(); serde_json::from_str(&contents).unwrap() }; @@ -557,7 +557,7 @@ pub mod tests { self.vc.ensure_key_cache_consistency().await; let local_keystore: Keystore = - Keystore::from_json_file(&self.import_config.keystore_file_path.unwrap()) + Keystore::from_json_file(self.import_config.keystore_file_path.unwrap()) .unwrap(); let list_keystores_response = self.vc.client.get_keystores().await.unwrap().data; diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs index c039728e6f..4d0820f5a8 100644 --- a/validator_manager/src/move_validators.rs +++ b/validator_manager/src/move_validators.rs @@ -978,13 +978,13 @@ mod test { }) .unwrap(); // Set all definitions to use the same password path as the primary. - definitions.iter_mut().enumerate().for_each(|(_, def)| { - match &mut def.signing_definition { - SigningDefinition::LocalKeystore { - voting_keystore_password_path: Some(path), - .. - } => *path = primary_path.clone(), - _ => (), + definitions.iter_mut().for_each(|def| { + if let SigningDefinition::LocalKeystore { + voting_keystore_password_path: Some(path), + .. 
+ } = &mut def.signing_definition + { + *path = primary_path.clone() } }) } From d74b2d96f58b97d807eff30e4fb1c0b964e7e6dd Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 17 Dec 2024 07:44:24 +0530 Subject: [PATCH 17/25] Electra alpha8 spec updates (#6496) * Fix partial withdrawals count * Remove get_active_balance * Remove queue_entire_balance_and_reset_validator * Switch to compounding when consolidating with source==target * Queue deposit requests and apply them during epoch processing * Fix ef tests * Clear todos * Fix engine api formatting issues * Merge branch 'unstable' into electra-alpha7 * Make add_validator_to_registry more in line with the spec * Address some review comments * Cleanup * Update initialize_beacon_state_from_eth1 * Merge branch 'unstable' into electra-alpha7 * Fix rpc decoding for blobs by range/root * Fix block hash computation * Fix process_deposits bug * Merge branch 'unstable' into electra-alpha7 * Fix topup deposit processing bug * Update builder api for electra * Refactor mock builder to separate functionality * Merge branch 'unstable' into electra-alpha7 * Address review comments * Use copied for reference rather than cloned * Optimise and simplify PendingDepositsContext::new * Merge remote-tracking branch 'origin/unstable' into electra-alpha7 * Fix processing of deposits with invalid signatures * Remove redundant code in genesis init * Revert "Refactor mock builder to separate functionality" This reverts commit 6d10456912b3c39b8a8c9089db76e8ead20608a0. * Revert "Update builder api for electra" This reverts commit c5c9aca6db201c09c995756a11e9cb6a03b2ea99. * Simplify pre-activation sorting * Fix stale validators used in upgrade_to_electra * Merge branch 'unstable' into electra-alpha7 --- beacon_node/execution_layer/src/block_hash.rs | 44 +-- .../execution_layer/src/engine_api/http.rs | 2 +- .../src/engine_api/json_structures.rs | 30 +- .../src/engine_api/new_payload_request.rs | 11 +- .../src/test_utils/handle_rpc.rs | 2 +- beacon_node/store/src/partial_beacon_state.rs | 4 +- .../src/per_block_processing.rs | 4 +- .../process_operations.rs | 151 +++++++--- .../src/per_epoch_processing/errors.rs | 1 + .../src/per_epoch_processing/single_pass.rs | 262 +++++++++++++----- .../state_processing/src/upgrade/electra.rs | 47 +++- consensus/types/src/beacon_state.rs | 123 +++----- consensus/types/src/beacon_state/tests.rs | 37 --- consensus/types/src/deposit_request.rs | 8 +- consensus/types/src/eth_spec.rs | 23 +- consensus/types/src/execution_block_header.rs | 9 + consensus/types/src/execution_requests.rs | 40 ++- consensus/types/src/lib.rs | 6 +- ..._balance_deposit.rs => pending_deposit.rs} | 12 +- consensus/types/src/preset.rs | 2 +- consensus/types/src/validator.rs | 21 +- testing/ef_tests/Makefile | 2 +- .../ef_tests/src/cases/epoch_processing.rs | 6 +- testing/ef_tests/src/type_name.rs | 2 +- testing/ef_tests/tests/tests.rs | 7 +- 25 files changed, 519 insertions(+), 337 deletions(-) rename consensus/types/src/{pending_balance_deposit.rs => pending_deposit.rs} (68%) diff --git a/beacon_node/execution_layer/src/block_hash.rs b/beacon_node/execution_layer/src/block_hash.rs index cdc172cff4..d3a32c7929 100644 --- a/beacon_node/execution_layer/src/block_hash.rs +++ b/beacon_node/execution_layer/src/block_hash.rs @@ -7,7 +7,7 @@ use keccak_hash::KECCAK_EMPTY_LIST_RLP; use triehash::ordered_trie_root; use types::{ EncodableExecutionBlockHeader, EthSpec, ExecutionBlockHash, ExecutionBlockHeader, - ExecutionPayloadRef, Hash256, + ExecutionPayloadRef, 
ExecutionRequests, Hash256, }; /// Calculate the block hash of an execution block. @@ -17,6 +17,7 @@ use types::{ pub fn calculate_execution_block_hash( payload: ExecutionPayloadRef, parent_beacon_block_root: Option, + execution_requests: Option<&ExecutionRequests>, ) -> (ExecutionBlockHash, Hash256) { // Calculate the transactions root. // We're currently using a deprecated Parity library for this. We should move to a @@ -38,6 +39,7 @@ pub fn calculate_execution_block_hash( let rlp_blob_gas_used = payload.blob_gas_used().ok(); let rlp_excess_blob_gas = payload.excess_blob_gas().ok(); + let requests_root = execution_requests.map(|requests| requests.requests_hash()); // Construct the block header. let exec_block_header = ExecutionBlockHeader::from_payload( @@ -48,6 +50,7 @@ pub fn calculate_execution_block_hash( rlp_blob_gas_used, rlp_excess_blob_gas, parent_beacon_block_root, + requests_root, ); // Hash the RLP encoding of the block header. @@ -118,6 +121,7 @@ mod test { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None, }; let expected_rlp = "f90200a0e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036b"; let expected_hash = @@ -149,6 +153,7 @@ mod test { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None, }; let expected_rlp = "f901fda0927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000002000088000000000000000082036b"; let expected_hash = @@ -181,6 +186,7 @@ mod test { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None, }; let expected_hash = 
Hash256::from_str("6da69709cd5a34079b6604d29cd78fc01dacd7c6268980057ad92a2bede87351") @@ -211,6 +217,7 @@ mod test { blob_gas_used: Some(0x0u64), excess_blob_gas: Some(0x0u64), parent_beacon_block_root: Some(Hash256::from_str("f7d327d2c04e4f12e9cdd492e53d39a1d390f8b1571e3b2a22ac6e1e170e5b1a").unwrap()), + requests_root: None, }; let expected_hash = Hash256::from_str("a7448e600ead0a23d16f96aa46e8dea9eef8a7c5669a5f0a5ff32709afe9c408") @@ -221,29 +228,30 @@ mod test { #[test] fn test_rlp_encode_block_electra() { let header = ExecutionBlockHeader { - parent_hash: Hash256::from_str("172864416698b842f4c92f7b476be294b4ef720202779df194cd225f531053ab").unwrap(), + parent_hash: Hash256::from_str("a628f146df398a339768bd101f7dc41d828be79aca5dd02cc878a51bdbadd761").unwrap(), ommers_hash: Hash256::from_str("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").unwrap(), - beneficiary: Address::from_str("878705ba3f8bc32fcf7f4caa1a35e72af65cf766").unwrap(), - state_root: Hash256::from_str("c6457d0df85c84c62d1c68f68138b6e796e8a44fb44de221386fb2d5611c41e0").unwrap(), - transactions_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), - receipts_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), - logs_bloom:<[u8; 256]>::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap().into(), + beneficiary: Address::from_str("f97e180c050e5ab072211ad2c213eb5aee4df134").unwrap(), + state_root: Hash256::from_str("fdff009f8280bd113ebb4df8ce4e2dcc9322d43184a0b506e70b7f4823ca1253").unwrap(), + transactions_root: Hash256::from_str("452806578b4fa881cafb019c47e767e37e2249accf859159f00cddefb2579bb5").unwrap(), + receipts_root: Hash256::from_str("72ceac0f16a32041c881b3220d39ca506a286bef163c01a4d0821cd4027d31c7").unwrap(), + logs_bloom:<[u8; 256]>::from_hex("10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000").unwrap().into(), difficulty: Uint256::ZERO, - number: Uint256::from(97), - gas_limit: Uint256::from(27482534), - gas_used: Uint256::ZERO, - timestamp: 1692132829u64, - extra_data: hex::decode("d883010d00846765746888676f312e32302e37856c696e7578").unwrap(), - mix_hash: Hash256::from_str("0b493c22d2ad4ca76c77ae6ad916af429b42b1dc98fdcb8e5ddbd049bbc5d623").unwrap(), + number: Uint256::from(8230), + gas_limit: Uint256::from(30000000), + gas_used: Uint256::from(3716848), + timestamp: 1730921268, + extra_data: hex::decode("d883010e0c846765746888676f312e32332e32856c696e7578").unwrap(), + mix_hash: Hash256::from_str("e87ca9a45b2e61bbe9080d897db1d584b5d2367d22e898af901091883b7b96ec").unwrap(), nonce: 
Hash64::ZERO, - base_fee_per_gas: Uint256::from(2374u64), + base_fee_per_gas: Uint256::from(7u64), withdrawals_root: Some(Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap()), - blob_gas_used: Some(0x0u64), - excess_blob_gas: Some(0x0u64), - parent_beacon_block_root: Some(Hash256::from_str("f7d327d2c04e4f12e9cdd492e53d39a1d390f8b1571e3b2a22ac6e1e170e5b1a").unwrap()), + blob_gas_used: Some(786432), + excess_blob_gas: Some(44695552), + parent_beacon_block_root: Some(Hash256::from_str("f3a888fee010ebb1ae083547004e96c254b240437823326fdff8354b1fc25629").unwrap()), + requests_root: Some(Hash256::from_str("9440d3365f07573919e1e9ac5178c20ec6fe267357ee4baf8b6409901f331b62").unwrap()), }; let expected_hash = - Hash256::from_str("a7448e600ead0a23d16f96aa46e8dea9eef8a7c5669a5f0a5ff32709afe9c408") + Hash256::from_str("61e67afc96bf21be6aab52c1ace1db48de7b83f03119b0644deb4b69e87e09e1") .unwrap(); test_rlp_encoding(&header, None, expected_hash); } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index d4734be448..33dc60d037 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -812,7 +812,7 @@ impl HttpJsonRpc { new_payload_request_electra.versioned_hashes, new_payload_request_electra.parent_beacon_block_root, new_payload_request_electra - .execution_requests_list + .execution_requests .get_execution_requests_list(), ]); diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index efd68f1023..1c6639804e 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -6,7 +6,9 @@ use strum::EnumString; use superstruct::superstruct; use types::beacon_block_body::KzgCommitments; use types::blob_sidecar::BlobsList; -use types::execution_requests::{ConsolidationRequests, DepositRequests, WithdrawalRequests}; +use types::execution_requests::{ + ConsolidationRequests, DepositRequests, RequestPrefix, WithdrawalRequests, +}; use types::{Blob, FixedVector, KzgProof, Unsigned}; #[derive(Debug, PartialEq, Serialize, Deserialize)] @@ -339,25 +341,6 @@ impl From> for ExecutionPayload { } } -/// This is used to index into the `execution_requests` array. -#[derive(Debug, Copy, Clone)] -enum RequestPrefix { - Deposit, - Withdrawal, - Consolidation, -} - -impl RequestPrefix { - pub fn from_prefix(prefix: u8) -> Option { - match prefix { - 0 => Some(Self::Deposit), - 1 => Some(Self::Withdrawal), - 2 => Some(Self::Consolidation), - _ => None, - } - } -} - /// Format of `ExecutionRequests` received over the engine api. /// /// Array of ssz-encoded requests list encoded as hex bytes. 
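The engine API transports each requests list as a JSON string of hex bytes with a `0x` prefix, and the corrected `TryFrom` impl in the hunk below strips that prefix before handing the bytes to the SSZ decoder. The same pattern in isolation (a hedged sketch; the function name is invented, `hex` is the crate the impl already uses):

    /// Decode one engine-api request entry, tolerating an optional `0x` prefix.
    fn decode_request_bytes(request: &str) -> Result<Vec<u8>, String> {
        hex::decode(request.strip_prefix("0x").unwrap_or(request))
            .map_err(|e| format!("Invalid hex {:?}", e))
    }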
@@ -379,7 +362,8 @@ impl TryFrom for ExecutionRequests { for (i, request) in value.0.into_iter().enumerate() { // hex string - let decoded_bytes = hex::decode(request).map_err(|e| format!("Invalid hex {:?}", e))?; + let decoded_bytes = hex::decode(request.strip_prefix("0x").unwrap_or(&request)) + .map_err(|e| format!("Invalid hex {:?}", e))?; match RequestPrefix::from_prefix(i as u8) { Some(RequestPrefix::Deposit) => { requests.deposits = DepositRequests::::from_ssz_bytes(&decoded_bytes) @@ -431,7 +415,7 @@ pub struct JsonGetPayloadResponse { #[superstruct(only(V3, V4))] pub should_override_builder: bool, #[superstruct(only(V4))] - pub requests: JsonExecutionRequests, + pub execution_requests: JsonExecutionRequests, } impl TryFrom> for GetPayloadResponse { @@ -464,7 +448,7 @@ impl TryFrom> for GetPayloadResponse { block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, - requests: response.requests.try_into()?, + requests: response.execution_requests.try_into()?, })) } } diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index 318779b7f3..60bc848974 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -44,7 +44,7 @@ pub struct NewPayloadRequest<'block, E: EthSpec> { #[superstruct(only(Deneb, Electra))] pub parent_beacon_block_root: Hash256, #[superstruct(only(Electra))] - pub execution_requests_list: &'block ExecutionRequests, + pub execution_requests: &'block ExecutionRequests, } impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { @@ -121,8 +121,11 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); - let (header_hash, rlp_transactions_root) = - calculate_execution_block_hash(payload, parent_beacon_block_root); + let (header_hash, rlp_transactions_root) = calculate_execution_block_hash( + payload, + parent_beacon_block_root, + self.execution_requests().ok().copied(), + ); if header_hash != self.block_hash() { return Err(Error::BlockHashMismatch { @@ -185,7 +188,7 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<'a, E> .map(kzg_commitment_to_versioned_hash) .collect(), parent_beacon_block_root: block_ref.parent_root, - execution_requests_list: &block_ref.body.execution_requests, + execution_requests: &block_ref.body.execution_requests, })), } } diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 786ac9ad9c..9365024ffb 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -374,7 +374,7 @@ pub async fn handle_rpc( .into(), should_override_builder: false, // TODO(electra): add EL requests in mock el - requests: Default::default(), + execution_requests: Default::default(), }) .unwrap() } diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 2eb40f47b1..22eecdcc60 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -136,7 +136,7 @@ where pub earliest_consolidation_epoch: Epoch, #[superstruct(only(Electra))] - pub pending_balance_deposits: List, + pub pending_deposits: List, #[superstruct(only(Electra))] pub pending_partial_withdrawals: List, @@ 
-403,7 +403,7 @@ impl TryInto> for PartialBeaconState { earliest_exit_epoch, consolidation_balance_to_consume, earliest_consolidation_epoch, - pending_balance_deposits, + pending_deposits, pending_partial_withdrawals, pending_consolidations ], diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index f289b6e081..436f4934b9 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -514,6 +514,7 @@ pub fn get_expected_withdrawals( // Consume pending partial withdrawals let partial_withdrawals_count = if let Ok(partial_withdrawals) = state.pending_partial_withdrawals() { + let mut partial_withdrawals_count = 0; for withdrawal in partial_withdrawals { if withdrawal.withdrawable_epoch > epoch || withdrawals.len() == spec.max_pending_partials_per_withdrawals_sweep as usize @@ -546,8 +547,9 @@ pub fn get_expected_withdrawals( }); withdrawal_index.safe_add_assign(1)?; } + partial_withdrawals_count.safe_add_assign(1)?; } - Some(withdrawals.len()) + Some(partial_withdrawals_count) } else { None }; diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index a53dc15126..22d8592364 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -7,7 +7,6 @@ use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; use crate::VerifySignatures; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; use types::typenum::U33; -use types::validator::is_compounding_withdrawal_credential; pub fn process_operations>( state: &mut BeaconState, @@ -378,7 +377,7 @@ pub fn process_deposits( if state.eth1_deposit_index() < eth1_deposit_index_limit { let expected_deposit_len = std::cmp::min( E::MaxDeposits::to_u64(), - state.get_outstanding_deposit_len()?, + eth1_deposit_index_limit.safe_sub(state.eth1_deposit_index())?, ); block_verify!( deposits.len() as u64 == expected_deposit_len, @@ -450,39 +449,46 @@ pub fn apply_deposit( if let Some(index) = validator_index { // [Modified in Electra:EIP7251] - if let Ok(pending_balance_deposits) = state.pending_balance_deposits_mut() { - pending_balance_deposits.push(PendingBalanceDeposit { index, amount })?; - - let validator = state - .validators() - .get(index as usize) - .ok_or(BeaconStateError::UnknownValidator(index as usize))?; - - if is_compounding_withdrawal_credential(deposit_data.withdrawal_credentials, spec) - && validator.has_eth1_withdrawal_credential(spec) - && is_valid_deposit_signature(&deposit_data, spec).is_ok() - { - state.switch_to_compounding_validator(index as usize, spec)?; - } + if let Ok(pending_deposits) = state.pending_deposits_mut() { + pending_deposits.push(PendingDeposit { + pubkey: deposit_data.pubkey, + withdrawal_credentials: deposit_data.withdrawal_credentials, + amount, + signature: deposit_data.signature, + slot: spec.genesis_slot, // Use `genesis_slot` to distinguish from a pending deposit request + })?; } else { // Update the existing validator balance. increase_balance(state, index as usize, amount)?; } - } else { + } + // New validator + else { // The signature should be checked for new validators. Return early for a bad // signature. 
if is_valid_deposit_signature(&deposit_data, spec).is_err() { return Ok(()); } - state.add_validator_to_registry(&deposit_data, spec)?; - let new_validator_index = state.validators().len().safe_sub(1)? as u64; + state.add_validator_to_registry( + deposit_data.pubkey, + deposit_data.withdrawal_credentials, + if state.fork_name_unchecked() >= ForkName::Electra { + 0 + } else { + amount + }, + spec, + )?; // [New in Electra:EIP7251] - if let Ok(pending_balance_deposits) = state.pending_balance_deposits_mut() { - pending_balance_deposits.push(PendingBalanceDeposit { - index: new_validator_index, + if let Ok(pending_deposits) = state.pending_deposits_mut() { + pending_deposits.push(PendingDeposit { + pubkey: deposit_data.pubkey, + withdrawal_credentials: deposit_data.withdrawal_credentials, amount, + signature: deposit_data.signature, + slot: spec.genesis_slot, // Use `genesis_slot` to distinguish from a pending deposit request })?; } } @@ -596,13 +602,18 @@ pub fn process_deposit_requests( if state.deposit_requests_start_index()? == spec.unset_deposit_requests_start_index { *state.deposit_requests_start_index_mut()? = request.index } - let deposit_data = DepositData { - pubkey: request.pubkey, - withdrawal_credentials: request.withdrawal_credentials, - amount: request.amount, - signature: request.signature.clone().into(), - }; - apply_deposit(state, deposit_data, None, false, spec)? + let slot = state.slot(); + + // [New in Electra:EIP7251] + if let Ok(pending_deposits) = state.pending_deposits_mut() { + pending_deposits.push(PendingDeposit { + pubkey: request.pubkey, + withdrawal_credentials: request.withdrawal_credentials, + amount: request.amount, + signature: request.signature.clone(), + slot, + })?; + } } Ok(()) @@ -621,11 +632,84 @@ pub fn process_consolidation_requests( Ok(()) } +fn is_valid_switch_to_compounding_request( + state: &BeaconState, + consolidation_request: &ConsolidationRequest, + spec: &ChainSpec, +) -> Result { + // Switch to compounding requires source and target be equal + if consolidation_request.source_pubkey != consolidation_request.target_pubkey { + return Ok(false); + } + + // Verify pubkey exists + let Some(source_index) = state + .pubkey_cache() + .get(&consolidation_request.source_pubkey) + else { + // source validator doesn't exist + return Ok(false); + }; + + let source_validator = state.get_validator(source_index)?; + // Verify the source withdrawal credentials + // Note: We need to specifically check for eth1 withdrawal credentials here + // If the validator is already compounding, the compounding request is not valid. + if let Some(withdrawal_address) = source_validator + .has_eth1_withdrawal_credential(spec) + .then(|| { + source_validator + .withdrawal_credentials + .as_slice() + .get(12..) 
+ .map(Address::from_slice) + }) + .flatten() + { + if withdrawal_address != consolidation_request.source_address { + return Ok(false); + } + } else { + // Source doesn't have eth1 withdrawal credentials + return Ok(false); + } + + // Verify the source is active + let current_epoch = state.current_epoch(); + if !source_validator.is_active_at(current_epoch) { + return Ok(false); + } + // Verify exits for source has not been initiated + if source_validator.exit_epoch != spec.far_future_epoch { + return Ok(false); + } + + Ok(true) +} + pub fn process_consolidation_request( state: &mut BeaconState, consolidation_request: &ConsolidationRequest, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { + if is_valid_switch_to_compounding_request(state, consolidation_request, spec)? { + let Some(source_index) = state + .pubkey_cache() + .get(&consolidation_request.source_pubkey) + else { + // source validator doesn't exist. This is unreachable as `is_valid_switch_to_compounding_request` + // will return false in that case. + return Ok(()); + }; + state.switch_to_compounding_validator(source_index, spec)?; + return Ok(()); + } + + // Verify that source != target, so a consolidation cannot be used as an exit. + if consolidation_request.source_pubkey == consolidation_request.target_pubkey { + return Ok(()); + } + // If the pending consolidations queue is full, consolidation requests are ignored if state.pending_consolidations()?.len() == E::PendingConsolidationsLimit::to_usize() { return Ok(()); @@ -649,10 +733,6 @@ pub fn process_consolidation_request( // target validator doesn't exist return Ok(()); }; - // Verify that source != target, so a consolidation cannot be used as an exit. - if source_index == target_index { - return Ok(()); - } let source_validator = state.get_validator(source_index)?; // Verify the source withdrawal credentials @@ -699,5 +779,10 @@ pub fn process_consolidation_request( target_index: target_index as u64, })?; + let target_validator = state.get_validator(target_index)?; + // Churn any target excess active balance of target and raise its max + if target_validator.has_eth1_withdrawal_credential(spec) { + state.switch_to_compounding_validator(target_index, spec)?; + } Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/errors.rs b/consensus/state_processing/src/per_epoch_processing/errors.rs index b6c9dbea52..f45c55a7ac 100644 --- a/consensus/state_processing/src/per_epoch_processing/errors.rs +++ b/consensus/state_processing/src/per_epoch_processing/errors.rs @@ -28,6 +28,7 @@ pub enum EpochProcessingError { SinglePassMissingActivationQueue, MissingEarliestExitEpoch, MissingExitBalanceToConsume, + PendingDepositsLogicError, } impl From for EpochProcessingError { diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index fcb480a37c..904e68e368 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -4,6 +4,7 @@ use crate::{ update_progressive_balances_cache::initialize_progressive_balances_cache, }, epoch_cache::{initialize_epoch_cache, PreEpochCache}, + per_block_processing::is_valid_deposit_signature, per_epoch_processing::{Delta, Error, ParticipationEpochSummary}, }; use itertools::izip; @@ -16,9 +17,9 @@ use types::{ TIMELY_TARGET_FLAG_INDEX, WEIGHT_DENOMINATOR, }, milhouse::Cow, - ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint, 
Epoch, EthSpec, - ExitCache, ForkName, List, ParticipationFlags, PendingBalanceDeposit, ProgressiveBalancesCache, - RelativeEpoch, Unsigned, Validator, + ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint, DepositData, Epoch, + EthSpec, ExitCache, ForkName, List, ParticipationFlags, PendingDeposit, + ProgressiveBalancesCache, RelativeEpoch, Unsigned, Validator, }; pub struct SinglePassConfig { @@ -26,7 +27,7 @@ pub struct SinglePassConfig { pub rewards_and_penalties: bool, pub registry_updates: bool, pub slashings: bool, - pub pending_balance_deposits: bool, + pub pending_deposits: bool, pub pending_consolidations: bool, pub effective_balance_updates: bool, } @@ -44,7 +45,7 @@ impl SinglePassConfig { rewards_and_penalties: true, registry_updates: true, slashings: true, - pending_balance_deposits: true, + pending_deposits: true, pending_consolidations: true, effective_balance_updates: true, } @@ -56,7 +57,7 @@ impl SinglePassConfig { rewards_and_penalties: false, registry_updates: false, slashings: false, - pending_balance_deposits: false, + pending_deposits: false, pending_consolidations: false, effective_balance_updates: false, } @@ -85,15 +86,17 @@ struct SlashingsContext { penalty_per_effective_balance_increment: u64, } -struct PendingBalanceDepositsContext { +struct PendingDepositsContext { /// The value to set `next_deposit_index` to *after* processing completes. next_deposit_index: usize, /// The value to set `deposit_balance_to_consume` to *after* processing completes. deposit_balance_to_consume: u64, /// Total balance increases for each validator due to pending balance deposits. validator_deposits_to_process: HashMap, - /// The deposits to append to `pending_balance_deposits` after processing all applicable deposits. - deposits_to_postpone: Vec, + /// The deposits to append to `pending_deposits` after processing all applicable deposits. + deposits_to_postpone: Vec, + /// New validators to be added to the state *after* processing completes. + new_validator_deposits: Vec, } struct EffectiveBalancesContext { @@ -138,6 +141,7 @@ pub fn process_epoch_single_pass( state.build_exit_cache(spec)?; state.build_committee_cache(RelativeEpoch::Previous, spec)?; state.build_committee_cache(RelativeEpoch::Current, spec)?; + state.update_pubkey_cache()?; let previous_epoch = state.previous_epoch(); let current_epoch = state.current_epoch(); @@ -163,12 +167,11 @@ pub fn process_epoch_single_pass( let slashings_ctxt = &SlashingsContext::new(state, state_ctxt, spec)?; let mut next_epoch_cache = PreEpochCache::new_for_next_epoch(state)?; - let pending_balance_deposits_ctxt = - if fork_name.electra_enabled() && conf.pending_balance_deposits { - Some(PendingBalanceDepositsContext::new(state, spec)?) - } else { - None - }; + let pending_deposits_ctxt = if fork_name.electra_enabled() && conf.pending_deposits { + Some(PendingDepositsContext::new(state, spec, &conf)?) 
+ } else { + None + }; let mut earliest_exit_epoch = state.earliest_exit_epoch().ok(); let mut exit_balance_to_consume = state.exit_balance_to_consume().ok(); @@ -303,9 +306,9 @@ pub fn process_epoch_single_pass( process_single_slashing(&mut balance, &validator, slashings_ctxt, state_ctxt, spec)?; } - // `process_pending_balance_deposits` - if let Some(pending_balance_deposits_ctxt) = &pending_balance_deposits_ctxt { - process_pending_balance_deposits_for_validator( + // `process_pending_deposits` + if let Some(pending_balance_deposits_ctxt) = &pending_deposits_ctxt { + process_pending_deposits_for_validator( &mut balance, validator_info, pending_balance_deposits_ctxt, @@ -342,20 +345,84 @@ pub fn process_epoch_single_pass( // Finish processing pending balance deposits if relevant. // // This *could* be reordered after `process_pending_consolidations` which pushes only to the end - // of the `pending_balance_deposits` list. But we may as well preserve the write ordering used + // of the `pending_deposits` list. But we may as well preserve the write ordering used // by the spec and do this first. - if let Some(ctxt) = pending_balance_deposits_ctxt { - let mut new_pending_balance_deposits = List::try_from_iter( + if let Some(ctxt) = pending_deposits_ctxt { + let mut new_balance_deposits = List::try_from_iter( state - .pending_balance_deposits()? + .pending_deposits()? .iter_from(ctxt.next_deposit_index)? .cloned(), )?; for deposit in ctxt.deposits_to_postpone { - new_pending_balance_deposits.push(deposit)?; + new_balance_deposits.push(deposit)?; } - *state.pending_balance_deposits_mut()? = new_pending_balance_deposits; + *state.pending_deposits_mut()? = new_balance_deposits; *state.deposit_balance_to_consume_mut()? = ctxt.deposit_balance_to_consume; + + // `new_validator_deposits` may contain multiple deposits with the same pubkey where + // the first deposit creates the new validator and the others are topups. + // Each item in the vec is a (pubkey, validator_index) + let mut added_validators = Vec::new(); + for deposit in ctxt.new_validator_deposits { + let deposit_data = DepositData { + pubkey: deposit.pubkey, + withdrawal_credentials: deposit.withdrawal_credentials, + amount: deposit.amount, + signature: deposit.signature, + }; + // Only check the signature if this is the first deposit for the validator, + // following the logic from `apply_pending_deposit` in the spec. + if let Some(validator_index) = state.get_validator_index(&deposit_data.pubkey)? { + state + .get_balance_mut(validator_index)? + .safe_add_assign(deposit_data.amount)?; + } else if is_valid_deposit_signature(&deposit_data, spec).is_ok() { + // Apply the new deposit to the state + let validator_index = state.add_validator_to_registry( + deposit_data.pubkey, + deposit_data.withdrawal_credentials, + deposit_data.amount, + spec, + )?; + added_validators.push((deposit_data.pubkey, validator_index)); + } + } + if conf.effective_balance_updates { + // Re-process effective balance updates for validators affected by top-up of new validators. 
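+            // (The single-pass loop over the existing validator set has already run at this
+            // point, so the top-ups applied above are not yet reflected in effective balances
+            // or the next-epoch caches; they are recomputed for the affected validators below.)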
+ let ( + validators, + balances, + _, + current_epoch_participation, + _, + progressive_balances, + _, + _, + ) = state.mutable_validator_fields()?; + for (_, validator_index) in added_validators.iter() { + let balance = *balances + .get(*validator_index) + .ok_or(BeaconStateError::UnknownValidator(*validator_index))?; + let mut validator = validators + .get_cow(*validator_index) + .ok_or(BeaconStateError::UnknownValidator(*validator_index))?; + let validator_current_epoch_participation = *current_epoch_participation + .get(*validator_index) + .ok_or(BeaconStateError::UnknownValidator(*validator_index))?; + process_single_effective_balance_update( + *validator_index, + balance, + &mut validator, + validator_current_epoch_participation, + &mut next_epoch_cache, + progressive_balances, + effective_balances_ctxt, + state_ctxt, + spec, + )?; + } + } } // Process consolidations outside the single-pass loop, as they depend on balances for multiple @@ -819,8 +886,12 @@ fn process_single_slashing( Ok(()) } -impl PendingBalanceDepositsContext { - fn new(state: &BeaconState, spec: &ChainSpec) -> Result { +impl PendingDepositsContext { + fn new( + state: &BeaconState, + spec: &ChainSpec, + config: &SinglePassConfig, + ) -> Result { let available_for_processing = state .deposit_balance_to_consume()? .safe_add(state.get_activation_exit_churn_limit(spec)?)?; @@ -830,10 +901,31 @@ impl PendingBalanceDepositsContext { let mut next_deposit_index = 0; let mut validator_deposits_to_process = HashMap::new(); let mut deposits_to_postpone = vec![]; + let mut new_validator_deposits = vec![]; + let mut is_churn_limit_reached = false; + let finalized_slot = state + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()); - let pending_balance_deposits = state.pending_balance_deposits()?; + let pending_deposits = state.pending_deposits()?; - for deposit in pending_balance_deposits.iter() { + for deposit in pending_deposits.iter() { + // Do not process deposit requests if the Eth1 bridge deposits are not yet applied. + if deposit.slot > spec.genesis_slot + && state.eth1_deposit_index() < state.deposit_requests_start_index()? + { + break; + } + // Do not process if the deposit slot has not been finalized. + if deposit.slot > finalized_slot { + break; + } + // Do not process if we have reached the limit for the number of deposits + // processed in an epoch. + if next_deposit_index >= E::max_pending_deposits_per_epoch() { + break; + } // We have to do a bit of indexing into `validators` here, but I can't see any way // around that without changing the spec. // // We need to get the validator's balance, but only if it is "known" // take, just whether it is non-default. Nor do we need to know the value of // `withdrawable_epoch`, because `next_epoch <= withdrawable_epoch` will evaluate to // `true` both for the actual value & the default placeholder value (`FAR_FUTURE_EPOCH`). - let validator = state.get_validator(deposit.index as usize)?; - let already_exited = validator.exit_epoch < spec.far_future_epoch; - // In the spec process_registry_updates is called before process_pending_balance_deposits - // so we must account for process_registry_updates ejecting the validator for low balance - // and setting the exit_epoch to < far_future_epoch. Note that in the spec the effective - // balance update does not happen until *after* the registry update, so we don't need to - // account for changes to the effective balance that would push it below the ejection - // balance here.
- let will_be_exited = validator.is_active_at(current_epoch) - && validator.effective_balance <= spec.ejection_balance; - if already_exited || will_be_exited { - if next_epoch <= validator.withdrawable_epoch { - deposits_to_postpone.push(deposit.clone()); - } else { - // Deposited balance will never become active. Increase balance but do not - // consume churn. - validator_deposits_to_process - .entry(deposit.index as usize) - .or_insert(0) - .safe_add_assign(deposit.amount)?; - } - } else { - // Deposit does not fit in the churn, no more deposit processing in this epoch. - if processed_amount.safe_add(deposit.amount)? > available_for_processing { - break; - } - // Deposit fits in the churn, process it. Increase balance and consume churn. + let mut is_validator_exited = false; + let mut is_validator_withdrawn = false; + let opt_validator_index = state.pubkey_cache().get(&deposit.pubkey); + if let Some(validator_index) = opt_validator_index { + let validator = state.get_validator(validator_index)?; + let already_exited = validator.exit_epoch < spec.far_future_epoch; + // In the spec process_registry_updates is called before process_pending_deposits + // so we must account for process_registry_updates ejecting the validator for low balance + // and setting the exit_epoch to < far_future_epoch. Note that in the spec the effective + // balance update does not happen until *after* the registry update, so we don't need to + // account for changes to the effective balance that would push it below the ejection + // balance here. + // Note: we only consider this if registry_updates are enabled in the config. + // EF tests require us to run epoch_processing functions in isolation. + let will_be_exited = config.registry_updates + && (validator.is_active_at(current_epoch) + && validator.effective_balance <= spec.ejection_balance); + is_validator_exited = already_exited || will_be_exited; + is_validator_withdrawn = validator.withdrawable_epoch < next_epoch; + } + + if is_validator_withdrawn { + // Deposited balance will never become active. Queue a balance increase but do not + // consume churn. Validator index must be known if the validator is known to be + // withdrawn (see calculation of `is_validator_withdrawn` above). + let validator_index = + opt_validator_index.ok_or(Error::PendingDepositsLogicError)?; validator_deposits_to_process - .entry(deposit.index as usize) + .entry(validator_index) .or_insert(0) .safe_add_assign(deposit.amount)?; + } else if is_validator_exited { + // Validator is exiting, postpone the deposit until after withdrawable epoch + deposits_to_postpone.push(deposit.clone()); + } else { + // Check if deposit fits in the churn, otherwise, do no more deposit processing in this epoch. + is_churn_limit_reached = + processed_amount.safe_add(deposit.amount)? > available_for_processing; + if is_churn_limit_reached { + break; + } processed_amount.safe_add_assign(deposit.amount)?; + + // Deposit fits in the churn, process it. Increase balance and consume churn. + if let Some(validator_index) = state.pubkey_cache().get(&deposit.pubkey) { + validator_deposits_to_process + .entry(validator_index) + .or_insert(0) + .safe_add_assign(deposit.amount)?; + } else { + // The `PendingDeposit` is for a new validator + new_validator_deposits.push(deposit.clone()); + } } // Regardless of how the deposit was handled, we move on in the queue. 
next_deposit_index.safe_add_assign(1)?; } - let deposit_balance_to_consume = if next_deposit_index == pending_balance_deposits.len() { - 0 - } else { + // Accumulate churn only if the churn limit has been hit. + let deposit_balance_to_consume = if is_churn_limit_reached { available_for_processing.safe_sub(processed_amount)? + } else { + 0 }; Ok(Self { @@ -893,14 +1007,15 @@ impl PendingBalanceDepositsContext { deposit_balance_to_consume, validator_deposits_to_process, deposits_to_postpone, + new_validator_deposits, }) } } -fn process_pending_balance_deposits_for_validator( +fn process_pending_deposits_for_validator( balance: &mut Cow, validator_info: &ValidatorInfo, - pending_balance_deposits_ctxt: &PendingBalanceDepositsContext, + pending_balance_deposits_ctxt: &PendingDepositsContext, ) -> Result<(), Error> { if let Some(deposit_amount) = pending_balance_deposits_ctxt .validator_deposits_to_process @@ -941,21 +1056,20 @@ fn process_pending_consolidations( break; } - // Calculate the active balance while we have the source validator loaded. This is a safe - // reordering. - let source_balance = *state - .balances() - .get(source_index) - .ok_or(BeaconStateError::UnknownValidator(source_index))?; - let active_balance = - source_validator.get_active_balance(source_balance, spec, state_ctxt.fork_name); - - // Churn any target excess active balance of target and raise its max. - state.switch_to_compounding_validator(target_index, spec)?; + // Calculate the consolidated balance + let max_effective_balance = + source_validator.get_max_effective_balance(spec, state_ctxt.fork_name); + let source_effective_balance = std::cmp::min( + *state + .balances() + .get(source_index) + .ok_or(BeaconStateError::UnknownValidator(source_index))?, + max_effective_balance, + ); // Move active balance to target. Excess balance is withdrawable. - decrease_balance(state, source_index, active_balance)?; - increase_balance(state, target_index, active_balance)?; + decrease_balance(state, source_index, source_effective_balance)?; + increase_balance(state, target_index, source_effective_balance)?; affected_validators.insert(source_index); affected_validators.insert(target_index); diff --git a/consensus/state_processing/src/upgrade/electra.rs b/consensus/state_processing/src/upgrade/electra.rs index 1e532d9f10..1e64ef2897 100644 --- a/consensus/state_processing/src/upgrade/electra.rs +++ b/consensus/state_processing/src/upgrade/electra.rs @@ -1,8 +1,10 @@ +use bls::Signature; +use itertools::Itertools; use safe_arith::SafeArith; use std::mem; use types::{ BeaconState, BeaconStateElectra, BeaconStateError as Error, ChainSpec, Epoch, EpochCache, - EthSpec, Fork, + EthSpec, Fork, PendingDeposit, }; /// Transform a `Deneb` state into an `Electra` state. 
@@ -38,29 +40,44 @@ pub fn upgrade_to_electra( // Add validators that are not yet active to pending balance deposits let validators = post.validators().clone(); - let mut pre_activation = validators + let pre_activation = validators .iter() .enumerate() .filter(|(_, validator)| validator.activation_epoch == spec.far_future_epoch) + .sorted_by_key(|(index, validator)| (validator.activation_eligibility_epoch, *index)) .collect::>(); - // Sort the indices by activation_eligibility_epoch and then by index - pre_activation.sort_by(|(index_a, val_a), (index_b, val_b)| { - if val_a.activation_eligibility_epoch == val_b.activation_eligibility_epoch { - index_a.cmp(index_b) - } else { - val_a - .activation_eligibility_epoch - .cmp(&val_b.activation_eligibility_epoch) - } - }); - // Process validators to queue entire balance and reset them for (index, _) in pre_activation { - post.queue_entire_balance_and_reset_validator(index, spec)?; + let balance = post + .balances_mut() + .get_mut(index) + .ok_or(Error::UnknownValidator(index))?; + let balance_copy = *balance; + *balance = 0_u64; + + let validator = post + .validators_mut() + .get_mut(index) + .ok_or(Error::UnknownValidator(index))?; + validator.effective_balance = 0; + validator.activation_eligibility_epoch = spec.far_future_epoch; + let pubkey = validator.pubkey; + let withdrawal_credentials = validator.withdrawal_credentials; + + post.pending_deposits_mut()? + .push(PendingDeposit { + pubkey, + withdrawal_credentials, + amount: balance_copy, + signature: Signature::infinity()?.into(), + slot: spec.genesis_slot, + }) + .map_err(Error::MilhouseError)?; } // Ensure early adopters of compounding credentials go through the activation churn + let validators = post.validators().clone(); for (index, validator) in validators.iter().enumerate() { if validator.has_compounding_withdrawal_credential(spec) { post.queue_excess_active_balance(index, spec)?; @@ -137,7 +154,7 @@ pub fn upgrade_state_to_electra( earliest_exit_epoch, consolidation_balance_to_consume: 0, earliest_consolidation_epoch, - pending_balance_deposits: Default::default(), + pending_deposits: Default::default(), pending_partial_withdrawals: Default::default(), pending_consolidations: Default::default(), // Caches diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 833231dca3..77b72b209c 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -509,7 +509,7 @@ where #[compare_fields(as_iter)] #[test_random(default)] #[superstruct(only(Electra))] - pub pending_balance_deposits: List, + pub pending_deposits: List, #[compare_fields(as_iter)] #[test_random(default)] #[superstruct(only(Electra))] @@ -1547,19 +1547,23 @@ impl BeaconState { .ok_or(Error::UnknownValidator(validator_index)) } + /// Add a validator to the registry and return the validator index that was allocated for it. 
pub fn add_validator_to_registry( &mut self, - deposit_data: &DepositData, + pubkey: PublicKeyBytes, + withdrawal_credentials: Hash256, + amount: u64, spec: &ChainSpec, - ) -> Result<(), Error> { - let fork = self.fork_name_unchecked(); - let amount = if fork.electra_enabled() { - 0 - } else { - deposit_data.amount - }; - self.validators_mut() - .push(Validator::from_deposit(deposit_data, amount, fork, spec))?; + ) -> Result { + let index = self.validators().len(); + let fork_name = self.fork_name_unchecked(); + self.validators_mut().push(Validator::from_deposit( + pubkey, + withdrawal_credentials, + amount, + fork_name, + spec, + ))?; self.balances_mut().push(amount)?; // Altair or later initializations. @@ -1573,7 +1577,20 @@ impl BeaconState { inactivity_scores.push(0)?; } - Ok(()) + // Keep the pubkey cache up to date if it was up to date prior to this call. + // + // Doing this here while we know the pubkey and index is marginally quicker than doing it in + // a call to `update_pubkey_cache` later because we don't need to index into the validators + // tree again. + let pubkey_cache = self.pubkey_cache_mut(); + if pubkey_cache.len() == index { + let success = pubkey_cache.insert(pubkey, index); + if !success { + return Err(Error::PubkeyCacheInconsistent); + } + } + + Ok(index) } /// Safe copy-on-write accessor for the `validators` list. @@ -1780,19 +1797,6 @@ impl BeaconState { } } - /// Get the number of outstanding deposits. - /// - /// Returns `Err` if the state is invalid. - pub fn get_outstanding_deposit_len(&self) -> Result { - self.eth1_data() - .deposit_count - .checked_sub(self.eth1_deposit_index()) - .ok_or(Error::InvalidDepositState { - deposit_count: self.eth1_data().deposit_count, - deposit_index: self.eth1_deposit_index(), - }) - } - /// Build all caches (except the tree hash cache), if they need to be built. pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { self.build_all_committee_caches(spec)?; @@ -2149,27 +2153,6 @@ impl BeaconState { .map_err(Into::into) } - /// Get active balance for the given `validator_index`. - pub fn get_active_balance( - &self, - validator_index: usize, - spec: &ChainSpec, - current_fork: ForkName, - ) -> Result { - let max_effective_balance = self - .validators() - .get(validator_index) - .map(|validator| validator.get_max_effective_balance(spec, current_fork)) - .ok_or(Error::UnknownValidator(validator_index))?; - Ok(std::cmp::min( - *self - .balances() - .get(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?, - max_effective_balance, - )) - } - pub fn get_pending_balance_to_withdraw(&self, validator_index: usize) -> Result { let mut pending_balance = 0; for withdrawal in self @@ -2196,42 +2179,18 @@ impl BeaconState { if *balance > spec.min_activation_balance { let excess_balance = balance.safe_sub(spec.min_activation_balance)?; *balance = spec.min_activation_balance; - self.pending_balance_deposits_mut()? 
- .push(PendingBalanceDeposit { - index: validator_index as u64, - amount: excess_balance, - })?; + let validator = self.get_validator(validator_index)?.clone(); + self.pending_deposits_mut()?.push(PendingDeposit { + pubkey: validator.pubkey, + withdrawal_credentials: validator.withdrawal_credentials, + amount: excess_balance, + signature: Signature::infinity()?.into(), + slot: spec.genesis_slot, + })?; } Ok(()) } - pub fn queue_entire_balance_and_reset_validator( - &mut self, - validator_index: usize, - spec: &ChainSpec, - ) -> Result<(), Error> { - let balance = self - .balances_mut() - .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?; - let balance_copy = *balance; - *balance = 0_u64; - - let validator = self - .validators_mut() - .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?; - validator.effective_balance = 0; - validator.activation_eligibility_epoch = spec.far_future_epoch; - - self.pending_balance_deposits_mut()? - .push(PendingBalanceDeposit { - index: validator_index as u64, - amount: balance_copy, - }) - .map_err(Into::into) - } - /// Change the withdrawal prefix of the given `validator_index` to the compounding withdrawal validator prefix. pub fn switch_to_compounding_validator( &mut self, @@ -2242,12 +2201,10 @@ impl BeaconState { .validators_mut() .get_mut(validator_index) .ok_or(Error::UnknownValidator(validator_index))?; - if validator.has_eth1_withdrawal_credential(spec) { - AsMut::<[u8; 32]>::as_mut(&mut validator.withdrawal_credentials)[0] = - spec.compounding_withdrawal_prefix_byte; + AsMut::<[u8; 32]>::as_mut(&mut validator.withdrawal_credentials)[0] = + spec.compounding_withdrawal_prefix_byte; - self.queue_excess_active_balance(validator_index, spec)?; - } + self.queue_excess_active_balance(validator_index, spec)?; Ok(()) } diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index 3ad3ccf561..bfa7bb86d2 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -307,43 +307,6 @@ mod committees { } } -mod get_outstanding_deposit_len { - use super::*; - - async fn state() -> BeaconState { - get_harness(16, Slot::new(0)) - .await - .chain - .head_beacon_state_cloned() - } - - #[tokio::test] - async fn returns_ok() { - let mut state = state().await; - assert_eq!(state.get_outstanding_deposit_len(), Ok(0)); - - state.eth1_data_mut().deposit_count = 17; - *state.eth1_deposit_index_mut() = 16; - assert_eq!(state.get_outstanding_deposit_len(), Ok(1)); - } - - #[tokio::test] - async fn returns_err_if_the_state_is_invalid() { - let mut state = state().await; - // The state is invalid, deposit count is lower than deposit index. 
- state.eth1_data_mut().deposit_count = 16; - *state.eth1_deposit_index_mut() = 17; - - assert_eq!( - state.get_outstanding_deposit_len(), - Err(BeaconStateError::InvalidDepositState { - deposit_count: 16, - deposit_index: 17, - }) - ); - } -} - #[test] fn decode_base_and_altair() { type E = MainnetEthSpec; diff --git a/consensus/types/src/deposit_request.rs b/consensus/types/src/deposit_request.rs index 7af949fef3..a21760551b 100644 --- a/consensus/types/src/deposit_request.rs +++ b/consensus/types/src/deposit_request.rs @@ -1,5 +1,6 @@ use crate::test_utils::TestRandom; -use crate::{Hash256, PublicKeyBytes, Signature}; +use crate::{Hash256, PublicKeyBytes}; +use bls::SignatureBytes; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -10,7 +11,6 @@ use tree_hash_derive::TreeHash; arbitrary::Arbitrary, Debug, PartialEq, - Eq, Hash, Clone, Serialize, @@ -25,7 +25,7 @@ pub struct DepositRequest { pub withdrawal_credentials: Hash256, #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, - pub signature: Signature, + pub signature: SignatureBytes, #[serde(with = "serde_utils::quoted_u64")] pub index: u64, } @@ -36,7 +36,7 @@ impl DepositRequest { pubkey: PublicKeyBytes::empty(), withdrawal_credentials: Hash256::ZERO, amount: 0, - signature: Signature::empty(), + signature: SignatureBytes::empty(), index: 0, } .as_ssz_bytes() diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 09ef8e3c1a..23e8276209 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -151,7 +151,7 @@ pub trait EthSpec: /* * New in Electra */ - type PendingBalanceDepositsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type PendingDepositsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type PendingPartialWithdrawalsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type PendingConsolidationsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxConsolidationRequestsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; @@ -159,6 +159,7 @@ pub trait EthSpec: type MaxAttesterSlashingsElectra: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxAttestationsElectra: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxWithdrawalRequestsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxPendingDepositsPerEpoch: Unsigned + Clone + Sync + Send + Debug + PartialEq; fn default_spec() -> ChainSpec; @@ -331,9 +332,9 @@ pub trait EthSpec: .expect("Preset values are not configurable and never result in non-positive block body depth") } - /// Returns the `PENDING_BALANCE_DEPOSITS_LIMIT` constant for this specification. - fn pending_balance_deposits_limit() -> usize { - Self::PendingBalanceDepositsLimit::to_usize() + /// Returns the `PENDING_DEPOSITS_LIMIT` constant for this specification. + fn pending_deposits_limit() -> usize { + Self::PendingDepositsLimit::to_usize() } /// Returns the `PENDING_PARTIAL_WITHDRAWALS_LIMIT` constant for this specification. @@ -371,6 +372,11 @@ pub trait EthSpec: Self::MaxWithdrawalRequestsPerPayload::to_usize() } + /// Returns the `MAX_PENDING_DEPOSITS_PER_EPOCH` constant for this specification. 
+ fn max_pending_deposits_per_epoch() -> usize { + Self::MaxPendingDepositsPerEpoch::to_usize() + } + fn kzg_commitments_inclusion_proof_depth() -> usize { Self::KzgCommitmentsInclusionProofDepth::to_usize() } @@ -430,7 +436,7 @@ impl EthSpec for MainnetEthSpec { type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch type MaxBlsToExecutionChanges = U16; type MaxWithdrawalsPerPayload = U16; - type PendingBalanceDepositsLimit = U134217728; + type PendingDepositsLimit = U134217728; type PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; type MaxConsolidationRequestsPerPayload = U1; @@ -438,6 +444,7 @@ impl EthSpec for MainnetEthSpec { type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; type MaxWithdrawalRequestsPerPayload = U16; + type MaxPendingDepositsPerEpoch = U16; fn default_spec() -> ChainSpec { ChainSpec::mainnet() @@ -500,7 +507,8 @@ impl EthSpec for MinimalEthSpec { MaxBlsToExecutionChanges, MaxBlobsPerBlock, BytesPerFieldElement, - PendingBalanceDepositsLimit, + PendingDepositsLimit, + MaxPendingDepositsPerEpoch, MaxConsolidationRequestsPerPayload, MaxAttesterSlashingsElectra, MaxAttestationsElectra @@ -557,7 +565,7 @@ impl EthSpec for GnosisEthSpec { type BytesPerFieldElement = U32; type BytesPerBlob = U131072; type KzgCommitmentInclusionProofDepth = U17; - type PendingBalanceDepositsLimit = U134217728; + type PendingDepositsLimit = U134217728; type PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; type MaxConsolidationRequestsPerPayload = U1; @@ -565,6 +573,7 @@ impl EthSpec for GnosisEthSpec { type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; type MaxWithdrawalRequestsPerPayload = U16; + type MaxPendingDepositsPerEpoch = U16; type FieldElementsPerCell = U64; type FieldElementsPerExtBlob = U8192; type BytesPerCell = U2048; diff --git a/consensus/types/src/execution_block_header.rs b/consensus/types/src/execution_block_header.rs index 694162d6ff..60f2960afb 100644 --- a/consensus/types/src/execution_block_header.rs +++ b/consensus/types/src/execution_block_header.rs @@ -52,9 +52,11 @@ pub struct ExecutionBlockHeader { pub blob_gas_used: Option, pub excess_blob_gas: Option, pub parent_beacon_block_root: Option, + pub requests_root: Option, } impl ExecutionBlockHeader { + #[allow(clippy::too_many_arguments)] pub fn from_payload( payload: ExecutionPayloadRef, rlp_empty_list_root: Hash256, @@ -63,6 +65,7 @@ impl ExecutionBlockHeader { rlp_blob_gas_used: Option, rlp_excess_blob_gas: Option, rlp_parent_beacon_block_root: Option, + rlp_requests_root: Option, ) -> Self { // Most of these field mappings are defined in EIP-3675 except for `mixHash`, which is // defined in EIP-4399. 
@@ -87,6 +90,7 @@ impl ExecutionBlockHeader { blob_gas_used: rlp_blob_gas_used, excess_blob_gas: rlp_excess_blob_gas, parent_beacon_block_root: rlp_parent_beacon_block_root, + requests_root: rlp_requests_root, } } } @@ -114,6 +118,7 @@ pub struct EncodableExecutionBlockHeader<'a> { pub blob_gas_used: Option, pub excess_blob_gas: Option, pub parent_beacon_block_root: Option<&'a [u8]>, + pub requests_root: Option<&'a [u8]>, } impl<'a> From<&'a ExecutionBlockHeader> for EncodableExecutionBlockHeader<'a> { @@ -139,6 +144,7 @@ impl<'a> From<&'a ExecutionBlockHeader> for EncodableExecutionBlockHeader<'a> { blob_gas_used: header.blob_gas_used, excess_blob_gas: header.excess_blob_gas, parent_beacon_block_root: None, + requests_root: None, }; if let Some(withdrawals_root) = &header.withdrawals_root { encodable.withdrawals_root = Some(withdrawals_root.as_slice()); @@ -146,6 +152,9 @@ impl<'a> From<&'a ExecutionBlockHeader> for EncodableExecutionBlockHeader<'a> { if let Some(parent_beacon_block_root) = &header.parent_beacon_block_root { encodable.parent_beacon_block_root = Some(parent_beacon_block_root.as_slice()) } + if let Some(requests_root) = &header.requests_root { + encodable.requests_root = Some(requests_root.as_slice()) + } encodable } } diff --git a/consensus/types/src/execution_requests.rs b/consensus/types/src/execution_requests.rs index 778260dd84..96a3905420 100644 --- a/consensus/types/src/execution_requests.rs +++ b/consensus/types/src/execution_requests.rs @@ -1,7 +1,8 @@ use crate::test_utils::TestRandom; -use crate::{ConsolidationRequest, DepositRequest, EthSpec, WithdrawalRequest}; +use crate::{ConsolidationRequest, DepositRequest, EthSpec, Hash256, WithdrawalRequest}; use alloy_primitives::Bytes; use derivative::Derivative; +use ethereum_hashing::{DynamicContext, Sha256Context}; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -47,6 +48,43 @@ impl ExecutionRequests { let consolidation_bytes = Bytes::from(self.consolidations.as_ssz_bytes()); vec![deposit_bytes, withdrawal_bytes, consolidation_bytes] } + + /// Generate the execution layer `requests_hash` based on EIP-7685. + /// + /// `sha256(sha256(requests_0) ++ sha256(requests_1) ++ ...)` + pub fn requests_hash(&self) -> Hash256 { + let mut hasher = DynamicContext::new(); + + for (i, request) in self.get_execution_requests_list().iter().enumerate() { + let mut request_hasher = DynamicContext::new(); + request_hasher.update(&[i as u8]); + request_hasher.update(request); + let request_hash = request_hasher.finalize(); + + hasher.update(&request_hash); + } + + hasher.finalize().into() + } +} + +/// This is used to index into the `execution_requests` array. 
+#[derive(Debug, Copy, Clone)] +pub enum RequestPrefix { + Deposit, + Withdrawal, + Consolidation, +} + +impl RequestPrefix { + pub fn from_prefix(prefix: u8) -> Option { + match prefix { + 0 => Some(Self::Deposit), + 1 => Some(Self::Withdrawal), + 2 => Some(Self::Consolidation), + _ => None, + } + } } #[cfg(test)] diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index eff5237834..dd304c6296 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -54,8 +54,8 @@ pub mod light_client_finality_update; pub mod light_client_optimistic_update; pub mod light_client_update; pub mod pending_attestation; -pub mod pending_balance_deposit; pub mod pending_consolidation; +pub mod pending_deposit; pub mod pending_partial_withdrawal; pub mod proposer_preparation_data; pub mod proposer_slashing; @@ -170,7 +170,7 @@ pub use crate::execution_payload_header::{ ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }; -pub use crate::execution_requests::ExecutionRequests; +pub use crate::execution_requests::{ExecutionRequests, RequestPrefix}; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; @@ -210,8 +210,8 @@ pub use crate::payload::{ FullPayloadRef, OwnedExecPayload, }; pub use crate::pending_attestation::PendingAttestation; -pub use crate::pending_balance_deposit::PendingBalanceDeposit; pub use crate::pending_consolidation::PendingConsolidation; +pub use crate::pending_deposit::PendingDeposit; pub use crate::pending_partial_withdrawal::PendingPartialWithdrawal; pub use crate::preset::{ AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset, ElectraPreset, diff --git a/consensus/types/src/pending_balance_deposit.rs b/consensus/types/src/pending_deposit.rs similarity index 68% rename from consensus/types/src/pending_balance_deposit.rs rename to consensus/types/src/pending_deposit.rs index a2bce577f8..3bee86417d 100644 --- a/consensus/types/src/pending_balance_deposit.rs +++ b/consensus/types/src/pending_deposit.rs @@ -1,4 +1,5 @@ use crate::test_utils::TestRandom; +use crate::*; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -8,7 +9,6 @@ use tree_hash_derive::TreeHash; arbitrary::Arbitrary, Debug, PartialEq, - Eq, Hash, Clone, Serialize, @@ -18,16 +18,18 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] -pub struct PendingBalanceDeposit { - #[serde(with = "serde_utils::quoted_u64")] - pub index: u64, +pub struct PendingDeposit { + pub pubkey: PublicKeyBytes, + pub withdrawal_credentials: Hash256, #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, + pub signature: SignatureBytes, + pub slot: Slot, } #[cfg(test)] mod tests { use super::*; - ssz_and_tree_hash_tests!(PendingBalanceDeposit); + ssz_and_tree_hash_tests!(PendingDeposit); } diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 435a74bdc3..b469b7b777 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -263,7 +263,7 @@ impl ElectraPreset { whistleblower_reward_quotient_electra: spec.whistleblower_reward_quotient_electra, max_pending_partials_per_withdrawals_sweep: spec .max_pending_partials_per_withdrawals_sweep, - pending_balance_deposits_limit: E::pending_balance_deposits_limit() as u64, + pending_balance_deposits_limit: E::pending_deposits_limit() as u64, pending_partial_withdrawals_limit: E::pending_partial_withdrawals_limit() as u64, 
pending_consolidations_limit: E::pending_consolidations_limit() as u64, max_consolidation_requests_per_payload: E::max_consolidation_requests_per_payload() diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 275101ddbe..222b9292a2 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,6 +1,6 @@ use crate::{ - test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, DepositData, Epoch, - EthSpec, FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, + test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, + FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, }; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -38,14 +38,15 @@ pub struct Validator { impl Validator { #[allow(clippy::arithmetic_side_effects)] pub fn from_deposit( - deposit_data: &DepositData, + pubkey: PublicKeyBytes, + withdrawal_credentials: Hash256, amount: u64, fork_name: ForkName, spec: &ChainSpec, ) -> Self { let mut validator = Validator { - pubkey: deposit_data.pubkey, - withdrawal_credentials: deposit_data.withdrawal_credentials, + pubkey, + withdrawal_credentials, activation_eligibility_epoch: spec.far_future_epoch, activation_epoch: spec.far_future_epoch, exit_epoch: spec.far_future_epoch, @@ -291,16 +292,6 @@ impl Validator { spec.max_effective_balance } } - - pub fn get_active_balance( - &self, - validator_balance: u64, - spec: &ChainSpec, - current_fork: ForkName, - ) -> u64 { - let max_effective_balance = self.get_max_effective_balance(spec, current_fork); - std::cmp::min(validator_balance, max_effective_balance) - } } impl Default for Validator { diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 390711079f..d5f4997bb7 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.5.0-alpha.6 +TESTS_TAG := v1.5.0-alpha.8 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index dfd782a22b..c1adf10770 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -86,7 +86,7 @@ type_name!(RewardsAndPenalties, "rewards_and_penalties"); type_name!(RegistryUpdates, "registry_updates"); type_name!(Slashings, "slashings"); type_name!(Eth1DataReset, "eth1_data_reset"); -type_name!(PendingBalanceDeposits, "pending_balance_deposits"); +type_name!(PendingBalanceDeposits, "pending_deposits"); type_name!(PendingConsolidations, "pending_consolidations"); type_name!(EffectiveBalanceUpdates, "effective_balance_updates"); type_name!(SlashingsReset, "slashings_reset"); @@ -193,7 +193,7 @@ impl EpochTransition for PendingBalanceDeposits { state, spec, SinglePassConfig { - pending_balance_deposits: true, + pending_deposits: true, ..SinglePassConfig::disable_all() }, ) @@ -363,7 +363,7 @@ impl> Case for EpochProcessing { } if !fork_name.electra_enabled() - && (T::name() == "pending_consolidations" || T::name() == "pending_balance_deposits") + && (T::name() == "pending_consolidations" || T::name() == "pending_deposits") { return false; } diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index a9322e5dd5..c50032a63d 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -134,7 +134,7 @@ type_name_generic!(LightClientUpdateElectra, "LightClientUpdate"); 
type_name_generic!(PendingAttestation); type_name!(PendingConsolidation); type_name!(PendingPartialWithdrawal); -type_name!(PendingBalanceDeposit); +type_name!(PendingDeposit); type_name!(ProposerSlashing); type_name_generic!(SignedAggregateAndProof); type_name_generic!(SignedAggregateAndProofBase, "SignedAggregateAndProof"); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 3f802d8944..292625a371 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -243,8 +243,7 @@ mod ssz_static { use types::historical_summary::HistoricalSummary; use types::{ AttesterSlashingBase, AttesterSlashingElectra, ConsolidationRequest, DepositRequest, - LightClientBootstrapAltair, PendingBalanceDeposit, PendingPartialWithdrawal, - WithdrawalRequest, *, + LightClientBootstrapAltair, PendingDeposit, PendingPartialWithdrawal, WithdrawalRequest, *, }; ssz_static_test!(attestation_data, AttestationData); @@ -661,8 +660,8 @@ mod ssz_static { #[test] fn pending_balance_deposit() { - SszStaticHandler::::electra_and_later().run(); - SszStaticHandler::::electra_and_later().run(); + SszStaticHandler::::electra_and_later().run(); + SszStaticHandler::::electra_and_later().run(); } #[test] From 1de498340c5166e4abbc3de12dae4af6dab7c6c3 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Tue, 17 Dec 2024 15:26:59 +0800 Subject: [PATCH 18/25] Add spell check and update Lighthouse book (#6627) * spellcheck config * Merge remote-tracking branch 'origin/unstable' into spellcheck * spellcheck update * update spellcheck * spell check passes * Remove ignored and add other md files * Remove some words in wordlist * CI * test spell check CI * correct spell check * Merge branch 'unstable' into spellcheck * minor fix * Merge branch 'spellcheck' of https://github.com/chong-he/lighthouse into spellcheck * Update book * mdlint * delete previous_epoch_active_gwei * Merge branch 'unstable' into spellcheck * Tweak "container runtime" wording * Try `BeaconState`s --- .github/workflows/test-suite.yml | 2 + .spellcheck.yml | 35 +++++ CONTRIBUTING.md | 2 +- README.md | 2 +- book/src/advanced_database.md | 2 +- book/src/advanced_networking.md | 6 +- book/src/api-lighthouse.md | 35 +++-- book/src/faq.md | 6 +- book/src/graffiti.md | 2 +- book/src/homebrew.md | 2 +- book/src/late-block-re-orgs.md | 19 ++- book/src/ui-faqs.md | 2 +- book/src/ui-installation.md | 2 +- book/src/validator-inclusion.md | 1 - book/src/validator-manager.md | 1 + book/src/validator-monitoring.md | 2 +- scripts/local_testnet/README.md | 8 +- wordlist.txt | 235 +++++++++++++++++++++++++++++++ 18 files changed, 331 insertions(+), 33 deletions(-) create mode 100644 .spellcheck.yml create mode 100644 wordlist.txt diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 8da46ed8ee..bba670cc22 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -363,6 +363,8 @@ jobs: run: CARGO_HOME=$(readlink -f $HOME) make vendor - name: Markdown-linter run: make mdlint + - name: Spell-check + uses: rojopolis/spellcheck-github-actions@v0 check-msrv: name: check-msrv runs-on: ubuntu-latest diff --git a/.spellcheck.yml b/.spellcheck.yml new file mode 100644 index 0000000000..692bc4d176 --- /dev/null +++ b/.spellcheck.yml @@ -0,0 +1,35 @@ +matrix: +- name: Markdown + sources: + - './book/**/*.md' + - 'README.md' + - 'CONTRIBUTING.md' + - 'SECURITY.md' + - './scripts/local_testnet/README.md' + default_encoding: utf-8 + aspell: + 
lang: en + dictionary: + wordlists: + - wordlist.txt + encoding: utf-8 + pipeline: + - pyspelling.filters.url: + - pyspelling.filters.markdown: + markdown_extensions: + - pymdownx.superfences: + - pymdownx.highlight: + - pymdownx.striphtml: + - pymdownx.magiclink: + - pyspelling.filters.html: + comments: false + ignores: + - code + - pre + - pyspelling.filters.context: + context_visible_first: true + delimiters: + # Ignore hex strings + - open: '0x[a-fA-F0-9]' + close: '[^a-fA-F0-9]' + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3c53558a10..4cad219c89 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -85,7 +85,7 @@ steps: 5. Commit your changes and push them to your fork with `$ git push origin your_feature_name`. 6. Go to your fork on github.com and use the web interface to create a pull - request into the sigp/lighthouse repo. + request into the sigp/lighthouse repository. From there, the repository maintainers will review the PR and either accept it or provide some constructive feedback. diff --git a/README.md b/README.md index 4b22087bcd..147a06e504 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ Lighthouse is: - Built in [Rust](https://www.rust-lang.org), a modern language providing unique safety guarantees and excellent performance (comparable to C++). - Funded by various organisations, including Sigma Prime, the - Ethereum Foundation, ConsenSys, the Decentralization Foundation and private individuals. + Ethereum Foundation, Consensys, the Decentralization Foundation and private individuals. - Actively involved in the specification and security analysis of the Ethereum proof-of-stake consensus specification. diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index d8d6ea61a1..b558279730 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -56,7 +56,7 @@ that we have observed are: _a lot_ of space. It's even possible to push beyond that with `--hierarchy-exponents 0` which would store a full state every single slot (NOT RECOMMENDED). - **Less diff layers are not necessarily faster**. One might expect that the fewer diff layers there - are, the less work Lighthouse would have to do to reconstruct any particular state. In practise + are, the less work Lighthouse would have to do to reconstruct any particular state. In practice this seems to be offset by the increased size of diffs in each layer making the diffs take longer to apply. We observed no significant performance benefit from `--hierarchy-exponents 5,7,11`, and a substantial increase in space consumed. diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index 732b4f51e6..c0f6b5485e 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -68,7 +68,7 @@ The steps to do port forwarding depends on the router, but the general steps are 1. Determine the default gateway IP: - On Linux: open a terminal and run `ip route | grep default`, the result should look something similar to `default via 192.168.50.1 dev wlp2s0 proto dhcp metric 600`. The `192.168.50.1` is your router management default gateway IP. - - On MacOS: open a terminal and run `netstat -nr|grep default` and it should return the default gateway IP. + - On macOS: open a terminal and run `netstat -nr|grep default` and it should return the default gateway IP. - On Windows: open a command prompt and run `ipconfig` and look for the `Default Gateway` which will show you the gateway IP. The default gateway IP usually looks like 192.168.X.X. 
Once you obtain the IP, enter it to a web browser and it will lead you to the router management page. @@ -91,7 +91,7 @@ The steps to do port forwarding depends on the router, but the general steps are - Internal port: `9001` - IP address: Choose the device that is running Lighthouse. -1. To check that you have successfully opened the ports, go to [yougetsignal](https://www.yougetsignal.com/tools/open-ports/) and enter `9000` in the `port number`. If it shows "open", then you have successfully set up port forwarding. If it shows "closed", double check your settings, and also check that you have allowed firewall rules on port 9000. Note: this will only confirm if port 9000/TCP is open. You will need to ensure you have correctly setup port forwarding for the UDP ports (`9000` and `9001` by default). +1. To check that you have successfully opened the ports, go to [`yougetsignal`](https://www.yougetsignal.com/tools/open-ports/) and enter `9000` in the `port number`. If it shows "open", then you have successfully set up port forwarding. If it shows "closed", double check your settings, and also check that you have allowed firewall rules on port 9000. Note: this will only confirm if port 9000/TCP is open. You will need to ensure you have correctly setup port forwarding for the UDP ports (`9000` and `9001` by default). ## ENR Configuration @@ -141,7 +141,7 @@ To listen over both IPv4 and IPv6: - Set two listening addresses using the `--listen-address` flag twice ensuring the two addresses are one IPv4, and the other IPv6. When doing so, the `--port` and `--discovery-port` flags will apply exclusively to IPv4. Note - that this behaviour differs from the Ipv6 only case described above. + that this behaviour differs from the IPv6 only case described above. - If necessary, set the `--port6` flag to configure the port used for TCP and UDP over IPv6. This flag has no effect when listening over IPv6 only. - If necessary, set the `--discovery-port6` flag to configure the IPv6 UDP diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index b63505c490..5428ab8f9a 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -508,23 +508,31 @@ curl "http://localhost:5052/lighthouse/database/info" | jq ```json { - "schema_version": 18, + "schema_version": 22, "config": { - "slots_per_restore_point": 8192, - "slots_per_restore_point_set_explicitly": false, "block_cache_size": 5, + "state_cache_size": 128, + "compression_level": 1, "historic_state_cache_size": 1, + "hdiff_buffer_cache_size": 16, "compact_on_init": false, "compact_on_prune": true, "prune_payloads": true, + "hierarchy_config": { + "exponents": [ + 5, + 7, + 11 + ] + }, "prune_blobs": true, "epochs_per_blob_prune": 1, "blob_prune_margin_epochs": 0 }, "split": { - "slot": "7454656", - "state_root": "0xbecfb1c8ee209854c611ebc967daa77da25b27f1a8ef51402fdbe060587d7653", - "block_root": "0x8730e946901b0a406313d36b3363a1b7091604e1346a3410c1a7edce93239a68" + "slot": "10530592", + "state_root": "0xd27e6ce699637cf9b5c7ca632118b7ce12c2f5070bb25a27ac353ff2799d4466", + "block_root": "0x71509a1cb374773d680cd77148c73ab3563526dacb0ab837bb0c87e686962eae" }, "anchor": { "anchor_slot": "7451168", @@ -543,8 +551,19 @@ curl "http://localhost:5052/lighthouse/database/info" | jq For more information about the split point, see the [Database Configuration](./advanced_database.md) docs. -The `anchor` will be `null` unless the node has been synced with checkpoint sync and state -reconstruction has yet to be completed. 
For more information
+For archive nodes, the `anchor` will be:
+
+```json
+"anchor": {
+ "anchor_slot": "0",
+ "oldest_block_slot": "0",
+ "oldest_block_parent": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "state_upper_limit": "0",
+ "state_lower_limit": "0"
+ },
+```
+
+indicating that all states with slots `>= 0` are available, i.e., full state history. For more information
 on the specific meanings of these fields see the docs on
 [Checkpoint Sync](./checkpoint-sync.md#reconstructing-states).
 
diff --git a/book/src/faq.md b/book/src/faq.md
index 04e5ce5bc8..d23951c8c7 100644
--- a/book/src/faq.md
+++ b/book/src/faq.md
@@ -92,7 +92,7 @@ If the reason for the error message is caused by no. 1 above, you may want to lo
 - Power outage. If power outages are an issue at your place, consider getting a UPS to avoid ungraceful shutdown of services.
 - The service file is not stopped properly. To overcome this, make sure that the process is stopped properly, e.g., during client updates.
-- Out of memory (oom) error. This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. To confirm that the error is due to oom, run `sudo dmesg -T | grep killed` to look for killed processes. If you are using geth as the execution client, a short term solution is to reduce the resources used. For example, you can reduce the cache by adding the flag `--cache 2048`. If the oom occurs rather frequently, a long term solution is to increase the memory capacity of the computer.
+- Out of memory (oom) error. This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. To confirm that the error is due to oom, run `sudo dmesg -T | grep killed` to look for killed processes. If you are using Geth as the execution client, a short term solution is to reduce the resources used. For example, you can reduce the cache by adding the flag `--cache 2048`. If the oom occurs rather frequently, a long term solution is to increase the memory capacity of the computer.
 
 ### I see beacon logs showing `Error during execution engine upcheck`, what should I do?
 
@@ -302,7 +302,7 @@ An example of the log: (debug logs can be found under `$datadir/beacon/logs`):
 Delayed head block, set_as_head_time_ms: 27, imported_time_ms: 168, attestable_delay_ms: 4209, available_delay_ms: 4186, execution_time_ms: 201, blob_delay_ms: 3815, observed_delay_ms: 3984, total_delay_ms: 4381, slot: 1886014, proposer_index: 733, block_root: 0xa7390baac88d50f1cbb5ad81691915f6402385a12521a670bbbd4cd5f8bf3934, service: beacon, module: beacon_chain::canonical_head:1441
 ```
 
-The field to look for is `attestable_delay`, which defines the time when a block is ready for the validator to attest. If the `attestable_delay` is greater than 4s which has past the window of attestation, the attestation wil fail. In the above example, the delay is mostly caused by late block observed by the node, as shown in `observed_delay`. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). Ideally, `observed_delay` should be less than 3 seconds. In this example, the validator failed to attest the block due to the block arriving late.
+The field to look for is `attestable_delay`, which defines the time when a block is ready for the validator to attest. If the `attestable_delay` is greater than 4s, which is past the window of attestation, the attestation will fail.
In the above example, the delay is mostly caused by late block observed by the node, as shown in `observed_delay`. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). Ideally, `observed_delay` should be less than 3 seconds. In this example, the validator failed to attest the block due to the block arriving late. Another example of log: @@ -315,7 +315,7 @@ In this example, we see that the `execution_time_ms` is 4694ms. The `execution_t ### Sometimes I miss the attestation head vote, resulting in penalty. Is this normal? -In general, it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch and if the proposer of that slot releases the block late, then you will get penalised for missing the target and head votes. Your attestation performance does not only depend on your own setup, but also on everyone elses performance. +In general, it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch and if the proposer of that slot releases the block late, then you will get penalised for missing the target and head votes. Your attestation performance does not only depend on your own setup, but also on everyone else's performance. You could also check for the sync aggregate participation percentage on block explorers such as [beaconcha.in](https://beaconcha.in/). A low sync aggregate participation percentage (e.g., 60-70%) indicates that the block that you are assigned to attest to may be published late. As a result, your validator fails to correctly attest to the block. diff --git a/book/src/graffiti.md b/book/src/graffiti.md index ba9c7d05d7..7b402ea866 100644 --- a/book/src/graffiti.md +++ b/book/src/graffiti.md @@ -4,7 +4,7 @@ Lighthouse provides four options for setting validator graffiti. ## 1. Using the "--graffiti-file" flag on the validator client -Users can specify a file with the `--graffiti-file` flag. This option is useful for dynamically changing graffitis for various use cases (e.g. drawing on the beaconcha.in graffiti wall). This file is loaded once on startup and reloaded everytime a validator is chosen to propose a block. +Users can specify a file with the `--graffiti-file` flag. This option is useful for dynamically changing graffitis for various use cases (e.g. drawing on the beaconcha.in graffiti wall). This file is loaded once on startup and reloaded every time a validator is chosen to propose a block. Usage: `lighthouse vc --graffiti-file graffiti_file.txt` diff --git a/book/src/homebrew.md b/book/src/homebrew.md index da92dcb26c..f94764889e 100644 --- a/book/src/homebrew.md +++ b/book/src/homebrew.md @@ -31,6 +31,6 @@ Alternatively, you can find the `lighthouse` binary at: The [formula][] is kept up-to-date by the Homebrew community and a bot that lists for new releases. -The package source can be found in the [homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/l/lighthouse.rb) repo. +The package source can be found in the [homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/l/lighthouse.rb) repository. 
[formula]: https://formulae.brew.sh/formula/lighthouse diff --git a/book/src/late-block-re-orgs.md b/book/src/late-block-re-orgs.md index 4a00f33aa4..fca156bda3 100644 --- a/book/src/late-block-re-orgs.md +++ b/book/src/late-block-re-orgs.md @@ -46,24 +46,31 @@ You can track the reasons for re-orgs being attempted (or not) via Lighthouse's A pair of messages at `INFO` level will be logged if a re-org opportunity is detected: -> INFO Attempting re-org due to weak head threshold_weight: 45455983852725, head_weight: 0, parent: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, weak_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 - -> INFO Proposing block to re-org current head head_to_reorg: 0xf64f…2b49, slot: 1105320 +```text +INFO Attempting re-org due to weak head threshold_weight: 45455983852725, head_weight: 0, parent: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, weak_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +INFO Proposing block to re-org current head head_to_reorg: 0xf64f…2b49, slot: 1105320 +``` This should be followed shortly after by a `INFO` log indicating that a re-org occurred. This is expected and normal: -> INFO Beacon chain re-org reorg_distance: 1, new_slot: 1105320, new_head: 0x72791549e4ca792f91053bc7cf1e55c6fbe745f78ce7a16fc3acb6f09161becd, previous_slot: 1105319, previous_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +```text +INFO Beacon chain re-org reorg_distance: 1, new_slot: 1105320, new_head: 0x72791549e4ca792f91053bc7cf1e55c6fbe745f78ce7a16fc3acb6f09161becd, previous_slot: 1105319, previous_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +``` In case a re-org is not viable (which should be most of the time), Lighthouse will just propose a block as normal and log the reason the re-org was not attempted at debug level: -> DEBG Not attempting re-org reason: head not late +```text +DEBG Not attempting re-org reason: head not late +``` If you are interested in digging into the timing of `forkchoiceUpdated` messages sent to the execution layer, there is also a debug log for the suppression of `forkchoiceUpdated` messages when Lighthouse thinks that a re-org is likely: -> DEBG Fork choice update overridden slot: 1105320, override: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, canonical_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +```text +DEBG Fork choice update overridden slot: 1105320, override: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, canonical_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +``` [the spec]: https://github.com/ethereum/consensus-specs/pull/3034 diff --git a/book/src/ui-faqs.md b/book/src/ui-faqs.md index efa6d3d4ab..0887875316 100644 --- a/book/src/ui-faqs.md +++ b/book/src/ui-faqs.md @@ -6,7 +6,7 @@ Yes, the most current Siren version requires Lighthouse v4.3.0 or higher to func ## 2. Where can I find my API token? -The required Api token may be found in the default data directory of the validator client. For more information please refer to the lighthouse ui configuration [`api token section`](./api-vc-auth-header.md). +The required API token may be found in the default data directory of the validator client. For more information please refer to the lighthouse ui configuration [`api token section`](./api-vc-auth-header.md). ## 3. How do I fix the Node Network Errors? 
diff --git a/book/src/ui-installation.md b/book/src/ui-installation.md index 1444c0d633..9cd84e5160 100644 --- a/book/src/ui-installation.md +++ b/book/src/ui-installation.md @@ -1,6 +1,6 @@ # 📦 Installation -Siren supports any operating system that supports container runtimes and/or NodeJS 18, this includes Linux, MacOS, and Windows. The recommended way of running Siren is by launching the [docker container](https://hub.docker.com/r/sigp/siren) , but running the application directly is also possible. +Siren supports any operating system that supports containers and/or NodeJS 18, this includes Linux, macOS, and Windows. The recommended way of running Siren is by launching the [docker container](https://hub.docker.com/r/sigp/siren) , but running the application directly is also possible. ## Version Requirement diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index 092c813a1e..eef563dcdb 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -56,7 +56,6 @@ The following fields are returned: able to vote) during the current epoch. - `current_epoch_target_attesting_gwei`: the total staked gwei that attested to the majority-elected Casper FFG target epoch during the current epoch. -- `previous_epoch_active_gwei`: as per `current_epoch_active_gwei`, but during the previous epoch. - `previous_epoch_target_attesting_gwei`: see `current_epoch_target_attesting_gwei`. - `previous_epoch_head_attesting_gwei`: the total staked gwei that attested to a head beacon block that is in the canonical chain. diff --git a/book/src/validator-manager.md b/book/src/validator-manager.md index a71fab1e3a..11df2af037 100644 --- a/book/src/validator-manager.md +++ b/book/src/validator-manager.md @@ -32,3 +32,4 @@ The `validator-manager` boasts the following features: - [Creating and importing validators using the `create` and `import` commands.](./validator-manager-create.md) - [Moving validators between two VCs using the `move` command.](./validator-manager-move.md) +- [Managing validators such as delete, import and list validators.](./validator-manager-api.md) diff --git a/book/src/validator-monitoring.md b/book/src/validator-monitoring.md index 6439ea83a3..bbc95460ec 100644 --- a/book/src/validator-monitoring.md +++ b/book/src/validator-monitoring.md @@ -134,7 +134,7 @@ validator_monitor_attestation_simulator_source_attester_hit_total validator_monitor_attestation_simulator_source_attester_miss_total ``` -A grafana dashboard to view the metrics for attestation simulator is available [here](https://github.com/sigp/lighthouse-metrics/blob/master/dashboards/AttestationSimulator.json). +A Grafana dashboard to view the metrics for attestation simulator is available [here](https://github.com/sigp/lighthouse-metrics/blob/master/dashboards/AttestationSimulator.json). The attestation simulator provides an insight into the attestation performance of a beacon node. It can be used as an indication of how expediently the beacon node has completed importing blocks within the 4s time frame for an attestation to be made. diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index ca701eb7e9..159c89badb 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -1,6 +1,6 @@ # Simple Local Testnet -These scripts allow for running a small local testnet with a default of 4 beacon nodes, 4 validator clients and 4 geth execution clients using Kurtosis. 
+These scripts allow for running a small local testnet with a default of 4 beacon nodes, 4 validator clients and 4 Geth execution clients using Kurtosis. This setup can be useful for testing and development. ## Installation @@ -9,7 +9,7 @@ This setup can be useful for testing and development. 1. Install [Kurtosis](https://docs.kurtosis.com/install/). Verify that Kurtosis has been successfully installed by running `kurtosis version` which should display the version. -1. Install [yq](https://github.com/mikefarah/yq). If you are on Ubuntu, you can install `yq` by running `snap install yq`. +1. Install [`yq`](https://github.com/mikefarah/yq). If you are on Ubuntu, you can install `yq` by running `snap install yq`. ## Starting the testnet @@ -22,7 +22,7 @@ cd ./scripts/local_testnet It will build a Lighthouse docker image from the root of the directory and will take an approximately 12 minutes to complete. Once built, the testing will be started automatically. You will see a list of services running and "Started!" at the end. You can also select your own Lighthouse docker image to use by specifying it in `network_params.yml` under the `cl_image` key. -Full configuration reference for kurtosis is specified [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration). +Full configuration reference for Kurtosis is specified [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration). To view all running services: @@ -36,7 +36,7 @@ To view the logs: kurtosis service logs local-testnet $SERVICE_NAME ``` -where `$SERVICE_NAME` is obtained by inspecting the running services above. For example, to view the logs of the first beacon node, validator client and geth: +where `$SERVICE_NAME` is obtained by inspecting the running services above. 
For example, to view the logs of the first beacon node, validator client and Geth: ```bash kurtosis service logs local-testnet -f cl-1-lighthouse-geth diff --git a/wordlist.txt b/wordlist.txt new file mode 100644 index 0000000000..f06c278866 --- /dev/null +++ b/wordlist.txt @@ -0,0 +1,235 @@ +APIs +ARMv +AUR +Backends +Backfilling +Beaconcha +Besu +Broadwell +BIP +BLS +BN +BNs +BTC +BTEC +Casper +CentOS +Chiado +CMake +CoinCashew +Consensys +CORS +CPUs +DBs +DES +DHT +DNS +Dockerhub +DoS +EIP +ENR +Erigon +Esat's +ETH +EthDocker +Ethereum +Ethstaker +Exercism +Extractable +FFG +Geth +Gitcoin +Gnosis +Goerli +Grafana +Holesky +Homebrew +Infura +IPs +IPv +JSON +KeyManager +Kurtosis +LMDB +LLVM +LRU +LTO +Mainnet +MDBX +Merkle +MEV +MSRV +NAT's +Nethermind +NodeJS +NullLogger +PathBuf +PowerShell +PPA +Pre +Proto +PRs +Prysm +QUIC +RasPi +README +RESTful +Reth +RHEL +Ropsten +RPC +Ryzen +Sepolia +Somer +SSD +SSL +SSZ +Styleguide +TCP +Teku +TLS +TODOs +UDP +UI +UPnP +USD +UX +Validator +VC +VCs +VPN +Withdrawable +WSL +YAML +aarch +anonymize +api +attester +backend +backends +backfill +backfilling +beaconcha +bitfield +blockchain +bn +cli +clippy +config +cpu +cryptocurrencies +cryptographic +danksharding +datadir +datadirs +de +decrypt +decrypted +dest +dir +disincentivise +doppelgänger +dropdown +else's +env +eth +ethdo +ethereum +ethstaker +filesystem +frontend +gapped +github +graffitis +gwei +hdiffs +homebrew +hostname +html +http +https +hDiff +implementers +interoperable +io +iowait +jemalloc +json +jwt +kb +keymanager +keypair +keypairs +keystore +keystores +linter +linux +localhost +lossy +macOS +mainnet +makefile +mdBook +mev +misconfiguration +mkcert +namespace +natively +nd +ness +nginx +nitty +oom +orging +orgs +os +paul +pem +performant +pid +pre +pubkey +pubkeys +rc +reimport +resync +roadmap +runtime +rustfmt +rustup +schemas +sigmaprime +sigp +slashable +slashings +spec'd +src +stakers +subnet +subnets +systemd +testnet +testnets +th +toml +topologies +tradeoffs +transactional +tweakers +ui +unadvanced +unaggregated +unencrypted +unfinalized +untrusted +uptimes +url +validator +validators +validator's +vc +virt +webapp +withdrawable +yaml +yml From 1315c94adbc929df39c4ebbd17d627129903e3b6 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 18 Dec 2024 07:10:53 +1100 Subject: [PATCH 19/25] Unsaturate dial negotiation queue (#6711) * Unsaturate dial-negotiation count --- beacon_node/lighthouse_network/src/rpc/handler.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index e76d6d2786..0a0a6ca754 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -964,6 +964,9 @@ where request_info: (Id, RequestType), error: StreamUpgradeError, ) { + // This dialing is now considered failed + self.dial_negotiated -= 1; + let (id, req) = request_info; // map the error @@ -989,9 +992,6 @@ where StreamUpgradeError::Apply(other) => other, }; - // This dialing is now considered failed - self.dial_negotiated -= 1; - self.outbound_io_error_retries = 0; self.events_out .push(HandlerEvent::Err(HandlerErr::Outbound { From 2662dc7f8fba71a5682f8906a6f6a71374757c23 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 18 Dec 2024 05:35:58 +0530 Subject: [PATCH 20/25] Fix Sse client api (#6685) * Use reqwest eventsource for get_events api * await for Event::Open before returning stream * fmt * Merge branch 
'unstable' into sse-client-fix * Ignore lint --- Cargo.lock | 28 ++++++++++++ beacon_node/beacon_chain/tests/store_tests.rs | 1 + common/eth2/Cargo.toml | 1 + common/eth2/src/lib.rs | 43 ++++++++++++++----- common/eth2/src/types.rs | 21 +-------- 5 files changed, 65 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2978a3a19f..c62e9fbc87 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2576,6 +2576,7 @@ dependencies = [ "proto_array", "psutil", "reqwest", + "reqwest-eventsource", "sensitive_url", "serde", "serde_json", @@ -2977,6 +2978,17 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "eventsource-stream" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74fef4569247a5f429d9156b9d0a2599914385dd189c539334c625d8099d90ab" +dependencies = [ + "futures-core", + "nom", + "pin-project-lite", +] + [[package]] name = "execution_engine_integration" version = "0.1.0" @@ -7179,6 +7191,22 @@ dependencies = [ "winreg", ] +[[package]] +name = "reqwest-eventsource" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f529a5ff327743addc322af460761dff5b50e0c826b9e6ac44c3195c50bb2026" +dependencies = [ + "eventsource-stream", + "futures-core", + "futures-timer", + "mime", + "nom", + "pin-project-lite", + "reqwest", + "thiserror 1.0.69", +] + [[package]] name = "resolv-conf" version = "0.7.0" diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 73805a8525..e1258ccdea 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2796,6 +2796,7 @@ async fn finalizes_after_resuming_from_db() { ); } +#[allow(clippy::large_stack_frames)] #[tokio::test] async fn revert_minority_fork_on_resume() { let validator_count = 16; diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index f735b4c688..912051da36 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -27,6 +27,7 @@ slashing_protection = { workspace = true } mediatype = "0.19.13" pretty_reqwest_error = { workspace = true } derivative = { workspace = true } +reqwest-eventsource = "0.5.0" [dev-dependencies] tokio = { workspace = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 522c6414ea..12b1538984 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -27,6 +27,7 @@ use reqwest::{ Body, IntoUrl, RequestBuilder, Response, }; pub use reqwest::{StatusCode, Url}; +use reqwest_eventsource::{Event, EventSource}; pub use sensitive_url::{SensitiveError, SensitiveUrl}; use serde::{de::DeserializeOwned, Serialize}; use ssz::Encode; @@ -52,6 +53,8 @@ pub const SSZ_CONTENT_TYPE_HEADER: &str = "application/octet-stream"; pub enum Error { /// The `reqwest` client raised an error. HttpClient(PrettyReqwestError), + /// The `reqwest_eventsource` client raised an error. + SseClient(reqwest_eventsource::Error), /// The server returned an error message where the body was able to be parsed. ServerMessage(ErrorMessage), /// The server returned an error message with an array of errors. 
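The `Event::Open` handshake that the new `get_events` implementation relies on is easiest to see in isolation. Below is a minimal, hypothetical sketch (not part of this patch), assuming only the `reqwest-eventsource` 0.5 API introduced above: it drains the source until the server acknowledges the connection with `Event::Open`, then returns the still-live stream for message polling.

```rust
use futures::StreamExt;
use reqwest_eventsource::{Event, EventSource};

// Hypothetical helper: register with the SSE server first, hand back the stream second.
async fn open_event_stream(url: &str) -> Result<EventSource, reqwest_eventsource::Error> {
    let mut es = EventSource::get(url);
    while let Some(event) = es.next().await {
        match event {
            // Connection registered with the server; safe to return the stream.
            Ok(Event::Open) => break,
            // Not expected before `Open`, but skip defensively.
            Ok(Event::Message(_)) => continue,
            Err(err) => return Err(err),
        }
    }
    Ok(es)
}
```

A caller can then `filter_map` over the returned `EventSource` exactly as `get_events` does in the hunk that follows.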
@@ -93,6 +96,13 @@ impl Error { pub fn status(&self) -> Option { match self { Error::HttpClient(error) => error.inner().status(), + Error::SseClient(error) => { + if let reqwest_eventsource::Error::InvalidStatusCode(status, _) = error { + Some(*status) + } else { + None + } + } Error::ServerMessage(msg) => StatusCode::try_from(msg.code).ok(), Error::ServerIndexedMessage(msg) => StatusCode::try_from(msg.code).ok(), Error::StatusCode(status) => Some(*status), @@ -2592,16 +2602,29 @@ impl BeaconNodeHttpClient { .join(","); path.query_pairs_mut().append_pair("topics", &topic_string); - Ok(self - .client - .get(path) - .send() - .await? - .bytes_stream() - .map(|next| match next { - Ok(bytes) => EventKind::from_sse_bytes(bytes.as_ref()), - Err(e) => Err(Error::HttpClient(e.into())), - })) + let mut es = EventSource::get(path); + // If we don't await `Event::Open` here, then the consumer + // will not get any Message events until they start awaiting the stream. + // This is a way to register the stream with the sse server before + // message events start getting emitted. + while let Some(event) = es.next().await { + match event { + Ok(Event::Open) => break, + Err(err) => return Err(Error::SseClient(err)), + // This should never happen as we are guaranteed to get the + // Open event before any message starts coming through. + Ok(Event::Message(_)) => continue, + } + } + Ok(Box::pin(es.filter_map(|event| async move { + match event { + Ok(Event::Open) => None, + Ok(Event::Message(message)) => { + Some(EventKind::from_sse_bytes(&message.event, &message.data)) + } + Err(err) => Some(Err(Error::SseClient(err))), + } + }))) } /// `POST validator/duties/sync/{epoch}` diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index c187399ebd..a303953a86 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -13,7 +13,7 @@ use serde_json::Value; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; use std::fmt::{self, Display}; -use std::str::{from_utf8, FromStr}; +use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use types::beacon_block_body::KzgCommitments; @@ -1153,24 +1153,7 @@ impl EventKind { } } - pub fn from_sse_bytes(message: &[u8]) -> Result { - let s = from_utf8(message) - .map_err(|e| ServerError::InvalidServerSentEvent(format!("{:?}", e)))?; - - let mut split = s.split('\n'); - let event = split - .next() - .ok_or_else(|| { - ServerError::InvalidServerSentEvent("Could not parse event tag".to_string()) - })? - .trim_start_matches("event:"); - let data = split - .next() - .ok_or_else(|| { - ServerError::InvalidServerSentEvent("Could not parse data tag".to_string()) - })? - .trim_start_matches("data:"); - + pub fn from_sse_bytes(event: &str, data: &str) -> Result { match event { "attestation" => Ok(EventKind::Attestation(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Attestation: {:?}", e)), From 10c96f8631d7db0d875dd60f1b9828712b96d01a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 19 Dec 2024 16:45:59 +1100 Subject: [PATCH 21/25] Fix anvil 404 link in docs (#6724) * Fix anvil 404 link in docs --- testing/eth1_test_rig/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/eth1_test_rig/src/lib.rs b/testing/eth1_test_rig/src/lib.rs index 015a632ff4..3cba908261 100644 --- a/testing/eth1_test_rig/src/lib.rs +++ b/testing/eth1_test_rig/src/lib.rs @@ -1,6 +1,6 @@ //! Provides utilities for deploying and manipulating the eth2 deposit contract on the eth1 chain. //! 
-//! Presently used with [`anvil`](https://github.com/foundry-rs/foundry/tree/master/anvil) to simulate +//! Presently used with [`anvil`](https://github.com/foundry-rs/foundry/tree/master/crates/anvil) to simulate //! the deposit contract for testing beacon node eth1 integration. //! //! Not tested to work with actual clients (e.g., geth). It should work fine, however there may be From b2b1faad4e32e710eab905d6d227543c44335986 Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 19 Dec 2024 09:46:03 +0400 Subject: [PATCH 22/25] Enforce alphabetically ordered cargo deps (#6678) * Enforce alphabetically ordered cargo deps * Fix test-suite * Another CI fix * Merge branch 'unstable' into cargo-sort * Fix conflicts * Merge remote-tracking branch 'origin/unstable' into cargo-sort --- .github/workflows/test-suite.yml | 16 ++++ Cargo.toml | 49 ++++++++---- account_manager/Cargo.toml | 22 +++--- beacon_node/Cargo.toml | 36 ++++----- beacon_node/beacon_chain/Cargo.toml | 4 +- beacon_node/beacon_processor/Cargo.toml | 24 +++--- beacon_node/builder_client/Cargo.toml | 4 +- beacon_node/client/Cargo.toml | 48 +++++------ beacon_node/eth1/Cargo.toml | 28 +++---- beacon_node/execution_layer/Cargo.toml | 79 +++++++++---------- beacon_node/http_api/Cargo.toml | 66 ++++++++-------- beacon_node/http_metrics/Cargo.toml | 21 +++-- beacon_node/lighthouse_network/Cargo.toml | 66 ++++++++-------- .../lighthouse_network/gossipsub/Cargo.toml | 4 +- beacon_node/network/Cargo.toml | 64 +++++++-------- beacon_node/store/Cargo.toml | 32 ++++---- beacon_node/timer/Cargo.toml | 4 +- boot_node/Cargo.toml | 18 ++--- common/account_utils/Cargo.toml | 13 ++- common/clap_utils/Cargo.toml | 3 +- common/compare_fields_derive/Cargo.toml | 2 +- common/deposit_contract/Cargo.toml | 6 +- common/directory/Cargo.toml | 1 - common/eth2/Cargo.toml | 29 ++++--- common/eth2_config/Cargo.toml | 2 +- common/eth2_interop_keypairs/Cargo.toml | 7 +- common/eth2_network_config/Cargo.toml | 26 +++--- common/eth2_wallet_manager/Cargo.toml | 1 - common/lighthouse_version/Cargo.toml | 1 - common/logging/Cargo.toml | 2 +- common/malloc_utils/Cargo.toml | 2 +- common/monitoring_api/Cargo.toml | 15 ++-- common/oneshot_broadcast/Cargo.toml | 1 - common/pretty_reqwest_error/Cargo.toml | 1 - common/sensitive_url/Cargo.toml | 3 +- common/slot_clock/Cargo.toml | 2 +- common/system_health/Cargo.toml | 6 +- common/task_executor/Cargo.toml | 8 +- common/test_random_derive/Cargo.toml | 2 +- common/unused_port/Cargo.toml | 1 - common/validator_dir/Cargo.toml | 13 ++- common/warp_utils/Cargo.toml | 19 +++-- consensus/fixed_bytes/Cargo.toml | 1 - consensus/fork_choice/Cargo.toml | 7 +- consensus/int_to_bytes/Cargo.toml | 2 +- consensus/proto_array/Cargo.toml | 4 +- consensus/safe_arith/Cargo.toml | 1 - consensus/state_processing/Cargo.toml | 26 +++--- crypto/bls/Cargo.toml | 18 ++--- crypto/eth2_key_derivation/Cargo.toml | 7 +- crypto/eth2_keystore/Cargo.toml | 19 +++-- crypto/eth2_wallet/Cargo.toml | 9 +-- crypto/kzg/Cargo.toml | 13 ++- database_manager/Cargo.toml | 8 +- lcli/Cargo.toml | 46 +++++------ lighthouse/Cargo.toml | 53 ++++++------- lighthouse/environment/Cargo.toml | 18 ++--- slasher/Cargo.toml | 30 +++---- testing/ef_tests/Cargo.toml | 22 +++--- testing/eth1_test_rig/Cargo.toml | 12 +-- .../execution_engine_integration/Cargo.toml | 22 +++--- testing/node_test_rig/Cargo.toml | 14 ++-- testing/simulator/Cargo.toml | 19 +++-- testing/state_transition_vectors/Cargo.toml | 9 +-- testing/test-test_logger/Cargo.toml | 1 - testing/web3signer_tests/Cargo.toml | 
33 ++++---- validator_client/Cargo.toml | 10 +-- .../doppelganger_service/Cargo.toml | 2 +- validator_client/graffiti_file/Cargo.toml | 6 +- validator_client/http_api/Cargo.toml | 20 ++--- validator_client/http_metrics/Cargo.toml | 12 +-- .../initialized_validators/Cargo.toml | 18 ++--- validator_client/signing_method/Cargo.toml | 4 +- .../slashing_protection/Cargo.toml | 16 ++-- .../validator_services/Cargo.toml | 10 +-- validator_manager/Cargo.toml | 25 +++--- watch/Cargo.toml | 41 +++++----- 77 files changed, 655 insertions(+), 654 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index bba670cc22..65663e0cf4 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -428,6 +428,21 @@ jobs: cache-target: release - name: Run Makefile to trigger the bash script run: make cli-local + cargo-sort: + name: cargo-sort + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Get latest version of stable Rust + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-sort + - name: Run cargo sort to check if Cargo.toml files are sorted + run: cargo sort --check --workspace # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether # a PR is safe to merge. New jobs should be added here. test-suite-success: @@ -455,6 +470,7 @@ jobs: 'compile-with-beta-compiler', 'cli-check', 'lockbud', + 'cargo-sort', ] steps: - uses: actions/checkout@v4 diff --git a/Cargo.toml b/Cargo.toml index 9e921190b8..23e52a306b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,11 +8,11 @@ members = [ "beacon_node/builder_client", "beacon_node/client", "beacon_node/eth1", - "beacon_node/lighthouse_network", - "beacon_node/lighthouse_network/gossipsub", "beacon_node/execution_layer", "beacon_node/http_api", "beacon_node/http_metrics", + "beacon_node/lighthouse_network", + "beacon_node/lighthouse_network/gossipsub", "beacon_node/network", "beacon_node/store", "beacon_node/timer", @@ -30,40 +30,40 @@ members = [ "common/eth2_interop_keypairs", "common/eth2_network_config", "common/eth2_wallet_manager", - "common/metrics", "common/lighthouse_version", "common/lockfile", "common/logging", "common/lru_cache", "common/malloc_utils", + "common/metrics", + "common/monitoring_api", "common/oneshot_broadcast", "common/pretty_reqwest_error", "common/sensitive_url", "common/slot_clock", "common/system_health", - "common/task_executor", "common/target_check", + "common/task_executor", "common/test_random_derive", "common/unused_port", "common/validator_dir", "common/warp_utils", - "common/monitoring_api", - - "database_manager", - - "consensus/int_to_bytes", "consensus/fixed_bytes", "consensus/fork_choice", + + "consensus/int_to_bytes", "consensus/proto_array", "consensus/safe_arith", "consensus/state_processing", "consensus/swap_or_not_shuffle", "crypto/bls", - "crypto/kzg", "crypto/eth2_key_derivation", "crypto/eth2_keystore", "crypto/eth2_wallet", + "crypto/kzg", + + "database_manager", "lcli", @@ -78,8 +78,8 @@ members = [ "testing/execution_engine_integration", "testing/node_test_rig", "testing/simulator", - "testing/test-test_logger", "testing/state_transition_vectors", + "testing/test-test_logger", "testing/web3signer_tests", "validator_client", @@ -126,8 +126,8 @@ delay_map = "0.4" derivative = "2" dirs = "3" either = "1.9" - # TODO: rust_eth_kzg is pinned for now while a perf regression is 
investigated - # The crate_crypto_* dependencies can be removed from this file completely once we update +# TODO: rust_eth_kzg is pinned for now while a perf regression is investigated +# The crate_crypto_* dependencies can be removed from this file completely once we update rust_eth_kzg = "=0.5.1" crate_crypto_internal_eth_kzg_bls12_381 = "=0.5.1" crate_crypto_internal_eth_kzg_erasure_codes = "=0.5.1" @@ -167,7 +167,13 @@ r2d2 = "0.8" rand = "0.8" rayon = "1.7" regex = "1" -reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls", "native-tls-vendored"] } +reqwest = { version = "0.11", default-features = false, features = [ + "blocking", + "json", + "stream", + "rustls-tls", + "native-tls-vendored", +] } ring = "0.16" rpds = "0.11" rusqlite = { version = "0.28", features = ["bundled"] } @@ -176,7 +182,11 @@ serde_json = "1" serde_repr = "0.1" serde_yaml = "0.9" sha2 = "0.9" -slog = { version = "2", features = ["max_level_debug", "release_max_level_debug", "nested-values"] } +slog = { version = "2", features = [ + "max_level_debug", + "release_max_level_debug", + "nested-values", +] } slog-async = "2" slog-term = "2" sloggers = { version = "2", features = ["json"] } @@ -188,7 +198,12 @@ superstruct = "0.8" syn = "1" sysinfo = "0.26" tempfile = "3" -tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal", "macros"] } +tokio = { version = "1", features = [ + "rt-multi-thread", + "sync", + "signal", + "macros", +] } tokio-stream = { version = "0.1", features = ["sync"] } tokio-util = { version = "0.7", features = ["codec", "compat", "time"] } tracing = "0.1.40" @@ -267,7 +282,7 @@ validator_dir = { path = "common/validator_dir" } validator_http_api = { path = "validator_client/http_api" } validator_http_metrics = { path = "validator_client/http_metrics" } validator_metrics = { path = "validator_client/validator_metrics" } -validator_store= { path = "validator_client/validator_store" } +validator_store = { path = "validator_client/validator_store" } warp_utils = { path = "common/warp_utils" } xdelta3 = { git = "http://github.com/sigp/xdelta3-rs", rev = "50d63cdf1878e5cf3538e9aae5eed34a22c64e4a" } zstd = "0.13" diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 48230bb281..a7752d621f 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -8,25 +8,25 @@ authors = [ edition = { workspace = true } [dependencies] +account_utils = { workspace = true } bls = { workspace = true } clap = { workspace = true } -types = { workspace = true } -environment = { workspace = true } -eth2_network_config = { workspace = true } clap_utils = { workspace = true } directory = { workspace = true } +environment = { workspace = true } +eth2 = { workspace = true } +eth2_keystore = { workspace = true } +eth2_network_config = { workspace = true } eth2_wallet = { workspace = true } eth2_wallet_manager = { path = "../common/eth2_wallet_manager" } -validator_dir = { workspace = true } -tokio = { workspace = true } -eth2_keystore = { workspace = true } -account_utils = { workspace = true } -slashing_protection = { workspace = true } -eth2 = { workspace = true } -safe_arith = { workspace = true } -slot_clock = { workspace = true } filesystem = { workspace = true } +safe_arith = { workspace = true } sensitive_url = { workspace = true } +slashing_protection = { workspace = true } +slot_clock = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } +validator_dir = { workspace = true } 
zeroize = { workspace = true } [dev-dependencies] diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 15cdf15dc5..7da65ad742 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -20,28 +20,28 @@ write_ssz_files = [ ] # Writes debugging .ssz files to /tmp during block processing. [dependencies] -eth2_config = { workspace = true } +account_utils = { workspace = true } beacon_chain = { workspace = true } -types = { workspace = true } -store = { workspace = true } -client = { path = "client" } clap = { workspace = true } -slog = { workspace = true } -dirs = { workspace = true } -directory = { workspace = true } -environment = { workspace = true } -task_executor = { workspace = true } -genesis = { workspace = true } -execution_layer = { workspace = true } -lighthouse_network = { workspace = true } -serde_json = { workspace = true } clap_utils = { workspace = true } -hyper = { workspace = true } +client = { path = "client" } +directory = { workspace = true } +dirs = { workspace = true } +environment = { workspace = true } +eth2_config = { workspace = true } +execution_layer = { workspace = true } +genesis = { workspace = true } hex = { workspace = true } -slasher = { workspace = true } +http_api = { workspace = true } +hyper = { workspace = true } +lighthouse_network = { workspace = true } monitoring_api = { workspace = true } sensitive_url = { workspace = true } -http_api = { workspace = true } -unused_port = { workspace = true } +serde_json = { workspace = true } +slasher = { workspace = true } +slog = { workspace = true } +store = { workspace = true } strum = { workspace = true } -account_utils = { workspace = true } +task_executor = { workspace = true } +types = { workspace = true } +unused_port = { workspace = true } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index b0fa013180..7b725d3519 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -18,9 +18,9 @@ portable = ["bls/supranational-portable"] test_backfill = [] [dev-dependencies] +criterion = { workspace = true } maplit = { workspace = true } serde_json = { workspace = true } -criterion = { workspace = true } [dependencies] alloy-primitives = { workspace = true } @@ -42,11 +42,11 @@ hex = { workspace = true } int_to_bytes = { workspace = true } itertools = { workspace = true } kzg = { workspace = true } -metrics = { workspace = true } lighthouse_version = { workspace = true } logging = { workspace = true } lru = { workspace = true } merkle_proof = { workspace = true } +metrics = { workspace = true } oneshot_broadcast = { path = "../../common/oneshot_broadcast/" } operation_pool = { workspace = true } parking_lot = { workspace = true } diff --git a/beacon_node/beacon_processor/Cargo.toml b/beacon_node/beacon_processor/Cargo.toml index 9273137bf6..c96e0868d7 100644 --- a/beacon_node/beacon_processor/Cargo.toml +++ b/beacon_node/beacon_processor/Cargo.toml @@ -4,22 +4,22 @@ version = "0.1.0" edition = { workspace = true } [dependencies] -slog = { workspace = true } -itertools = { workspace = true } -logging = { workspace = true } -tokio = { workspace = true } -tokio-util = { workspace = true } -futures = { workspace = true } fnv = { workspace = true } +futures = { workspace = true } +itertools = { workspace = true } +lighthouse_network = { workspace = true } +logging = { workspace = true } +metrics = { workspace = true } +num_cpus = { workspace = true } +parking_lot = { workspace = true } +serde = { workspace = true } +slog = { 
workspace = true } +slot_clock = { workspace = true } strum = { workspace = true } task_executor = { workspace = true } -slot_clock = { workspace = true } -lighthouse_network = { workspace = true } +tokio = { workspace = true } +tokio-util = { workspace = true } types = { workspace = true } -metrics = { workspace = true } -parking_lot = { workspace = true } -num_cpus = { workspace = true } -serde = { workspace = true } [dev-dependencies] tokio = { workspace = true, features = ["test-util"] } diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index c3658f45c7..3531e81c84 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -5,8 +5,8 @@ edition = { workspace = true } authors = ["Sean Anderson "] [dependencies] +eth2 = { workspace = true } +lighthouse_version = { workspace = true } reqwest = { workspace = true } sensitive_url = { workspace = true } -eth2 = { workspace = true } serde = { workspace = true } -lighthouse_version = { workspace = true } diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 4df13eb3d4..614115eb58 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -5,41 +5,41 @@ authors = ["Sigma Prime "] edition = { workspace = true } [dev-dependencies] +operation_pool = { workspace = true } serde_yaml = { workspace = true } state_processing = { workspace = true } -operation_pool = { workspace = true } tokio = { workspace = true } [dependencies] beacon_chain = { workspace = true } -store = { workspace = true } -network = { workspace = true } -timer = { path = "../timer" } -lighthouse_network = { workspace = true } -types = { workspace = true } -eth2_config = { workspace = true } -slot_clock = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -slog = { workspace = true } -tokio = { workspace = true } -futures = { workspace = true } +beacon_processor = { workspace = true } +directory = { workspace = true } dirs = { workspace = true } +environment = { workspace = true } eth1 = { workspace = true } eth2 = { workspace = true } -kzg = { workspace = true } -sensitive_url = { workspace = true } +eth2_config = { workspace = true } +ethereum_ssz = { workspace = true } +execution_layer = { workspace = true } +futures = { workspace = true } genesis = { workspace = true } -task_executor = { workspace = true } -environment = { workspace = true } -metrics = { workspace = true } -time = "0.3.5" -directory = { workspace = true } http_api = { workspace = true } http_metrics = { path = "../http_metrics" } +kzg = { workspace = true } +lighthouse_network = { workspace = true } +metrics = { workspace = true } +monitoring_api = { workspace = true } +network = { workspace = true } +sensitive_url = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } slasher = { workspace = true } slasher_service = { path = "../../slasher/service" } -monitoring_api = { workspace = true } -execution_layer = { workspace = true } -beacon_processor = { workspace = true } -ethereum_ssz = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +store = { workspace = true } +task_executor = { workspace = true } +time = "0.3.5" +timer = { path = "../timer" } +tokio = { workspace = true } +types = { workspace = true } diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 50400a77e0..8ccd50aad8 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ 
-5,27 +5,27 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dev-dependencies] +environment = { workspace = true } eth1_test_rig = { workspace = true } serde_yaml = { workspace = true } sloggers = { workspace = true } -environment = { workspace = true } [dependencies] -execution_layer = { workspace = true } -futures = { workspace = true } -serde = { workspace = true } -types = { workspace = true } -merkle_proof = { workspace = true } +eth2 = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -tree_hash = { workspace = true } -parking_lot = { workspace = true } -slog = { workspace = true } +execution_layer = { workspace = true } +futures = { workspace = true } logging = { workspace = true } -superstruct = { workspace = true } -tokio = { workspace = true } -state_processing = { workspace = true } +merkle_proof = { workspace = true } metrics = { workspace = true } -task_executor = { workspace = true } -eth2 = { workspace = true } +parking_lot = { workspace = true } sensitive_url = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } +state_processing = { workspace = true } +superstruct = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 0ef101fae7..7eb7b4a15e 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -2,54 +2,53 @@ name = "execution_layer" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +alloy-consensus = { workspace = true } alloy-primitives = { workspace = true } -types = { workspace = true } -tokio = { workspace = true } -slog = { workspace = true } -logging = { workspace = true } -sensitive_url = { workspace = true } -reqwest = { workspace = true } -ethereum_serde_utils = { workspace = true } -serde_json = { workspace = true } -serde = { workspace = true } -warp = { workspace = true } -jsonwebtoken = "9" +alloy-rlp = { workspace = true } +arc-swap = "1.6.0" +builder_client = { path = "../builder_client" } bytes = { workspace = true } -task_executor = { workspace = true } -hex = { workspace = true } -ethereum_ssz = { workspace = true } -ssz_types = { workspace = true } eth2 = { workspace = true } +eth2_network_config = { workspace = true } +ethereum_serde_utils = { workspace = true } +ethereum_ssz = { workspace = true } +ethers-core = { workspace = true } +fixed_bytes = { workspace = true } +fork_choice = { workspace = true } +hash-db = "0.15.2" +hash256-std-hasher = "0.15.2" +hex = { workspace = true } +jsonwebtoken = "9" +keccak-hash = "0.10.0" kzg = { workspace = true } -state_processing = { workspace = true } -superstruct = { workspace = true } +lighthouse_version = { workspace = true } +logging = { workspace = true } lru = { workspace = true } +metrics = { workspace = true } +parking_lot = { workspace = true } +pretty_reqwest_error = { workspace = true } +rand = { workspace = true } +reqwest = { workspace = true } +sensitive_url = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +sha2 = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +ssz_types = { workspace = true } +state_processing = { workspace = true } +strum = { workspace = true } +superstruct = { 
workspace = true } +task_executor = { workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } -parking_lot = { workspace = true } -slot_clock = { workspace = true } -tempfile = { workspace = true } -rand = { workspace = true } -zeroize = { workspace = true } -metrics = { workspace = true } -ethers-core = { workspace = true } -builder_client = { path = "../builder_client" } -fork_choice = { workspace = true } -tokio-stream = { workspace = true } -strum = { workspace = true } -keccak-hash = "0.10.0" -hash256-std-hasher = "0.15.2" triehash = "0.8.4" -hash-db = "0.15.2" -pretty_reqwest_error = { workspace = true } -arc-swap = "1.6.0" -eth2_network_config = { workspace = true } -alloy-rlp = { workspace = true } -alloy-consensus = { workspace = true } -lighthouse_version = { workspace = true } -fixed_bytes = { workspace = true } -sha2 = { workspace = true } +types = { workspace = true } +warp = { workspace = true } +zeroize = { workspace = true } diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 638fe0f219..5d601008bc 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -6,49 +6,49 @@ edition = { workspace = true } autotests = false # using a single test binary compiles faster [dependencies] -warp = { workspace = true } -serde = { workspace = true } -tokio = { workspace = true } -tokio-stream = { workspace = true } -types = { workspace = true } -hex = { workspace = true } beacon_chain = { workspace = true } -eth2 = { workspace = true } -slog = { workspace = true } -network = { workspace = true } -lighthouse_network = { workspace = true } -eth1 = { workspace = true } -state_processing = { workspace = true } -lighthouse_version = { workspace = true } -metrics = { workspace = true } -warp_utils = { workspace = true } -slot_clock = { workspace = true } -ethereum_ssz = { workspace = true } +beacon_processor = { workspace = true } bs58 = "0.4.0" -futures = { workspace = true } +bytes = { workspace = true } +directory = { workspace = true } +eth1 = { workspace = true } +eth2 = { workspace = true } +ethereum_serde_utils = { workspace = true } +ethereum_ssz = { workspace = true } execution_layer = { workspace = true } -parking_lot = { workspace = true } -safe_arith = { workspace = true } -task_executor = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } +lighthouse_network = { workspace = true } +lighthouse_version = { workspace = true } +logging = { workspace = true } lru = { workspace = true } -tree_hash = { workspace = true } +metrics = { workspace = true } +network = { workspace = true } +operation_pool = { workspace = true } +parking_lot = { workspace = true } +rand = { workspace = true } +safe_arith = { workspace = true } +sensitive_url = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +state_processing = { workspace = true } +store = { workspace = true } sysinfo = { workspace = true } system_health = { path = "../../common/system_health" } -directory = { workspace = true } -logging = { workspace = true } -ethereum_serde_utils = { workspace = true } -operation_pool = { workspace = true } -sensitive_url = { workspace = true } -store = { workspace = true } -bytes = { workspace = true } -beacon_processor = { workspace = true } -rand = { workspace = true } +task_executor = { workspace = true } +tokio = { 
workspace = true } +tokio-stream = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } +warp = { workspace = true } +warp_utils = { workspace = true } [dev-dependencies] -serde_json = { workspace = true } -proto_array = { workspace = true } genesis = { workspace = true } logging = { workspace = true } +proto_array = { workspace = true } +serde_json = { workspace = true } [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml index 97ba72a2ac..d92f986440 100644 --- a/beacon_node/http_metrics/Cargo.toml +++ b/beacon_node/http_metrics/Cargo.toml @@ -3,24 +3,23 @@ name = "http_metrics" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -warp = { workspace = true } +beacon_chain = { workspace = true } +lighthouse_network = { workspace = true } +lighthouse_version = { workspace = true } +malloc_utils = { workspace = true } +metrics = { workspace = true } serde = { workspace = true } slog = { workspace = true } -beacon_chain = { workspace = true } -store = { workspace = true } -lighthouse_network = { workspace = true } slot_clock = { workspace = true } -metrics = { workspace = true } -lighthouse_version = { workspace = true } +store = { workspace = true } +warp = { workspace = true } warp_utils = { workspace = true } -malloc_utils = { workspace = true } [dev-dependencies] -tokio = { workspace = true } -reqwest = { workspace = true } -types = { workspace = true } logging = { workspace = true } +reqwest = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index eccc244d59..485f32b37a 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,49 +5,49 @@ authors = ["Sigma Prime "] edition = { workspace = true } [dependencies] -alloy-primitives = { workspace = true} +alloy-primitives = { workspace = true } +alloy-rlp = { workspace = true } +bytes = { workspace = true } +delay_map = { workspace = true } +directory = { workspace = true } +dirs = { workspace = true } discv5 = { workspace = true } -gossipsub = { workspace = true } -unsigned-varint = { version = "0.8", features = ["codec"] } -ssz_types = { workspace = true } -types = { workspace = true } -serde = { workspace = true } +either = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -slog = { workspace = true } -lighthouse_version = { workspace = true } -tokio = { workspace = true } -futures = { workspace = true } -dirs = { workspace = true } fnv = { workspace = true } -metrics = { workspace = true } -smallvec = { workspace = true } -tokio-io-timeout = "1" +futures = { workspace = true } +gossipsub = { workspace = true } +hex = { workspace = true } +itertools = { workspace = true } +libp2p-mplex = "0.42" +lighthouse_version = { workspace = true } lru = { workspace = true } lru_cache = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } -sha2 = { workspace = true } -snap = { workspace = true } -hex = { workspace = true } -tokio-util = { workspace = true } -tiny-keccak = "2" -task_executor = { workspace = true } +prometheus-client = "0.22.0" rand = { workspace = true } -directory = { workspace = true } regex = { workspace = true } +serde = { workspace 
= true } +sha2 = { workspace = true } +slog = { workspace = true } +smallvec = { workspace = true } +snap = { workspace = true } +ssz_types = { workspace = true } strum = { workspace = true } superstruct = { workspace = true } -prometheus-client = "0.22.0" +task_executor = { workspace = true } +tiny-keccak = "2" +tokio = { workspace = true } +tokio-io-timeout = "1" +tokio-util = { workspace = true } +types = { workspace = true } +unsigned-varint = { version = "0.8", features = ["codec"] } unused_port = { workspace = true } -delay_map = { workspace = true } -bytes = { workspace = true } -either = { workspace = true } -itertools = { workspace = true } -alloy-rlp = { workspace = true } # Local dependencies void = "1.0.2" -libp2p-mplex = "0.42" [dependencies.libp2p] version = "0.54" @@ -55,13 +55,13 @@ default-features = false features = ["identify", "yamux", "noise", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic", "upnp"] [dev-dependencies] -slog-term = { workspace = true } -slog-async = { workspace = true } -tempfile = { workspace = true } -quickcheck = { workspace = true } -quickcheck_macros = { workspace = true } async-channel = { workspace = true } logging = { workspace = true } +quickcheck = { workspace = true } +quickcheck_macros = { workspace = true } +slog-async = { workspace = true } +slog-term = { workspace = true } +tempfile = { workspace = true } [features] libp2p-websocket = [] diff --git a/beacon_node/lighthouse_network/gossipsub/Cargo.toml b/beacon_node/lighthouse_network/gossipsub/Cargo.toml index 6cbe6d3a1c..61f5730c08 100644 --- a/beacon_node/lighthouse_network/gossipsub/Cargo.toml +++ b/beacon_node/lighthouse_network/gossipsub/Cargo.toml @@ -24,9 +24,10 @@ fnv = "1.0.7" futures = "0.3.30" futures-timer = "3.0.2" getrandom = "0.2.12" -hashlink.workspace = true +hashlink = { workspace = true } hex_fmt = "0.3.0" libp2p = { version = "0.54", default-features = false } +prometheus-client = "0.22.0" quick-protobuf = "0.8" quick-protobuf-codec = "0.3" rand = "0.8" @@ -35,7 +36,6 @@ serde = { version = "1", optional = true, features = ["derive"] } sha2 = "0.10.8" tracing = "0.1.37" void = "1.0.2" -prometheus-client = "0.22.0" web-time = "1.1.0" [dev-dependencies] diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 6fc818e9c9..44f6c54bbc 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -5,51 +5,51 @@ authors = ["Sigma Prime "] edition = { workspace = true } [dev-dependencies] -sloggers = { workspace = true } +bls = { workspace = true } +eth2 = { workspace = true } +eth2_network_config = { workspace = true } genesis = { workspace = true } +gossipsub = { workspace = true } +kzg = { workspace = true } matches = "0.1.8" serde_json = { workspace = true } -slog-term = { workspace = true } slog-async = { workspace = true } -eth2 = { workspace = true } -gossipsub = { workspace = true } -eth2_network_config = { workspace = true } -kzg = { workspace = true } -bls = { workspace = true } +slog-term = { workspace = true } +sloggers = { workspace = true } [dependencies] alloy-primitives = { workspace = true } -async-channel = { workspace = true } -anyhow = { workspace = true } -beacon_chain = { workspace = true } -store = { workspace = true } -lighthouse_network = { workspace = true } -types = { workspace = true } -slot_clock = { workspace = true } -slog = { workspace = true } -hex = { workspace = true } -ethereum_ssz = { workspace = true } -ssz_types = { workspace = true } -futures = { 
workspace = true } -tokio = { workspace = true } -tokio-stream = { workspace = true } -smallvec = { workspace = true } -rand = { workspace = true } -fnv = { workspace = true } alloy-rlp = { workspace = true } -metrics = { workspace = true } -logging = { workspace = true } -task_executor = { workspace = true } +anyhow = { workspace = true } +async-channel = { workspace = true } +beacon_chain = { workspace = true } +beacon_processor = { workspace = true } +delay_map = { workspace = true } +derivative = { workspace = true } +ethereum_ssz = { workspace = true } +execution_layer = { workspace = true } +fnv = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } igd-next = "0.14" itertools = { workspace = true } +lighthouse_network = { workspace = true } +logging = { workspace = true } lru_cache = { workspace = true } -strum = { workspace = true } -derivative = { workspace = true } -delay_map = { workspace = true } +metrics = { workspace = true } operation_pool = { workspace = true } -execution_layer = { workspace = true } -beacon_processor = { workspace = true } parking_lot = { workspace = true } +rand = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +smallvec = { workspace = true } +ssz_types = { workspace = true } +store = { workspace = true } +strum = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } +types = { workspace = true } [features] # NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 7cee16c353..21d0cf8dec 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -5,34 +5,34 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dev-dependencies] -tempfile = { workspace = true } beacon_chain = { workspace = true } criterion = { workspace = true } rand = { workspace = true, features = ["small_rng"] } +tempfile = { workspace = true } [dependencies] +bls = { workspace = true } db-key = "0.0.5" -leveldb = { version = "0.8" } -parking_lot = { workspace = true } -itertools = { workspace = true } +directory = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +itertools = { workspace = true } +leveldb = { version = "0.8" } +logging = { workspace = true } +lru = { workspace = true } +metrics = { workspace = true } +parking_lot = { workspace = true } +safe_arith = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } +sloggers = { workspace = true } +smallvec = { workspace = true } +state_processing = { workspace = true } +strum = { workspace = true } superstruct = { workspace = true } types = { workspace = true } -safe_arith = { workspace = true } -state_processing = { workspace = true } -slog = { workspace = true } -serde = { workspace = true } -metrics = { workspace = true } -lru = { workspace = true } -sloggers = { workspace = true } -directory = { workspace = true } -strum = { workspace = true } xdelta3 = { workspace = true } zstd = { workspace = true } -bls = { workspace = true } -smallvec = { workspace = true } -logging = { workspace = true } [[bench]] name = "hdiff" diff --git a/beacon_node/timer/Cargo.toml b/beacon_node/timer/Cargo.toml index afb93f3657..546cc2ed41 100644 --- a/beacon_node/timer/Cargo.toml +++ b/beacon_node/timer/Cargo.toml @@ -6,7 +6,7 @@ edition = { workspace = true } [dependencies] beacon_chain = { 
workspace = true } -slot_clock = { workspace = true } -tokio = { workspace = true } slog = { workspace = true } +slot_clock = { workspace = true } task_executor = { workspace = true } +tokio = { workspace = true } diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index c60d308cbb..7c8d2b16fd 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -6,19 +6,19 @@ edition = { workspace = true } [dependencies] beacon_node = { workspace = true } +bytes = { workspace = true } clap = { workspace = true } clap_utils = { workspace = true } -lighthouse_network = { workspace = true } -types = { workspace = true } +eth2_network_config = { workspace = true } ethereum_ssz = { workspace = true } -slog = { workspace = true } -tokio = { workspace = true } +hex = { workspace = true } +lighthouse_network = { workspace = true } log = { workspace = true } -slog-term = { workspace = true } logging = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } slog-async = { workspace = true } slog-scope = "4.3.0" -hex = { workspace = true } -serde = { workspace = true } -eth2_network_config = { workspace = true } -bytes = { workspace = true } +slog-term = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } diff --git a/common/account_utils/Cargo.toml b/common/account_utils/Cargo.toml index e66bf14233..dece975d37 100644 --- a/common/account_utils/Cargo.toml +++ b/common/account_utils/Cargo.toml @@ -3,20 +3,19 @@ name = "account_utils" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rand = { workspace = true } -eth2_wallet = { workspace = true } +directory = { workspace = true } eth2_keystore = { workspace = true } +eth2_wallet = { workspace = true } filesystem = { workspace = true } -zeroize = { workspace = true } +rand = { workspace = true } +regex = { workspace = true } +rpassword = "5.0.0" serde = { workspace = true } serde_yaml = { workspace = true } slog = { workspace = true } types = { workspace = true } validator_dir = { workspace = true } -regex = { workspace = true } -rpassword = "5.0.0" -directory = { workspace = true } +zeroize = { workspace = true } diff --git a/common/clap_utils/Cargo.toml b/common/clap_utils/Cargo.toml index 73823ae24e..f3c166bda9 100644 --- a/common/clap_utils/Cargo.toml +++ b/common/clap_utils/Cargo.toml @@ -3,16 +3,15 @@ name = "clap_utils" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] alloy-primitives = { workspace = true } clap = { workspace = true } -hex = { workspace = true } dirs = { workspace = true } eth2_network_config = { workspace = true } ethereum_ssz = { workspace = true } +hex = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } diff --git a/common/compare_fields_derive/Cargo.toml b/common/compare_fields_derive/Cargo.toml index b4bbbaa436..19682bf367 100644 --- a/common/compare_fields_derive/Cargo.toml +++ b/common/compare_fields_derive/Cargo.toml @@ -8,5 +8,5 @@ edition = { workspace = true } proc-macro = true [dependencies] -syn = { workspace = true } quote = { workspace = true } +syn = { workspace = true } diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index a03ac2178f..953fde1af7 100644 --- 
a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -7,13 +7,13 @@ edition = { workspace = true } build = "build.rs" [build-dependencies] +hex = { workspace = true } reqwest = { workspace = true } serde_json = { workspace = true } sha2 = { workspace = true } -hex = { workspace = true } [dependencies] -types = { workspace = true } +ethabi = "16.0.0" ethereum_ssz = { workspace = true } tree_hash = { workspace = true } -ethabi = "16.0.0" +types = { workspace = true } diff --git a/common/directory/Cargo.toml b/common/directory/Cargo.toml index f724337261..9c3ced9097 100644 --- a/common/directory/Cargo.toml +++ b/common/directory/Cargo.toml @@ -3,7 +3,6 @@ name = "directory" version = "0.1.0" authors = ["pawan "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 912051da36..9d6dea100d 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -3,31 +3,30 @@ name = "eth2" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -serde = { workspace = true } -serde_json = { workspace = true } -ssz_types = { workspace = true } -types = { workspace = true } -reqwest = { workspace = true } -lighthouse_network = { workspace = true } -proto_array = { workspace = true } -ethereum_serde_utils = { workspace = true } +derivative = { workspace = true } eth2_keystore = { workspace = true } -zeroize = { workspace = true } -sensitive_url = { workspace = true } +ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -futures-util = "0.3.8" futures = { workspace = true } -store = { workspace = true } -slashing_protection = { workspace = true } +futures-util = "0.3.8" +lighthouse_network = { workspace = true } mediatype = "0.19.13" pretty_reqwest_error = { workspace = true } -derivative = { workspace = true } +proto_array = { workspace = true } +reqwest = { workspace = true } reqwest-eventsource = "0.5.0" +sensitive_url = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +slashing_protection = { workspace = true } +ssz_types = { workspace = true } +store = { workspace = true } +types = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] tokio = { workspace = true } diff --git a/common/eth2_config/Cargo.toml b/common/eth2_config/Cargo.toml index 20c3b0b6f2..509f5ff87e 100644 --- a/common/eth2_config/Cargo.toml +++ b/common/eth2_config/Cargo.toml @@ -5,5 +5,5 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -types = { workspace = true } paste = { workspace = true } +types = { workspace = true } diff --git a/common/eth2_interop_keypairs/Cargo.toml b/common/eth2_interop_keypairs/Cargo.toml index 5971b934e0..c19b32014e 100644 --- a/common/eth2_interop_keypairs/Cargo.toml +++ b/common/eth2_interop_keypairs/Cargo.toml @@ -3,16 +3,15 @@ name = "eth2_interop_keypairs" version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -num-bigint = "0.4.2" +bls = { workspace = true } ethereum_hashing = { workspace = true } hex = { workspace = true } -serde_yaml = { workspace = true } +num-bigint = "0.4.2" serde = { workspace = 
true } -bls = { workspace = true } +serde_yaml = { workspace = true } [dev-dependencies] base64 = "0.13.0" diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 09cf2072d2..a255e04229 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -7,25 +7,25 @@ edition = { workspace = true } build = "build.rs" [build-dependencies] -zip = { workspace = true } eth2_config = { workspace = true } +zip = { workspace = true } [dev-dependencies] +ethereum_ssz = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } -ethereum_ssz = { workspace = true } [dependencies] -serde_yaml = { workspace = true } -types = { workspace = true } -eth2_config = { workspace = true } -discv5 = { workspace = true } -reqwest = { workspace = true } -pretty_reqwest_error = { workspace = true } -sha2 = { workspace = true } -url = { workspace = true } -sensitive_url = { workspace = true } -slog = { workspace = true } -logging = { workspace = true } bytes = { workspace = true } +discv5 = { workspace = true } +eth2_config = { workspace = true } kzg = { workspace = true } +logging = { workspace = true } +pretty_reqwest_error = { workspace = true } +reqwest = { workspace = true } +sensitive_url = { workspace = true } +serde_yaml = { workspace = true } +sha2 = { workspace = true } +slog = { workspace = true } +types = { workspace = true } +url = { workspace = true } diff --git a/common/eth2_wallet_manager/Cargo.toml b/common/eth2_wallet_manager/Cargo.toml index f471757065..a6eb24c78c 100644 --- a/common/eth2_wallet_manager/Cargo.toml +++ b/common/eth2_wallet_manager/Cargo.toml @@ -3,7 +3,6 @@ name = "eth2_wallet_manager" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/lighthouse_version/Cargo.toml b/common/lighthouse_version/Cargo.toml index 3c4f9fe50c..164e3e47a7 100644 --- a/common/lighthouse_version/Cargo.toml +++ b/common/lighthouse_version/Cargo.toml @@ -3,7 +3,6 @@ name = "lighthouse_version" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index 73cbdf44d4..b2829a48d8 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -19,7 +19,7 @@ sloggers = { workspace = true } take_mut = "0.2.2" tokio = { workspace = true, features = [ "time" ] } tracing = "0.1" +tracing-appender = { workspace = true } tracing-core = { workspace = true } tracing-log = { workspace = true } tracing-subscriber = { workspace = true } -tracing-appender = { workspace = true } diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index 79a07eed16..64fb7b9aad 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -5,8 +5,8 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -metrics = { workspace = true } libc = "0.2.79" +metrics = { workspace = true } parking_lot = { workspace = true } tikv-jemalloc-ctl = { version = "0.6.0", optional = true, features = ["stats"] } diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index 2da32c307e..5008c86e85 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -3,19 +3,18 @@ name = 
"monitoring_api" version = "0.1.0" authors = ["pawan "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -reqwest = { workspace = true } -task_executor = { workspace = true } -tokio = { workspace = true } eth2 = { workspace = true } -serde_json = { workspace = true } -serde = { workspace = true } lighthouse_version = { workspace = true } metrics = { workspace = true } +regex = { workspace = true } +reqwest = { workspace = true } +sensitive_url = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } slog = { workspace = true } store = { workspace = true } -regex = { workspace = true } -sensitive_url = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } diff --git a/common/oneshot_broadcast/Cargo.toml b/common/oneshot_broadcast/Cargo.toml index 12c9b40bc8..8a358ef851 100644 --- a/common/oneshot_broadcast/Cargo.toml +++ b/common/oneshot_broadcast/Cargo.toml @@ -2,7 +2,6 @@ name = "oneshot_broadcast" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/pretty_reqwest_error/Cargo.toml b/common/pretty_reqwest_error/Cargo.toml index dc79832cd3..4311601bcd 100644 --- a/common/pretty_reqwest_error/Cargo.toml +++ b/common/pretty_reqwest_error/Cargo.toml @@ -2,7 +2,6 @@ name = "pretty_reqwest_error" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/sensitive_url/Cargo.toml b/common/sensitive_url/Cargo.toml index d218c8d93a..ff56209722 100644 --- a/common/sensitive_url/Cargo.toml +++ b/common/sensitive_url/Cargo.toml @@ -3,9 +3,8 @@ name = "sensitive_url" version = "0.1.0" authors = ["Mac L "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -url = { workspace = true } serde = { workspace = true } +url = { workspace = true } diff --git a/common/slot_clock/Cargo.toml b/common/slot_clock/Cargo.toml index c2f330cd50..2e1982efb1 100644 --- a/common/slot_clock/Cargo.toml +++ b/common/slot_clock/Cargo.toml @@ -5,6 +5,6 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -types = { workspace = true } metrics = { workspace = true } parking_lot = { workspace = true } +types = { workspace = true } diff --git a/common/system_health/Cargo.toml b/common/system_health/Cargo.toml index be339f2779..034683f72e 100644 --- a/common/system_health/Cargo.toml +++ b/common/system_health/Cargo.toml @@ -5,7 +5,7 @@ edition = { workspace = true } [dependencies] lighthouse_network = { workspace = true } -types = { workspace = true } -sysinfo = { workspace = true } -serde = { workspace = true } parking_lot = { workspace = true } +serde = { workspace = true } +sysinfo = { workspace = true } +types = { workspace = true } diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index 26bcd7b339..c1ac4b55a9 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -11,10 +11,10 @@ tracing = ["dep:tracing"] [dependencies] async-channel = { workspace = true } -tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } -slog = { workspace = true, optional = true } futures = { workspace = true } -metrics = { workspace = true } -sloggers = 
{ workspace = true, optional = true } logging = { workspace = true, optional = true } +metrics = { workspace = true } +slog = { workspace = true, optional = true } +sloggers = { workspace = true, optional = true } +tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } tracing = { workspace = true, optional = true } diff --git a/common/test_random_derive/Cargo.toml b/common/test_random_derive/Cargo.toml index 79308797a4..b38d5ef63a 100644 --- a/common/test_random_derive/Cargo.toml +++ b/common/test_random_derive/Cargo.toml @@ -9,5 +9,5 @@ description = "Procedural derive macros for implementation of TestRandom trait" proc-macro = true [dependencies] -syn = { workspace = true } quote = { workspace = true } +syn = { workspace = true } diff --git a/common/unused_port/Cargo.toml b/common/unused_port/Cargo.toml index 95dbf59186..2d771cd600 100644 --- a/common/unused_port/Cargo.toml +++ b/common/unused_port/Cargo.toml @@ -2,7 +2,6 @@ name = "unused_port" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml index ae8742fe07..773431c93c 100644 --- a/common/validator_dir/Cargo.toml +++ b/common/validator_dir/Cargo.toml @@ -6,21 +6,20 @@ edition = { workspace = true } [features] insecure_keys = [] - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] bls = { workspace = true } +deposit_contract = { workspace = true } +derivative = { workspace = true } +directory = { workspace = true } eth2_keystore = { workspace = true } filesystem = { workspace = true } -types = { workspace = true } -rand = { workspace = true } -deposit_contract = { workspace = true } -tree_hash = { workspace = true } hex = { workspace = true } -derivative = { workspace = true } lockfile = { workspace = true } -directory = { workspace = true } +rand = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index a9407c392d..4a3cde54a9 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -3,20 +3,19 @@ name = "warp_utils" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -warp = { workspace = true } -eth2 = { workspace = true } -types = { workspace = true } beacon_chain = { workspace = true } -state_processing = { workspace = true } -safe_arith = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -tokio = { workspace = true } +bytes = { workspace = true } +eth2 = { workspace = true } headers = "0.3.2" metrics = { workspace = true } +safe_arith = { workspace = true } +serde = { workspace = true } serde_array_query = "0.1.0" -bytes = { workspace = true } +serde_json = { workspace = true } +state_processing = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } +warp = { workspace = true } diff --git a/consensus/fixed_bytes/Cargo.toml b/consensus/fixed_bytes/Cargo.toml index e5201a0455..ab29adfb1b 100644 --- a/consensus/fixed_bytes/Cargo.toml +++ b/consensus/fixed_bytes/Cargo.toml @@ -3,7 +3,6 @@ name = "fixed_bytes" version = "0.1.0" authors = ["Eitan Seri-Levi "] edition = { workspace = true } - # 
See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index b32e0aa665..3bd18e922a 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -3,17 +3,16 @@ name = "fork_choice" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -types = { workspace = true } -state_processing = { workspace = true } -proto_array = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } metrics = { workspace = true } +proto_array = { workspace = true } slog = { workspace = true } +state_processing = { workspace = true } +types = { workspace = true } [dev-dependencies] beacon_chain = { workspace = true } diff --git a/consensus/int_to_bytes/Cargo.toml b/consensus/int_to_bytes/Cargo.toml index e99d1af8e5..c639dfce8d 100644 --- a/consensus/int_to_bytes/Cargo.toml +++ b/consensus/int_to_bytes/Cargo.toml @@ -8,5 +8,5 @@ edition = { workspace = true } bytes = { workspace = true } [dev-dependencies] -yaml-rust2 = "0.8" hex = { workspace = true } +yaml-rust2 = "0.8" diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index 99f98cf545..bd6757c0fa 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -9,10 +9,10 @@ name = "proto_array" path = "src/bin.rs" [dependencies] -types = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +safe_arith = { workspace = true } serde = { workspace = true } serde_yaml = { workspace = true } -safe_arith = { workspace = true } superstruct = { workspace = true } +types = { workspace = true } diff --git a/consensus/safe_arith/Cargo.toml b/consensus/safe_arith/Cargo.toml index 6f2e4b811c..9ac9fe28d3 100644 --- a/consensus/safe_arith/Cargo.toml +++ b/consensus/safe_arith/Cargo.toml @@ -3,7 +3,6 @@ name = "safe_arith" version = "0.1.0" authors = ["Michael Sproul "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index b7f6ef7b2a..502ffe3cf6 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -5,30 +5,30 @@ authors = ["Paul Hauner ", "Michael Sproul "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -sha2 = { workspace = true } -zeroize = { workspace = true } +bls = { workspace = true } num-bigint-dig = { version = "0.8.4", features = ["zeroize"] } ring = { workspace = true } -bls = { workspace = true } +sha2 = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] hex = { workspace = true } diff --git a/crypto/eth2_keystore/Cargo.toml b/crypto/eth2_keystore/Cargo.toml index bb6222807b..61d2722efb 100644 --- a/crypto/eth2_keystore/Cargo.toml +++ b/crypto/eth2_keystore/Cargo.toml @@ -3,25 +3,24 @@ name = "eth2_keystore" version = "0.1.0" authors = ["Pawan Dhananjay "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rand = { workspace = true } +aes = { version = "0.7", features = ["ctr"] } +bls = { 
workspace = true } +eth2_key_derivation = { workspace = true } +hex = { workspace = true } hmac = "0.11.0" pbkdf2 = { version = "0.8.0", default-features = false } +rand = { workspace = true } scrypt = { version = "0.7.0", default-features = false } +serde = { workspace = true } +serde_json = { workspace = true } +serde_repr = { workspace = true } sha2 = { workspace = true } +unicode-normalization = "0.1.16" uuid = { workspace = true } zeroize = { workspace = true } -serde = { workspace = true } -serde_repr = { workspace = true } -hex = { workspace = true } -bls = { workspace = true } -serde_json = { workspace = true } -eth2_key_derivation = { workspace = true } -unicode-normalization = "0.1.16" -aes = { version = "0.7", features = ["ctr"] } [dev-dependencies] tempfile = { workspace = true } diff --git a/crypto/eth2_wallet/Cargo.toml b/crypto/eth2_wallet/Cargo.toml index f3af6aab59..5327bdc163 100644 --- a/crypto/eth2_wallet/Cargo.toml +++ b/crypto/eth2_wallet/Cargo.toml @@ -3,18 +3,17 @@ name = "eth2_wallet" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +eth2_key_derivation = { workspace = true } +eth2_keystore = { workspace = true } +rand = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_repr = { workspace = true } -uuid = { workspace = true } -rand = { workspace = true } -eth2_keystore = { workspace = true } -eth2_key_derivation = { workspace = true } tiny-bip39 = "1" +uuid = { workspace = true } [dev-dependencies] hex = { workspace = true } diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index ce55f83639..bfe0f19cd0 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -3,22 +3,21 @@ name = "kzg" version = "0.1.0" authors = ["Pawan Dhananjay "] edition = "2021" - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] arbitrary = { workspace = true } +c-kzg = { workspace = true } +derivative = { workspace = true } +ethereum_hashing = { workspace = true } +ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -tree_hash = { workspace = true } -derivative = { workspace = true } -serde = { workspace = true } -ethereum_serde_utils = { workspace = true } hex = { workspace = true } -ethereum_hashing = { workspace = true } -c-kzg = { workspace = true } rust_eth_kzg = { workspace = true } +serde = { workspace = true } serde_json = { workspace = true } +tree_hash = { workspace = true } [dev-dependencies] criterion = { workspace = true } diff --git a/database_manager/Cargo.toml b/database_manager/Cargo.toml index 96176f3fba..a7a54b1416 100644 --- a/database_manager/Cargo.toml +++ b/database_manager/Cargo.toml @@ -10,8 +10,8 @@ clap = { workspace = true } clap_utils = { workspace = true } environment = { workspace = true } hex = { workspace = true } -store = { workspace = true } -types = { workspace = true } -slog = { workspace = true } -strum = { workspace = true } serde = { workspace = true } +slog = { workspace = true } +store = { workspace = true } +strum = { workspace = true } +types = { workspace = true } diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 9612bded47..72be77a70b 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -11,36 +11,36 @@ fake_crypto = ['bls/fake_crypto'] jemalloc = ["malloc_utils/jemalloc"] [dependencies] +account_utils = { workspace 
= true } +beacon_chain = { workspace = true } bls = { workspace = true } clap = { workspace = true } -log = { workspace = true } -sloggers = { workspace = true } -serde = { workspace = true } -serde_yaml = { workspace = true } -serde_json = { workspace = true } +clap_utils = { workspace = true } +deposit_contract = { workspace = true } env_logger = { workspace = true } -types = { workspace = true } -state_processing = { workspace = true } +environment = { workspace = true } +eth2 = { workspace = true } +eth2_network_config = { workspace = true } +eth2_wallet = { workspace = true } ethereum_hashing = { workspace = true } ethereum_ssz = { workspace = true } -environment = { workspace = true } -eth2_network_config = { workspace = true } -deposit_contract = { workspace = true } -tree_hash = { workspace = true } -clap_utils = { workspace = true } -lighthouse_network = { workspace = true } -validator_dir = { workspace = true } -lighthouse_version = { workspace = true } -account_utils = { workspace = true } -eth2_wallet = { workspace = true } -eth2 = { workspace = true } -snap = { workspace = true } -beacon_chain = { workspace = true } -store = { workspace = true } -malloc_utils = { workspace = true } -rayon = { workspace = true } execution_layer = { workspace = true } hex = { workspace = true } +lighthouse_network = { workspace = true } +lighthouse_version = { workspace = true } +log = { workspace = true } +malloc_utils = { workspace = true } +rayon = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +sloggers = { workspace = true } +snap = { workspace = true } +state_processing = { workspace = true } +store = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } +validator_dir = { workspace = true } [package.metadata.cargo-udeps.ignore] normal = ["malloc_utils"] diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 1c91b18e9c..eda9a2ebf2 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -34,48 +34,47 @@ malloc_utils = { workspace = true, features = ["jemalloc"] } malloc_utils = { workspace = true } [dependencies] -beacon_node = { workspace = true } -slog = { workspace = true } -types = { workspace = true } -bls = { workspace = true } -ethereum_hashing = { workspace = true } -clap = { workspace = true } -environment = { workspace = true } -boot_node = { path = "../boot_node" } -futures = { workspace = true } -validator_client = { workspace = true } account_manager = { "path" = "../account_manager" } -clap_utils = { workspace = true } -eth2_network_config = { workspace = true } -lighthouse_version = { workspace = true } account_utils = { workspace = true } +beacon_node = { workspace = true } +bls = { workspace = true } +boot_node = { path = "../boot_node" } +clap = { workspace = true } +clap_utils = { workspace = true } +database_manager = { path = "../database_manager" } +directory = { workspace = true } +environment = { workspace = true } +eth2_network_config = { workspace = true } +ethereum_hashing = { workspace = true } +futures = { workspace = true } +lighthouse_version = { workspace = true } +logging = { workspace = true } +malloc_utils = { workspace = true } metrics = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } -task_executor = { workspace = true } -malloc_utils = { workspace = true } -directory = { workspace = true } -unused_port = { workspace = true } -database_manager = { path = 
"../database_manager" } slasher = { workspace = true } +slog = { workspace = true } +task_executor = { workspace = true } +types = { workspace = true } +unused_port = { workspace = true } +validator_client = { workspace = true } validator_manager = { path = "../validator_manager" } -logging = { workspace = true } [dev-dependencies] -tempfile = { workspace = true } -validator_dir = { workspace = true } -slashing_protection = { workspace = true } -lighthouse_network = { workspace = true } -sensitive_url = { workspace = true } +beacon_node_fallback = { workspace = true } +beacon_processor = { workspace = true } eth1 = { workspace = true } eth2 = { workspace = true } -beacon_processor = { workspace = true } -beacon_node_fallback = { workspace = true } initialized_validators = { workspace = true } +lighthouse_network = { workspace = true } +sensitive_url = { workspace = true } +slashing_protection = { workspace = true } +tempfile = { workspace = true } +validator_dir = { workspace = true } zeroize = { workspace = true } - [[test]] name = "lighthouse_tests" path = "tests/main.rs" diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index f95751392c..02b8e0b655 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -6,19 +6,19 @@ edition = { workspace = true } [dependencies] async-channel = { workspace = true } -tokio = { workspace = true } -slog = { workspace = true } -sloggers = { workspace = true } -types = { workspace = true } eth2_config = { workspace = true } -task_executor = { workspace = true } eth2_network_config = { workspace = true } -logging = { workspace = true } -slog-term = { workspace = true } -slog-async = { workspace = true } futures = { workspace = true } -slog-json = "2.3.0" +logging = { workspace = true } serde = { workspace = true } +slog = { workspace = true } +slog-async = { workspace = true } +slog-json = "2.3.0" +slog-term = { workspace = true } +sloggers = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } [target.'cfg(not(target_family = "unix"))'.dependencies] ctrlc = { version = "3.1.6", features = ["termination"] } diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 56a023df0b..fcecc2fc23 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -17,31 +17,31 @@ byteorder = { workspace = true } derivative = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } -metrics = { workspace = true } filesystem = { workspace = true } +flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } +lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } +lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } lru = { workspace = true } -parking_lot = { workspace = true } -rand = { workspace = true } -safe_arith = { workspace = true } -serde = { workspace = true } -slog = { workspace = true } -tree_hash = { workspace = true } -tree_hash_derive = { workspace = true } -types = { workspace = true } -strum = { workspace = true } -ssz_types = { workspace = true } # MDBX is pinned at the last version with Windows and macOS support. 
mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", rev = "e6ff4b9377c1619bcf0bfdf52bee5a980a432a1a", optional = true } -lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } -lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } +metrics = { workspace = true } +parking_lot = { workspace = true } +rand = { workspace = true } redb = { version = "2.1.4", optional = true } +safe_arith = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } +ssz_types = { workspace = true } +strum = { workspace = true } +tree_hash = { workspace = true } +tree_hash_derive = { workspace = true } +types = { workspace = true } [dev-dependencies] +logging = { workspace = true } maplit = { workspace = true } rayon = { workspace = true } tempfile = { workspace = true } -logging = { workspace = true } diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 6012283e11..d93f3a5578 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -12,28 +12,28 @@ portable = ["beacon_chain/portable"] [dependencies] alloy-primitives = { workspace = true } +beacon_chain = { workspace = true } bls = { workspace = true } compare_fields = { workspace = true } compare_fields_derive = { workspace = true } derivative = { workspace = true } +eth2_network_config = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +execution_layer = { workspace = true } +fork_choice = { workspace = true } +fs2 = { workspace = true } hex = { workspace = true } kzg = { workspace = true } +logging = { workspace = true } rayon = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_repr = { workspace = true } serde_yaml = { workspace = true } -eth2_network_config = { workspace = true } -ethereum_ssz = { workspace = true } -ethereum_ssz_derive = { workspace = true } -tree_hash = { workspace = true } -tree_hash_derive = { workspace = true } +snap = { workspace = true } state_processing = { workspace = true } swap_or_not_shuffle = { workspace = true } +tree_hash = { workspace = true } +tree_hash_derive = { workspace = true } types = { workspace = true } -snap = { workspace = true } -fs2 = { workspace = true } -beacon_chain = { workspace = true } -fork_choice = { workspace = true } -execution_layer = { workspace = true } -logging = { workspace = true } diff --git a/testing/eth1_test_rig/Cargo.toml b/testing/eth1_test_rig/Cargo.toml index c76ef91183..9b0ac5ec9b 100644 --- a/testing/eth1_test_rig/Cargo.toml +++ b/testing/eth1_test_rig/Cargo.toml @@ -5,12 +5,12 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -tokio = { workspace = true } +deposit_contract = { workspace = true } +ethers-contract = "1.0.2" ethers-core = { workspace = true } ethers-providers = { workspace = true } -ethers-contract = "1.0.2" -types = { workspace = true } -serde_json = { workspace = true } -deposit_contract = { workspace = true } -unused_port = { workspace = true } hex = { workspace = true } +serde_json = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } +unused_port = { workspace = true } diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 159561d5dd..28ff944799 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ 
b/testing/execution_engine_integration/Cargo.toml @@ -5,22 +5,22 @@ edition = { workspace = true } [dependencies] async-channel = { workspace = true } -tempfile = { workspace = true } +deposit_contract = { workspace = true } +ethers-core = { workspace = true } +ethers-providers = { workspace = true } +execution_layer = { workspace = true } +fork_choice = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } +logging = { workspace = true } +reqwest = { workspace = true } +sensitive_url = { workspace = true } serde_json = { workspace = true } task_executor = { workspace = true } +tempfile = { workspace = true } tokio = { workspace = true } -futures = { workspace = true } -execution_layer = { workspace = true } -sensitive_url = { workspace = true } types = { workspace = true } unused_port = { workspace = true } -ethers-providers = { workspace = true } -ethers-core = { workspace = true } -deposit_contract = { workspace = true } -reqwest = { workspace = true } -hex = { workspace = true } -fork_choice = { workspace = true } -logging = { workspace = true } [features] portable = ["types/portable"] diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index 97e73b8a2f..0d9db528da 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ -5,14 +5,14 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -environment = { workspace = true } beacon_node = { workspace = true } -types = { workspace = true } -tempfile = { workspace = true } -eth2 = { workspace = true } -validator_client = { workspace = true } beacon_node_fallback = { workspace = true } -validator_dir = { workspace = true, features = ["insecure_keys"] } -sensitive_url = { workspace = true } +environment = { workspace = true } +eth2 = { workspace = true } execution_layer = { workspace = true } +sensitive_url = { workspace = true } +tempfile = { workspace = true } tokio = { workspace = true } +types = { workspace = true } +validator_client = { workspace = true } +validator_dir = { workspace = true, features = ["insecure_keys"] } diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index 7772523284..77645dba45 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -3,20 +3,19 @@ name = "simulator" version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -node_test_rig = { path = "../node_test_rig" } -execution_layer = { workspace = true } -types = { workspace = true } -parking_lot = { workspace = true } -futures = { workspace = true } -tokio = { workspace = true } -env_logger = { workspace = true } clap = { workspace = true } +env_logger = { workspace = true } +eth2_network_config = { workspace = true } +execution_layer = { workspace = true } +futures = { workspace = true } +kzg = { workspace = true } +node_test_rig = { path = "../node_test_rig" } +parking_lot = { workspace = true } rayon = { workspace = true } sensitive_url = { path = "../../common/sensitive_url" } -eth2_network_config = { workspace = true } serde_json = { workspace = true } -kzg = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index 142a657f07..7c29715346 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml 
@@ -3,15 +3,14 @@ name = "state_transition_vectors" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -state_processing = { workspace = true } -types = { workspace = true } -ethereum_ssz = { workspace = true } beacon_chain = { workspace = true } +ethereum_ssz = { workspace = true } +state_processing = { workspace = true } tokio = { workspace = true } +types = { workspace = true } [features] -portable = ["beacon_chain/portable"] \ No newline at end of file +portable = ["beacon_chain/portable"] diff --git a/testing/test-test_logger/Cargo.toml b/testing/test-test_logger/Cargo.toml index 63bb87c06e..d2d705f714 100644 --- a/testing/test-test_logger/Cargo.toml +++ b/testing/test-test_logger/Cargo.toml @@ -2,7 +2,6 @@ name = "test-test_logger" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index 0096d74f64..376aa13406 100644 --- a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -2,31 +2,30 @@ name = "web3signer_tests" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] [dev-dependencies] +account_utils = { workspace = true } async-channel = { workspace = true } +environment = { workspace = true } eth2_keystore = { workspace = true } -types = { workspace = true } +eth2_network_config = { workspace = true } +futures = { workspace = true } +initialized_validators = { workspace = true } +logging = { workspace = true } +parking_lot = { workspace = true } +reqwest = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +slashing_protection = { workspace = true } +slot_clock = { workspace = true } +task_executor = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } -reqwest = { workspace = true } +types = { workspace = true } url = { workspace = true } -slot_clock = { workspace = true } -futures = { workspace = true } -task_executor = { workspace = true } -environment = { workspace = true } -account_utils = { workspace = true } -serde = { workspace = true } -serde_yaml = { workspace = true } -eth2_network_config = { workspace = true } -serde_json = { workspace = true } -zip = { workspace = true } -parking_lot = { workspace = true } -logging = { workspace = true } -initialized_validators = { workspace = true } -slashing_protection = { workspace = true } validator_store = { workspace = true } +zip = { workspace = true } diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 044a622d54..504d96ae1c 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -17,10 +17,11 @@ beacon_node_fallback = { workspace = true } clap = { workspace = true } clap_utils = { workspace = true } directory = { workspace = true } -doppelganger_service = { workspace = true } dirs = { workspace = true } -eth2 = { workspace = true } +doppelganger_service = { workspace = true } environment = { workspace = true } +eth2 = { workspace = true } +fdlimit = "0.3.0" graffiti_file = { workspace = true } hyper = { workspace = true } initialized_validators = { workspace = true } @@ -29,15 +30,14 @@ monitoring_api = { workspace = 
true } parking_lot = { workspace = true } reqwest = { workspace = true } sensitive_url = { workspace = true } -slashing_protection = { workspace = true } serde = { workspace = true } +slashing_protection = { workspace = true } slog = { workspace = true } slot_clock = { workspace = true } +tokio = { workspace = true } types = { workspace = true } validator_http_api = { workspace = true } validator_http_metrics = { workspace = true } validator_metrics = { workspace = true } validator_services = { workspace = true } validator_store = { workspace = true } -tokio = { workspace = true } -fdlimit = "0.3.0" diff --git a/validator_client/doppelganger_service/Cargo.toml b/validator_client/doppelganger_service/Cargo.toml index e5f7d3f2ba..66b61a411b 100644 --- a/validator_client/doppelganger_service/Cargo.toml +++ b/validator_client/doppelganger_service/Cargo.toml @@ -17,4 +17,4 @@ types = { workspace = true } [dev-dependencies] futures = { workspace = true } -logging = {workspace = true } +logging = { workspace = true } diff --git a/validator_client/graffiti_file/Cargo.toml b/validator_client/graffiti_file/Cargo.toml index 02e48849d1..8868f5aec8 100644 --- a/validator_client/graffiti_file/Cargo.toml +++ b/validator_client/graffiti_file/Cargo.toml @@ -9,11 +9,11 @@ name = "graffiti_file" path = "src/lib.rs" [dependencies] -serde = { workspace = true } bls = { workspace = true } -types = { workspace = true } +serde = { workspace = true } slog = { workspace = true } +types = { workspace = true } [dev-dependencies] -tempfile = { workspace = true } hex = { workspace = true } +tempfile = { workspace = true } diff --git a/validator_client/http_api/Cargo.toml b/validator_client/http_api/Cargo.toml index 96c836f6f3..76a021ab8c 100644 --- a/validator_client/http_api/Cargo.toml +++ b/validator_client/http_api/Cargo.toml @@ -10,25 +10,25 @@ path = "src/lib.rs" [dependencies] account_utils = { workspace = true } -bls = { workspace = true } beacon_node_fallback = { workspace = true } +bls = { workspace = true } deposit_contract = { workspace = true } directory = { workspace = true } -doppelganger_service = { workspace = true } dirs = { workspace = true } -graffiti_file = { workspace = true } +doppelganger_service = { workspace = true } eth2 = { workspace = true } eth2_keystore = { workspace = true } ethereum_serde_utils = { workspace = true } +filesystem = { workspace = true } +graffiti_file = { workspace = true } initialized_validators = { workspace = true } lighthouse_version = { workspace = true } logging = { workspace = true } parking_lot = { workspace = true } -filesystem = { workspace = true } rand = { workspace = true } +sensitive_url = { workspace = true } serde = { workspace = true } signing_method = { workspace = true } -sensitive_url = { workspace = true } slashing_protection = { workspace = true } slog = { workspace = true } slot_clock = { workspace = true } @@ -39,15 +39,15 @@ tempfile = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } types = { workspace = true } -validator_dir = { workspace = true } -validator_store = { workspace = true } -validator_services = { workspace = true } url = { workspace = true } -warp_utils = { workspace = true } +validator_dir = { workspace = true } +validator_services = { workspace = true } +validator_store = { workspace = true } warp = { workspace = true } +warp_utils = { workspace = true } zeroize = { workspace = true } [dev-dependencies] -itertools = { workspace = true } futures = { workspace = true } +itertools = { workspace = 
true } rand = { workspace = true, features = ["small_rng"] } diff --git a/validator_client/http_metrics/Cargo.toml b/validator_client/http_metrics/Cargo.toml index a9de26a55b..c29a4d18fa 100644 --- a/validator_client/http_metrics/Cargo.toml +++ b/validator_client/http_metrics/Cargo.toml @@ -5,16 +5,16 @@ edition = { workspace = true } authors = ["Sigma Prime "] [dependencies] +lighthouse_version = { workspace = true } malloc_utils = { workspace = true } -slot_clock = { workspace = true } metrics = { workspace = true } parking_lot = { workspace = true } serde = { workspace = true } slog = { workspace = true } -warp_utils = { workspace = true } -warp = { workspace = true } -lighthouse_version = { workspace = true } +slot_clock = { workspace = true } +types = { workspace = true } +validator_metrics = { workspace = true } validator_services = { workspace = true } validator_store = { workspace = true } -validator_metrics = { workspace = true } -types = { workspace = true } +warp = { workspace = true } +warp_utils = { workspace = true } diff --git a/validator_client/initialized_validators/Cargo.toml b/validator_client/initialized_validators/Cargo.toml index 9c7a3f19d6..05e85261f9 100644 --- a/validator_client/initialized_validators/Cargo.toml +++ b/validator_client/initialized_validators/Cargo.toml @@ -5,23 +5,23 @@ edition = { workspace = true } authors = ["Sigma Prime "] [dependencies] -signing_method = { workspace = true } account_utils = { workspace = true } +bincode = { workspace = true } +bls = { workspace = true } eth2_keystore = { workspace = true } -metrics = { workspace = true } +filesystem = { workspace = true } lockfile = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } +rand = { workspace = true } reqwest = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +signing_method = { workspace = true } slog = { workspace = true } +tokio = { workspace = true } types = { workspace = true } url = { workspace = true } validator_dir = { workspace = true } -rand = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -bls = { workspace = true } -tokio = { workspace = true } -bincode = { workspace = true } -filesystem = { workspace = true } validator_metrics = { workspace = true } zeroize = { workspace = true } diff --git a/validator_client/signing_method/Cargo.toml b/validator_client/signing_method/Cargo.toml index 0f3852eff6..3e1a48142f 100644 --- a/validator_client/signing_method/Cargo.toml +++ b/validator_client/signing_method/Cargo.toml @@ -6,12 +6,12 @@ authors = ["Sigma Prime "] [dependencies] eth2_keystore = { workspace = true } +ethereum_serde_utils = { workspace = true } lockfile = { workspace = true } parking_lot = { workspace = true } reqwest = { workspace = true } +serde = { workspace = true } task_executor = { workspace = true } types = { workspace = true } url = { workspace = true } validator_metrics = { workspace = true } -serde = { workspace = true } -ethereum_serde_utils = { workspace = true } diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 6982958bd5..1a098742d8 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -10,16 +10,16 @@ name = "slashing_protection_tests" path = "tests/main.rs" [dependencies] -tempfile = { workspace = true } -types = { workspace = true } -rusqlite = { workspace = true } -r2d2 = { workspace = true } -r2d2_sqlite = 
"0.21.0" -serde = { workspace = true } -serde_json = { workspace = true } +arbitrary = { workspace = true, features = ["derive"] } ethereum_serde_utils = { workspace = true } filesystem = { workspace = true } -arbitrary = { workspace = true, features = ["derive"] } +r2d2 = { workspace = true } +r2d2_sqlite = "0.21.0" +rusqlite = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +tempfile = { workspace = true } +types = { workspace = true } [dev-dependencies] rayon = { workspace = true } diff --git a/validator_client/validator_services/Cargo.toml b/validator_client/validator_services/Cargo.toml index 7dcd815541..21f0ae2d77 100644 --- a/validator_client/validator_services/Cargo.toml +++ b/validator_client/validator_services/Cargo.toml @@ -6,18 +6,18 @@ authors = ["Sigma Prime "] [dependencies] beacon_node_fallback = { workspace = true } -validator_metrics = { workspace = true } -validator_store = { workspace = true } -graffiti_file = { workspace = true } +bls = { workspace = true } doppelganger_service = { workspace = true } environment = { workspace = true } eth2 = { workspace = true } futures = { workspace = true } +graffiti_file = { workspace = true } parking_lot = { workspace = true } safe_arith = { workspace = true } slog = { workspace = true } slot_clock = { workspace = true } tokio = { workspace = true } -types = { workspace = true } tree_hash = { workspace = true } -bls = { workspace = true } +types = { workspace = true } +validator_metrics = { workspace = true } +validator_store = { workspace = true } diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml index 36df256841..7cb05616f4 100644 --- a/validator_manager/Cargo.toml +++ b/validator_manager/Cargo.toml @@ -2,28 +2,27 @@ name = "validator_manager" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -clap = { workspace = true } -types = { workspace = true } -environment = { workspace = true } -eth2_network_config = { workspace = true } -clap_utils = { workspace = true } -eth2_wallet = { workspace = true } account_utils = { workspace = true } +clap = { workspace = true } +clap_utils = { workspace = true } +derivative = { workspace = true } +environment = { workspace = true } +eth2 = { workspace = true } +eth2_network_config = { workspace = true } +eth2_wallet = { workspace = true } +ethereum_serde_utils = { workspace = true } +hex = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -ethereum_serde_utils = { workspace = true } -tree_hash = { workspace = true } -eth2 = { workspace = true } -hex = { workspace = true } tokio = { workspace = true } -derivative = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } zeroize = { workspace = true } [dev-dependencies] -tempfile = { workspace = true } regex = { workspace = true } +tempfile = { workspace = true } validator_http_api = { workspace = true } diff --git a/watch/Cargo.toml b/watch/Cargo.toml index 9e8da3b293..41cfb58e28 100644 --- a/watch/Cargo.toml +++ b/watch/Cargo.toml @@ -10,37 +10,36 @@ path = "src/lib.rs" [[bin]] name = "watch" path = "src/main.rs" - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +axum = "0.7" +beacon_node = { workspace = true } +bls = { workspace = true } clap = { workspace = true } clap_utils = { workspace = true } -log = { workspace = true } -env_logger = { 
workspace = true } -types = { workspace = true } -eth2 = { workspace = true } -beacon_node = { workspace = true } -tokio = { workspace = true } -axum = "0.7" -hyper = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -reqwest = { workspace = true } -url = { workspace = true } -rand = { workspace = true } diesel = { version = "2.0.2", features = ["postgres", "r2d2"] } diesel_migrations = { version = "2.0.0", features = ["postgres"] } -bls = { workspace = true } +env_logger = { workspace = true } +eth2 = { workspace = true } +hyper = { workspace = true } +log = { workspace = true } r2d2 = { workspace = true } +rand = { workspace = true } +reqwest = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } serde_yaml = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } +url = { workspace = true } [dev-dependencies] -tokio-postgres = "0.7.5" -http_api = { workspace = true } beacon_chain = { workspace = true } -network = { workspace = true } -testcontainers = "0.15" -unused_port = { workspace = true } -task_executor = { workspace = true } +http_api = { workspace = true } logging = { workspace = true } +network = { workspace = true } +task_executor = { workspace = true } +testcontainers = "0.15" +tokio-postgres = "0.7.5" +unused_port = { workspace = true } From 07e82dabc06f8e0f7d92ca3edf4ca061899e20da Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 19 Dec 2024 16:46:06 +1100 Subject: [PATCH 23/25] Delete OTB verification service (#6631) * Delete OTB verification service * Merge branch 'unstable' into delete-otb --- .../beacon_chain/src/execution_payload.rs | 4 - beacon_node/beacon_chain/src/lib.rs | 1 - .../src/otb_verification_service.rs | 381 ------------------ beacon_node/client/src/builder.rs | 2 - beacon_node/store/src/lib.rs | 2 +- 5 files changed, 1 insertion(+), 389 deletions(-) delete mode 100644 beacon_node/beacon_chain/src/otb_verification_service.rs diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 5e13f0624d..92d24c53c0 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -7,7 +7,6 @@ //! So, this module contains functions that one might expect to find in other crates, but they live //! here for good reason. 
-use crate::otb_verification_service::OptimisticTransitionBlock; use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, @@ -284,9 +283,6 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>( "block_hash" => ?execution_payload.parent_hash(), "msg" => "the terminal block/parent was unavailable" ); - // Store Optimistic Transition Block in Database for later Verification - OptimisticTransitionBlock::from_block(block) - .persist_in_store::(&chain.store)?; Ok(()) } else { Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()) diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 2953516fb1..d9728b9fd4 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -47,7 +47,6 @@ pub mod observed_block_producers; pub mod observed_data_sidecars; pub mod observed_operations; mod observed_slashable; -pub mod otb_verification_service; mod persisted_beacon_chain; mod persisted_fork_choice; mod pre_finalization_cache; diff --git a/beacon_node/beacon_chain/src/otb_verification_service.rs b/beacon_node/beacon_chain/src/otb_verification_service.rs deleted file mode 100644 index 31034a7d59..0000000000 --- a/beacon_node/beacon_chain/src/otb_verification_service.rs +++ /dev/null @@ -1,381 +0,0 @@ -use crate::execution_payload::{validate_merge_block, AllowOptimisticImport}; -use crate::{ - BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, ExecutionPayloadError, - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, -}; -use itertools::process_results; -use proto_array::InvalidationOperation; -use slog::{crit, debug, error, info, warn}; -use slot_clock::SlotClock; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use state_processing::per_block_processing::is_merge_transition_complete; -use std::sync::Arc; -use store::{DBColumn, Error as StoreError, HotColdDB, KeyValueStore, StoreItem}; -use task_executor::{ShutdownReason, TaskExecutor}; -use tokio::time::sleep; -use tree_hash::TreeHash; -use types::{BeaconBlockRef, EthSpec, Hash256, Slot}; -use DBColumn::OptimisticTransitionBlock as OTBColumn; - -#[derive(Clone, Debug, Decode, Encode, PartialEq)] -pub struct OptimisticTransitionBlock { - root: Hash256, - slot: Slot, -} - -impl OptimisticTransitionBlock { - // types::BeaconBlockRef<'_, ::EthSpec> - pub fn from_block(block: BeaconBlockRef) -> Self { - Self { - root: block.tree_hash_root(), - slot: block.slot(), - } - } - - pub fn root(&self) -> &Hash256 { - &self.root - } - - pub fn slot(&self) -> &Slot { - &self.slot - } - - pub fn persist_in_store(&self, store: A) -> Result<(), StoreError> - where - T: BeaconChainTypes, - A: AsRef>, - { - if store - .as_ref() - .item_exists::(&self.root)? - { - Ok(()) - } else { - store.as_ref().put_item(&self.root, self) - } - } - - pub fn remove_from_store(&self, store: A) -> Result<(), StoreError> - where - T: BeaconChainTypes, - A: AsRef>, - { - store - .as_ref() - .hot_db - .key_delete(OTBColumn.into(), self.root.as_slice()) - } - - fn is_canonical( - &self, - chain: &BeaconChain, - ) -> Result { - Ok(chain - .forwards_iter_block_roots_until(self.slot, self.slot)? - .next() - .transpose()? - .map(|(root, _)| root) - == Some(self.root)) - } -} - -impl StoreItem for OptimisticTransitionBlock { - fn db_column() -> DBColumn { - OTBColumn - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self::from_ssz_bytes(bytes)?) 
- } -} - -/// The routine is expected to run once per epoch, 1/4th through the epoch. -pub const EPOCH_DELAY_FACTOR: u32 = 4; - -/// Spawns a routine which checks the validity of any optimistically imported transition blocks -/// -/// This routine will run once per epoch, at `epoch_duration / EPOCH_DELAY_FACTOR` after -/// the start of each epoch. -/// -/// The service will not be started if there is no `execution_layer` on the `chain`. -pub fn start_otb_verification_service( - executor: TaskExecutor, - chain: Arc>, -) { - // Avoid spawning the service if there's no EL, it'll just error anyway. - if chain.execution_layer.is_some() { - executor.spawn( - async move { otb_verification_service(chain).await }, - "otb_verification_service", - ); - } -} - -pub fn load_optimistic_transition_blocks( - chain: &BeaconChain, -) -> Result, StoreError> { - process_results( - chain.store.hot_db.iter_column::(OTBColumn), - |iter| { - iter.map(|(_, bytes)| OptimisticTransitionBlock::from_store_bytes(&bytes)) - .collect() - }, - )? -} - -#[derive(Debug)] -pub enum Error { - ForkChoice(String), - BeaconChain(BeaconChainError), - StoreError(StoreError), - NoBlockFound(OptimisticTransitionBlock), -} - -pub async fn validate_optimistic_transition_blocks( - chain: &Arc>, - otbs: Vec, -) -> Result<(), Error> { - let finalized_slot = chain - .canonical_head - .fork_choice_read_lock() - .get_finalized_block() - .map_err(|e| Error::ForkChoice(format!("{:?}", e)))? - .slot; - - // separate otbs into - // non-canonical - // finalized canonical - // unfinalized canonical - let mut non_canonical_otbs = vec![]; - let (finalized_canonical_otbs, unfinalized_canonical_otbs) = process_results( - otbs.into_iter().map(|otb| { - otb.is_canonical(chain) - .map(|is_canonical| (otb, is_canonical)) - }), - |pair_iter| { - pair_iter - .filter_map(|(otb, is_canonical)| { - if is_canonical { - Some(otb) - } else { - non_canonical_otbs.push(otb); - None - } - }) - .partition::, _>(|otb| *otb.slot() <= finalized_slot) - }, - ) - .map_err(Error::BeaconChain)?; - - // remove non-canonical blocks that conflict with finalized checkpoint from the database - for otb in non_canonical_otbs { - if *otb.slot() <= finalized_slot { - otb.remove_from_store::(&chain.store) - .map_err(Error::StoreError)?; - } - } - - // ensure finalized canonical otb are valid, otherwise kill client - for otb in finalized_canonical_otbs { - match chain.get_block(otb.root()).await { - Ok(Some(block)) => { - match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await - { - Ok(()) => { - // merge transition block is valid, remove it from OTB - otb.remove_from_store::(&chain.store) - .map_err(Error::StoreError)?; - info!( - chain.log, - "Validated merge transition block"; - "block_root" => ?otb.root(), - "type" => "finalized" - ); - } - // The block was not able to be verified by the EL. Leave the OTB in the - // database since the EL is likely still syncing and may verify the block - // later. - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::UnverifiedNonOptimisticCandidate, - )) => (), - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::InvalidTerminalPoWBlock { .. }, - )) => { - // Finalized Merge Transition Block is Invalid! Kill the Client! - crit!( - chain.log, - "Finalized merge transition block is invalid!"; - "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. 
\ - You may be on a hostile network.", - "block_hash" => ?block.canonical_root() - ); - let mut shutdown_sender = chain.shutdown_sender(); - if let Err(e) = shutdown_sender.try_send(ShutdownReason::Failure( - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, - )) { - crit!( - chain.log, - "Failed to shut down client"; - "error" => ?e, - "shutdown_reason" => INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON - ); - } - } - _ => {} - } - } - Ok(None) => return Err(Error::NoBlockFound(otb)), - // Our database has pruned the payload and the payload was unavailable on the EL since - // the EL is still syncing or the payload is non-canonical. - Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), - Err(e) => return Err(Error::BeaconChain(e)), - } - } - - // attempt to validate any non-finalized canonical otb blocks - for otb in unfinalized_canonical_otbs { - match chain.get_block(otb.root()).await { - Ok(Some(block)) => { - match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await - { - Ok(()) => { - // merge transition block is valid, remove it from OTB - otb.remove_from_store::(&chain.store) - .map_err(Error::StoreError)?; - info!( - chain.log, - "Validated merge transition block"; - "block_root" => ?otb.root(), - "type" => "not finalized" - ); - } - // The block was not able to be verified by the EL. Leave the OTB in the - // database since the EL is likely still syncing and may verify the block - // later. - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::UnverifiedNonOptimisticCandidate, - )) => (), - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::InvalidTerminalPoWBlock { .. }, - )) => { - // Unfinalized Merge Transition Block is Invalid -> Run process_invalid_execution_payload - warn!( - chain.log, - "Merge transition block invalid"; - "block_root" => ?otb.root() - ); - chain - .process_invalid_execution_payload( - &InvalidationOperation::InvalidateOne { - block_root: *otb.root(), - }, - ) - .await - .map_err(|e| { - warn!( - chain.log, - "Error checking merge transition block"; - "error" => ?e, - "location" => "process_invalid_execution_payload" - ); - Error::BeaconChain(e) - })?; - } - _ => {} - } - } - Ok(None) => return Err(Error::NoBlockFound(otb)), - // Our database has pruned the payload and the payload was unavailable on the EL since - // the EL is still syncing or the payload is non-canonical. - Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), - Err(e) => return Err(Error::BeaconChain(e)), - } - } - - Ok(()) -} - -/// Loop until any optimistically imported merge transition blocks have been verified and -/// the merge has been finalized. -async fn otb_verification_service(chain: Arc>) { - let epoch_duration = chain.slot_clock.slot_duration() * T::EthSpec::slots_per_epoch() as u32; - loop { - match chain - .slot_clock - .duration_to_next_epoch(T::EthSpec::slots_per_epoch()) - { - Some(duration) => { - let additional_delay = epoch_duration / EPOCH_DELAY_FACTOR; - sleep(duration + additional_delay).await; - - debug!( - chain.log, - "OTB verification service firing"; - ); - - if !is_merge_transition_complete( - &chain.canonical_head.cached_head().snapshot.beacon_state, - ) { - // We are pre-merge. Nothing to do yet. 
- continue; - } - - // load all optimistically imported transition blocks from the database - match load_optimistic_transition_blocks(chain.as_ref()) { - Ok(otbs) => { - if otbs.is_empty() { - if chain - .canonical_head - .fork_choice_read_lock() - .get_finalized_block() - .map_or(false, |block| { - block.execution_status.is_execution_enabled() - }) - { - // there are no optimistic blocks in the database, we can exit - // the service since the merge transition is finalized and we'll - // never see another transition block - break; - } else { - debug!( - chain.log, - "No optimistic transition blocks"; - "info" => "waiting for the merge transition to finalize" - ) - } - } - if let Err(e) = validate_optimistic_transition_blocks(&chain, otbs).await { - warn!( - chain.log, - "Error while validating optimistic transition blocks"; - "error" => ?e - ); - } - } - Err(e) => { - error!( - chain.log, - "Error loading optimistic transition blocks"; - "error" => ?e - ); - } - }; - } - None => { - error!(chain.log, "Failed to read slot clock"); - // If we can't read the slot clock, just wait another slot. - sleep(chain.slot_clock.slot_duration()).await; - } - }; - } - debug!( - chain.log, - "No optimistic transition blocks in database"; - "msg" => "shutting down OTB verification service" - ); -} diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 961f5140f9..7c6a253aca 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -7,7 +7,6 @@ use crate::Client; use beacon_chain::attestation_simulator::start_attestation_simulator_service; use beacon_chain::data_availability_checker::start_availability_cache_maintenance_service; use beacon_chain::graffiti_calculator::start_engine_version_cache_refresh_service; -use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; use beacon_chain::{ @@ -970,7 +969,6 @@ where } start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone()); - start_otb_verification_service(runtime_context.executor.clone(), beacon_chain.clone()); start_availability_cache_maintenance_service( runtime_context.executor.clone(), beacon_chain.clone(), diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 0498c7c1e2..09ae9a32dd 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -332,7 +332,7 @@ pub enum DBColumn { BeaconRandaoMixes, #[strum(serialize = "dht")] DhtEnrs, - /// For Optimistically Imported Merge Transition Blocks + /// DEPRECATED. For Optimistically Imported Merge Transition Blocks #[strum(serialize = "otb")] OptimisticTransitionBlock, /// DEPRECATED. Can be removed once schema v22 is buried by a hard fork. 
From 502239871508b6a39f27a6fc54c82d46d59f9bca Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Thu, 19 Dec 2024 13:46:10 +0800 Subject: [PATCH 24/25] Revise Siren documentation (#6553) * revise Siren doc * Fix broken links * Fix broken links * broken links * mdlint * mdlint * mdlint again * Merge branch 'unstable' into book-siren * test whether I have the required privs :-) * revise * some minor siren related changes for the book * updates re: `--net=host` * lint * Minor revision * Add note * mdlint * Merge branch 'unstable' into book-siren * Merge branch 'unstable' into book-siren * Merge remote-tracking branch 'origin/unstable' into book-siren * Fix spellcheck * Capital letters SSL --- book/src/SUMMARY.md | 3 +- book/src/api-vc-auth-header.md | 4 +- book/src/lighthouse-ui.md | 1 - book/src/ui-authentication.md | 4 +- book/src/ui-configuration.md | 121 +++++++++++++++++++++++++++------ book/src/ui-faqs.md | 7 +- wordlist.txt | 1 - 7 files changed, 109 insertions(+), 32 deletions(-) diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 02683a1172..44d7702e5f 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -33,9 +33,8 @@ * [Signature Header](./api-vc-sig-header.md) * [Prometheus Metrics](./advanced_metrics.md) * [Lighthouse UI (Siren)](./lighthouse-ui.md) - * [Installation](./ui-installation.md) - * [Authentication](./ui-authentication.md) * [Configuration](./ui-configuration.md) + * [Authentication](./ui-authentication.md) * [Usage](./ui-usage.md) * [FAQs](./ui-faqs.md) * [Advanced Usage](./advanced.md) diff --git a/book/src/api-vc-auth-header.md b/book/src/api-vc-auth-header.md index feb93724c0..f792ee870e 100644 --- a/book/src/api-vc-auth-header.md +++ b/book/src/api-vc-auth-header.md @@ -20,11 +20,11 @@ Authorization: Bearer hGut6B8uEujufDXSmZsT0thnxvdvKFBvh The API token is stored as a file in the `validators` directory. For most users this is `~/.lighthouse/{network}/validators/api-token.txt`, unless overridden using the `--http-token-path` CLI parameter. Here's an -example using the `cat` command to print the token to the terminal, but any +example using the `cat` command to print the token for mainnet to the terminal, but any text editor will suffice: ```bash -cat api-token.txt +cat ~/.lighthouse/mainnet/validators/api-token.txt hGut6B8uEujufDXSmZsT0thnxvdvKFBvh ``` diff --git a/book/src/lighthouse-ui.md b/book/src/lighthouse-ui.md index 106a5e8947..f2662f4a69 100644 --- a/book/src/lighthouse-ui.md +++ b/book/src/lighthouse-ui.md @@ -21,7 +21,6 @@ The UI is currently in active development. It resides in the See the following Siren specific topics for more context-specific information: -- [Installation Guide](./ui-installation.md) - Information to install and run the Lighthouse UI. - [Configuration Guide](./ui-configuration.md) - Explanation of how to setup and configure Siren. - [Authentication Guide](./ui-authentication.md) - Explanation of how Siren authentication works and protects validator actions. diff --git a/book/src/ui-authentication.md b/book/src/ui-authentication.md index 9e3a94db78..81b867bae2 100644 --- a/book/src/ui-authentication.md +++ b/book/src/ui-authentication.md @@ -2,12 +2,12 @@ ## Siren Session -For enhanced security, Siren will require users to authenticate with their session password to access the dashboard. This is crucial because Siren now includes features that can permanently alter the status of user validators. 
The session password must be set during the [installation](./ui-installation.md) process before running the Docker or local build, either in an `.env` file or via Docker flags.
+For enhanced security, Siren will require users to authenticate with their session password to access the dashboard. This is crucial because Siren now includes features that can permanently alter the status of the user's validators. The session password must be set during the [configuration](./ui-configuration.md) process before running the Docker or local build, either in an `.env` file or via Docker flags.
 
 ![exit](imgs/ui-session.png)
 
 ## Protected Actions
 
-Prior to executing any sensitive validator action, Siren will request authentication of the session password. If you wish to update your password please refer to the Siren [installation process](./ui-installation.md).
+Prior to executing any sensitive validator action, Siren will request authentication of the session password. If you wish to update your password, please refer to the Siren [configuration process](./ui-configuration.md).
 
 ![exit](imgs/ui-auth.png)
diff --git a/book/src/ui-configuration.md b/book/src/ui-configuration.md
index eeb2c9a51c..34cc9fe7ca 100644
--- a/book/src/ui-configuration.md
+++ b/book/src/ui-configuration.md
@@ -1,37 +1,116 @@
-# Configuration
+# 📦 Installation
+
+Siren supports any operating system that supports containers and/or NodeJS 18; this includes Linux, macOS, and Windows. The recommended way of running Siren is by launching the [docker container](https://hub.docker.com/r/sigp/siren).
+
+## Version Requirement
+
+To ensure proper functionality, the Siren app requires Lighthouse v4.3.0 or higher. You can find these versions on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository.
+
+## Configuration
 
 Siren requires a connection to both a Lighthouse Validator Client and a Lighthouse Beacon Node.
-To enable connection, you must generate .env file based on the provided .env.example
-
-## Connecting to the Clients
 
 Both the Beacon node and the Validator client need to have their HTTP APIs enabled.
-These ports should be accessible from Siren.
+These ports should be accessible from Siren. This means adding the flag `--http` on both the beacon node and the validator client.
 
 To enable the HTTP API for the beacon node, utilize the `--gui` CLI flag. This action ensures that the HTTP API can be accessed by other software on the same machine.
 
 > The Beacon Node must be run with the `--gui` flag set.
 
-If you require accessibility from another machine within the network, configure the `--http-address` to match the local LAN IP of the system running the Beacon Node and Validator Client.
+## Running the Docker container (Recommended)
 
-> To access from another machine on the same network (192.168.0.200) set the Beacon Node and Validator Client `--http-address` as `192.168.0.200`. When this is set, the validator client requires the flag `--beacon-nodes http://192.168.0.200:5052` to connect to the beacon node.
+We recommend running Siren's container next to your beacon node (on the same server), as it's essentially a webapp that you can access with any browser.
 
-In a similar manner, the validator client requires activation of the `--http` flag, along with the optional consideration of configuring the `--http-address` flag. If `--http-address` flag is set on the Validator Client, then the `--unencrypted-http-transport` flag is required as well.
 
+ 1. Create a directory to run Siren:
 
-If you run the Docker container, it will fail to startup if your BN/VC are not accessible, or if you provided a wrong API token.
+    ```bash
+    cd ~
+    mkdir Siren
+    cd Siren
+    ```
 
-## API Token
+ 1. Create a configuration file in the `Siren` directory: `nano .env` and insert the following fields into the `.env` file. The field values are given here as an example; modify the fields as necessary. For example, the `API_TOKEN` can be obtained from [`Validator Client Authorization Header`](./api-vc-auth-header.md).
 
-The API Token is a secret key that allows you to connect to the validator
-client. The validator client's HTTP API is guarded by this key because it
-contains sensitive validator information and the ability to modify
-validators. Please see [`Validator Authorization`](./api-vc-auth-header.md)
-for further details.
+    A full example with all possible configuration options can be found [here](https://github.com/sigp/siren/blob/stable/.env.example).
 
-Siren requires this token in order to connect to the Validator client.
-The token is located in the default data directory of the validator
-client. The default path is
-`~/.lighthouse//validators/api-token.txt`.
+    ```
+    BEACON_URL=http://localhost:5052
+    VALIDATOR_URL=http://localhost:5062
+    API_TOKEN=R6YhbDO6gKjNMydtZHcaCovFbQ0izq5Hk
+    SESSION_PASSWORD=your_password
+    ```
 
-The contents of this file for the desired validator client needs to be
-entered.
+ 1. You can now start Siren with:
+
+    ```bash
+    docker run --rm -ti --name siren --env-file $PWD/.env --net host sigp/siren
+    ```
+
+    Note that, due to the `--net=host` flag, this will expose Siren on ports 3000, 80, and 443. Preferably, only the latter should be accessible. Adjust your firewall and/or skip the flag wherever possible.
+
+    If it fails to start, an error message will be shown. For example, the error
+
+    ```
+    http://localhost:5062 unreachable, check settings and connection
+    ```
+
+    means that the validator client is not running, that the `--http` flag is not provided, or that it is otherwise inaccessible from within the container. Another common error is:
+
+    ```
+    validator api issue, server response: 403
+    ```
+
+    which means that the API token is incorrect. Check that you have provided the correct token in the field `API_TOKEN` in `.env`.
+
+    When Siren has successfully started, you should see the log `LOG [NestApplication] Nest application successfully started +118ms`.
+
+ 1. Siren is now accessible at `https://` (when used with `--net=host`). You will get a warning about an invalid certificate; this can be safely ignored.
+
+    > Note: We recommend setting a strong password when running Siren to protect it from unauthorized access.
+
+Advanced users can mount their own certificates or disable SSL altogether; see the `SSL Certificates` section below.
+
+## Building From Source
+
+### Docker
+
+The Docker image can be built with the following command:
+`docker build -f Dockerfile -t siren .`
+
+### Building locally
+
+To build from source, ensure that your system has `Node v18.18` and `yarn` installed.
+
+#### Build and run the backend
+
+Navigate to the backend directory with `cd backend`. Install all required Node packages by running `yarn`. Once the installation is complete, compile the backend with `yarn build`. Deploy the backend in a production environment with `yarn start:production`. This ensures optimal performance.
+
+#### Build and run the frontend
+
+After initializing the backend, return to the root directory. Install all frontend dependencies by executing `yarn`. Build the frontend using `yarn build`. Start the frontend production server with `yarn start`.
+
+This will allow you to access Siren at `http://localhost:3000` by default.
+
+## Advanced configuration
+
+### About self-signed SSL certificates
+
+By default, internally, Siren is running on port 80 (plain, behind nginx), port 3000 (plain, direct), and port 443 (with SSL, behind nginx). Siren will generate and use a self-signed certificate on startup. This will generate a security warning when you try to access the interface. We recommend disabling SSL only if you access Siren over a local LAN or an otherwise highly trusted or encrypted network (e.g. a VPN).
+
+#### Generating persistent SSL certificates and installing them to your system
+
+[mkcert](https://github.com/FiloSottile/mkcert) is a tool that makes it super easy to generate a self-signed certificate that is trusted by your browser.
+
+To use it for `siren`, install it by following the instructions. Then, run `mkdir certs; mkcert -cert-file certs/cert.pem -key-file certs/key.pem 127.0.0.1 localhost` (add or replace any IP or hostname that you would use to access it at the end of this command).
+To use these generated certificates, add this to your `docker run` command: `-v $PWD/certs:/certs`.
+
+The nginx SSL config inside Siren's container expects three files: `/certs/cert.pem`, `/certs/key.pem`, and `/certs/key.pass`. If `/certs/cert.pem` does not exist, it will generate a self-signed certificate as mentioned above. If `/certs/cert.pem` does exist, it will attempt to use your provided or persisted certificates.
+
+### Configuration through environment variables
+
+For those who prefer to use environment variables to configure Siren instead of using an `.env` file, this is fully supported. In some cases this may even be preferred.
+
+#### Docker installed through `snap`
+
+If you installed Docker through a snap (e.g. on Ubuntu), Docker will have trouble accessing the `.env` file. In this case it is highly recommended to pass the config to the container with environment variables.
+Note that the defaults in `.env.example` will be used as a fallback if no other value is provided.
diff --git a/book/src/ui-faqs.md b/book/src/ui-faqs.md
index 0887875316..29de889e5f 100644
--- a/book/src/ui-faqs.md
+++ b/book/src/ui-faqs.md
@@ -10,15 +10,16 @@ The required API token may be found in the default data directory of the validat
 
 ## 3. How do I fix the Node Network Errors?
 
-If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR you can refer to the lighthouse ui configuration and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients).
+If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR, you can refer to the Lighthouse UI [`configuration`](./ui-configuration.md#configuration).
 
 ## 4. How do I connect Siren to Lighthouse from a different computer on the same network?
 
 Siren is a webapp, you can access it like any other website. We don't recommend exposing it to the internet; if you require remote access a VPN or (authenticated) reverse proxy is highly recommended.
+That being said, it is entirely possible to have it published over the internet; how to do that goes well beyond the scope of this document, but we want to emphasize once more the need for *at least* SSL encryption if you choose to do so.
 
 ## 5. How can I use Siren to monitor my validators remotely when I am not at home?
 
-Most contemporary home routers provide options for VPN access in various ways. A VPN permits a remote computer to establish a connection with internal computers within a home network. With a VPN configuration in place, connecting to the VPN enables you to treat your computer as if it is part of your local home network. The connection process involves following the setup steps for connecting via another machine on the same network on the Siren configuration page and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients).
+Most contemporary home routers provide options for VPN access in various ways. A VPN permits a remote computer to establish a connection with internal computers within a home network. With a VPN configuration in place, connecting to the VPN enables you to treat your computer as if it is part of your local home network. The connection process involves following the setup steps for connecting via another machine on the same network on the Siren configuration page and [`configuration`](./ui-configuration.md#configuration).
 
 ## 6. Does Siren support reverse proxy or DNS named addresses?
 
diff --git a/wordlist.txt b/wordlist.txt
index f06c278866..6287366cbc 100644
--- a/wordlist.txt
+++ b/wordlist.txt
@@ -194,7 +194,6 @@ rc
 reimport
 resync
 roadmap
-runtime
 rustfmt
 rustup
 schemas

From 42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db Mon Sep 17 00:00:00 2001
From: Eitan Seri-Levi
Date: Thu, 19 Dec 2024 14:09:44 +0700
Subject: [PATCH 25/25] Ensure non-zero bits for each committee bitfield comprising an aggregate (#6603)

* add new validation
---
 .../src/common/get_attesting_indices.rs | 16 +++++++++++-----
 consensus/types/src/beacon_state.rs     |  1 +
 2 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/consensus/state_processing/src/common/get_attesting_indices.rs b/consensus/state_processing/src/common/get_attesting_indices.rs
index b131f7679a..842adce431 100644
--- a/consensus/state_processing/src/common/get_attesting_indices.rs
+++ b/consensus/state_processing/src/common/get_attesting_indices.rs
@@ -103,14 +103,14 @@ pub mod attesting_indices_electra {
         let committee_count_per_slot = committees.len() as u64;
         let mut participant_count = 0;
-        for index in committee_indices {
+        for committee_index in committee_indices {
             let beacon_committee = committees
-                .get(index as usize)
-                .ok_or(Error::NoCommitteeFound(index))?;
+                .get(committee_index as usize)
+                .ok_or(Error::NoCommitteeFound(committee_index))?;
 
             // This check is new to the spec's `process_attestation` in Electra.
-            if index >= committee_count_per_slot {
-                return Err(BeaconStateError::InvalidCommitteeIndex(index));
+            if committee_index >= committee_count_per_slot {
+                return Err(BeaconStateError::InvalidCommitteeIndex(committee_index));
             }
             participant_count.safe_add_assign(beacon_committee.committee.len() as u64)?;
             let committee_attesters = beacon_committee
@@ -127,6 +127,12 @@ pub mod attesting_indices_electra {
             })
             .collect::<HashSet<u64>>();
 
+            // Require at least a single non-zero bit for each attesting committee bitfield.
+            // This check is new to the spec's `process_attestation` in Electra.
+ if committee_attesters.is_empty() { + return Err(BeaconStateError::EmptyCommittee); + } + attesting_indices.extend(committee_attesters); committee_offset.safe_add_assign(beacon_committee.committee.len())?; } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 77b72b209c..ad4484b86a 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -59,6 +59,7 @@ pub enum Error { UnknownValidator(usize), UnableToDetermineProducer, InvalidBitfield, + EmptyCommittee, ValidatorIsWithdrawable, ValidatorIsInactive { val_index: usize,
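
For readers skimming the patch, the rule it introduces can be illustrated outside Lighthouse's type machinery. Below is a minimal, self-contained Rust sketch, assuming simplified stand-in types (a `(validator_indices, aggregation_bits)` pair per committee instead of the real `BeaconCommittee` and `BitList`, and a local `Error` enum): an aggregate is rejected unless every committee named by its committee bits contributes at least one set aggregation bit.

```rust
use std::collections::HashSet;

/// Simplified stand-in for `BeaconStateError` in this sketch.
#[derive(Debug, PartialEq)]
enum Error {
    EmptyCommittee,
}

/// Collect attesting indices across committees, rejecting any committee that
/// is flagged as participating but has no aggregation bits set. Each
/// committee is modelled as `(validator_indices, aggregation_bits)`, with the
/// bits already sliced to that committee's members.
fn get_attesting_indices(committees: &[(&[u64], &[bool])]) -> Result<Vec<u64>, Error> {
    let mut attesting_indices = vec![];
    for (members, bits) in committees {
        // Keep the indices of members whose aggregation bit is set.
        let committee_attesters = members
            .iter()
            .zip(bits.iter())
            .filter_map(|(&index, &bit)| bit.then_some(index))
            .collect::<HashSet<u64>>();

        // The new Electra check: an empty contribution from a named
        // committee invalidates the whole aggregate.
        if committee_attesters.is_empty() {
            return Err(Error::EmptyCommittee);
        }
        attesting_indices.extend(committee_attesters);
    }
    attesting_indices.sort_unstable();
    Ok(attesting_indices)
}

fn main() {
    // The first committee attests, but the second is named while
    // contributing no bits: the aggregate is invalid under the new rule.
    let invalid: [(&[u64], &[bool]); 2] =
        [(&[10, 11], &[true, false]), (&[20, 21], &[false, false])];
    assert_eq!(get_attesting_indices(&invalid), Err(Error::EmptyCommittee));

    // Every named committee has at least one set bit: accepted.
    let valid: [(&[u64], &[bool]); 2] =
        [(&[10, 11], &[true, false]), (&[20, 21], &[false, true])];
    assert_eq!(get_attesting_indices(&valid), Ok(vec![10, 21]));
}
```

This mirrors the shape of the patched loop (per-committee collection, emptiness check, then `extend`), while the real code additionally validates committee indices against `committee_count_per_slot` and tracks participant counts with overflow-safe arithmetic.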