From d8c6c57029aeb245bfba712fde5bcac955790f3a Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 23 Oct 2025 13:56:09 +1100 Subject: [PATCH 01/44] Trigger backfill on startup if user switches to a supernode or semi-supernode (#8265) This PR adds backfill functionality to nodes switching to become a supernode or semi-supernode. Please note that we currently only support a CGC increase, i.e. if the node's already custodying 67 columns, switching to semi-supernode (64) will have no effect. From @eserilev > if a node's cgc increases on start up, we just need two things for custody backfill to do its thing > > - data column custody info needs to be updated to reflect the cgc change > - `CustodyContext::validator_registrations::epoch_validator_custody_requirements` needs to be updated to reflect the cgc change - [x] Add tests - [x] Test on devnet-3 - [x] switch to supernode - [x] switch to semisupernode - [x] Test on live testnets - [x] Update docs (functions) Co-Authored-By: Jimmy Chen --- beacon_node/beacon_chain/src/builder.rs | 28 +- .../beacon_chain/src/custody_context.rs | 513 +++++++++++++++--- 2 files changed, 474 insertions(+), 67 deletions(-) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 750cde14ca..719c24b956 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -931,18 +931,26 @@ where // Load the persisted custody context from the db and initialize // the context for this run - let custody_context = if let Some(custody) = + let (custody_context, cgc_changed_opt) = if let Some(custody) = load_custody_context::(store.clone()) { - Arc::new(CustodyContext::new_from_persisted_custody_context( + let head_epoch = canonical_head + .cached_head() + .head_slot() + .epoch(E::slots_per_epoch()); + CustodyContext::new_from_persisted_custody_context( custody, self.node_custody_type, + head_epoch, &self.spec, - )) + ) } else { - 
Arc::new(CustodyContext::new(self.node_custody_type, &self.spec)) + ( + CustodyContext::new(self.node_custody_type, &self.spec), + None, + ) }; - debug!(?custody_context, "Loading persisted custody context"); + debug!(?custody_context, "Loaded persisted custody context"); let beacon_chain = BeaconChain { spec: self.spec.clone(), @@ -1019,7 +1027,7 @@ where slot_clock, self.kzg.clone(), store, - custody_context, + Arc::new(custody_context), self.spec, ) .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, @@ -1062,6 +1070,14 @@ where return Err(format!("Weak subjectivity verification failed: {:?}", e)); } + if let Some(cgc_changed) = cgc_changed_opt { + // Update data column custody info if there's a CGC change from CLI flags. + // This will trigger column backfill. + let cgc_change_effective_slot = + cgc_changed.effective_epoch.start_slot(E::slots_per_epoch()); + beacon_chain.update_data_column_custody_info(Some(cgc_change_effective_slot)); + } + info!( head_state = %head.beacon_state_root(), head_block = %head.beacon_block_root, diff --git a/beacon_node/beacon_chain/src/custody_context.rs b/beacon_node/beacon_chain/src/custody_context.rs index 7ec13a8b51..0da0e7573e 100644 --- a/beacon_node/beacon_chain/src/custody_context.rs +++ b/beacon_node/beacon_chain/src/custody_context.rs @@ -7,7 +7,7 @@ use std::{ collections::{BTreeMap, HashMap}, sync::atomic::{AtomicU64, Ordering}, }; -use tracing::warn; +use tracing::{debug, warn}; use types::data_column_custody_group::{CustodyIndex, compute_columns_for_custody_group}; use types::{ChainSpec, ColumnIndex, Epoch, EthSpec, Slot}; @@ -49,6 +49,10 @@ impl ValidatorRegistrations { /// /// If a `cgc_override` value is specified, the cgc value is inserted into the registration map /// and is equivalent to registering validator(s) with the same custody requirement. 
+ /// + /// The node will backfill all the way back to either data_availability_boundary or fulu epoch, + /// and because this is a fresh node, setting the epoch to 0 is fine, as backfill will be done via + /// backfill sync instead of column backfill. fn new(cgc_override: Option) -> Self { let mut registrations = ValidatorRegistrations { validators: Default::default(), @@ -100,10 +104,9 @@ impl ValidatorRegistrations { let validator_custody_requirement = get_validators_custody_requirement(validator_custody_units, spec); - tracing::debug!( + debug!( validator_custody_units, - validator_custody_requirement, - "Registered validators" + validator_custody_requirement, "Registered validators" ); // If registering the new validator increased the total validator "units", then @@ -126,8 +129,11 @@ impl ValidatorRegistrations { } } - /// Updates the `epoch_validator_custody_requirements` map by pruning all values on/after `effective_epoch` - /// and updating the map to store the latest validator custody requirements for the `effective_epoch`. + /// Updates the `epoch -> cgc` map after custody backfill has been completed for + /// the specified epoch. + /// + /// This is done by pruning all values on/after `effective_epoch` and updating the map to store + /// the latest validator custody requirements for the `effective_epoch`. pub fn backfill_validator_custody_requirements(&mut self, effective_epoch: Epoch) { if let Some(latest_validator_custody) = self.latest_validator_custody_requirement() { // Delete records if @@ -247,39 +253,92 @@ impl CustodyContext { /// Restore the custody context from disk. /// - /// * If NodeCustodyType::custody_count < validator_custody_at_head, it means the attached - /// validate stake has increased the node's CGC. We ignore the CLI input. 
- /// * If NodeCustodyType::custody_count > validator_custody_at_head, it means the user has - /// changed the node's custody type via either the --supernode or --semi-supernode flags, - /// and will require a resync until we implement column backfill for this scenario. + /// # Behavior + /// * If [`NodeCustodyType::get_custody_count_override`] < validator_custody_at_head, it means + /// validators have increased the CGC beyond the derived CGC from cli flags. We ignore the CLI input. + /// * If [`NodeCustodyType::get_custody_count_override`] > validator_custody_at_head, it means the user has + /// changed the node's custody type via either the --supernode or --semi-supernode flags which + /// has resulted in a CGC increase. **The new CGC will be made effective from the next epoch**. + /// + /// # Returns + /// A tuple containing: + /// * `Self` - The restored custody context with updated CGC at head + /// * `Option` - `Some` if the CLI flag caused a CGC increase (triggering backfill), + /// `None` if no CGC change occurred or reduction was prevented pub fn new_from_persisted_custody_context( ssz_context: CustodyContextSsz, node_custody_type: NodeCustodyType, + head_epoch: Epoch, spec: &ChainSpec, - ) -> Self { - let cgc_override = node_custody_type.get_custody_count_override(spec); - if let Some(cgc_from_cli) = cgc_override - && cgc_from_cli > ssz_context.validator_custody_at_head - { - warn!( - info = "node will continue to run with the current custody count", - current_custody_count = ssz_context.validator_custody_at_head, - node_custody_type = ?node_custody_type, - "Changing node type is currently not supported without a resync and will have no effect", + ) -> (Self, Option) { + let CustodyContextSsz { + mut validator_custody_at_head, + mut epoch_validator_custody_requirements, + persisted_is_supernode: _, + } = ssz_context; + + let mut custody_count_changed = None; + + if let Some(cgc_from_cli) = node_custody_type.get_custody_count_override(spec) { + debug!( + 
?node_custody_type, + persisted_custody_count = validator_custody_at_head, + "Initialising from persisted custody context" ); + + if cgc_from_cli > validator_custody_at_head { + // Make the CGC from CLI effective from the next epoch + let effective_epoch = head_epoch + 1; + let old_custody_group_count = validator_custody_at_head; + validator_custody_at_head = cgc_from_cli; + + let sampling_count = spec + .sampling_size_custody_groups(cgc_from_cli) + .expect("should compute node sampling size from valid chain spec"); + + epoch_validator_custody_requirements.push((effective_epoch, cgc_from_cli)); + + custody_count_changed = Some(CustodyCountChanged { + new_custody_group_count: validator_custody_at_head, + old_custody_group_count, + sampling_count, + effective_epoch, + }); + + debug!( + info = "new CGC will be effective from the next epoch", + ?node_custody_type, + old_cgc = old_custody_group_count, + new_cgc = validator_custody_at_head, + effective_epoch = %effective_epoch, + "Node custody type change caused a custody count increase", + ); + } else if cgc_from_cli < validator_custody_at_head { + // We don't currently support reducing CGC for simplicity. + // A common scenario is that user may restart with a CLI flag, but the validators + // are only attached later, and we end up having CGC inconsistency. 
+ warn!( + info = "node will continue to run with the current custody count", + current_custody_count = validator_custody_at_head, + node_custody_type = ?node_custody_type, + "Reducing CGC is currently not supported without a resync and will have no effect", + ); + } } - CustodyContext { - validator_custody_count: AtomicU64::new(ssz_context.validator_custody_at_head), + + let custody_context = CustodyContext { + validator_custody_count: AtomicU64::new(validator_custody_at_head), validator_registrations: RwLock::new(ValidatorRegistrations { validators: Default::default(), - epoch_validator_custody_requirements: ssz_context - .epoch_validator_custody_requirements + epoch_validator_custody_requirements: epoch_validator_custody_requirements .into_iter() .collect(), }), all_custody_columns_ordered: OnceLock::new(), _phantom_data: PhantomData, - } + }; + + (custody_context, custody_count_changed) } /// Initializes an ordered list of data columns based on provided custody groups. @@ -331,7 +390,7 @@ impl CustodyContext { let current_cgc = self.validator_custody_count.load(Ordering::Relaxed); if new_validator_custody != current_cgc { - tracing::debug!( + debug!( old_count = current_cgc, new_count = new_validator_custody, "Validator count at head updated" @@ -342,10 +401,9 @@ impl CustodyContext { let updated_cgc = self.custody_group_count_at_head(spec); // Send the message to network only if there are more columns subnets to subscribe to if updated_cgc > current_cgc { - tracing::debug!( + debug!( old_cgc = current_cgc, - updated_cgc, - "Custody group count updated" + updated_cgc, "Custody group count updated" ); return Some(CustodyCountChanged { new_custody_group_count: updated_cgc, @@ -457,6 +515,8 @@ impl CustodyContext { &all_columns_ordered[..custody_group_count] } + /// The node has completed backfill for this epoch. Update the internal records so the function + /// [`Self::custody_columns_for_epoch()`] returns up-to-date results. 
pub fn update_and_backfill_custody_count_at_epoch(&self, effective_epoch: Epoch) { self.validator_registrations .write() @@ -464,8 +524,13 @@ impl CustodyContext { } } -/// The custody count changed because of a change in the -/// number of validators being managed. +/// Indicates that the custody group count (CGC) has increased. +/// +/// CGC increases can occur due to: +/// 1. Validator registrations increasing effective balance beyond current CGC +/// 2. CLI flag changes (e.g., switching to --supernode or --semi-supernode) +/// +/// This struct is used to trigger column backfill and network subnet subscription updates. pub struct CustodyCountChanged { pub new_custody_group_count: u64, pub old_custody_group_count: u64, @@ -509,6 +574,153 @@ mod tests { type E = MainnetEthSpec; + fn setup_custody_context( + spec: &ChainSpec, + head_epoch: Epoch, + epoch_and_cgc_tuples: Vec<(Epoch, u64)>, + ) -> CustodyContext { + let cgc_at_head = epoch_and_cgc_tuples.last().unwrap().1; + let ssz_context = CustodyContextSsz { + validator_custody_at_head: cgc_at_head, + persisted_is_supernode: false, + epoch_validator_custody_requirements: epoch_and_cgc_tuples, + }; + + let (custody_context, _) = CustodyContext::::new_from_persisted_custody_context( + ssz_context, + NodeCustodyType::Fullnode, + head_epoch, + spec, + ); + + let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); + custody_context + .init_ordered_data_columns_from_custody_groups(all_custody_groups_ordered, spec) + .expect("should initialise ordered data columns"); + custody_context + } + + fn complete_backfill_for_epochs( + custody_context: &CustodyContext, + start_epoch: Epoch, + end_epoch: Epoch, + ) { + assert!(start_epoch >= end_epoch); + // Call from end_epoch down to start_epoch (inclusive), simulating backfill + for epoch in (end_epoch.as_u64()..=start_epoch.as_u64()).rev() { + custody_context.update_and_backfill_custody_count_at_epoch(Epoch::new(epoch)); + } + } + + /// Helper 
function to test CGC increases when switching node custody types. + /// Verifies that CustodyCountChanged is returned with correct values and + /// that custody_group_count_at_epoch returns appropriate values for current and next epoch. + fn assert_custody_type_switch_increases_cgc( + persisted_cgc: u64, + target_node_custody_type: NodeCustodyType, + expected_new_cgc: u64, + head_epoch: Epoch, + spec: &ChainSpec, + ) { + let ssz_context = CustodyContextSsz { + validator_custody_at_head: persisted_cgc, + persisted_is_supernode: false, + epoch_validator_custody_requirements: vec![(Epoch::new(0), persisted_cgc)], + }; + + let (custody_context, custody_count_changed) = + CustodyContext::::new_from_persisted_custody_context( + ssz_context, + target_node_custody_type, + head_epoch, + spec, + ); + + // Verify CGC increased + assert_eq!( + custody_context.custody_group_count_at_head(spec), + expected_new_cgc, + "cgc should increase from {} to {}", + persisted_cgc, + expected_new_cgc + ); + + // Verify CustodyCountChanged is returned with correct values + let cgc_changed = custody_count_changed.expect("CustodyCountChanged should be returned"); + assert_eq!( + cgc_changed.new_custody_group_count, expected_new_cgc, + "new_custody_group_count should be {}", + expected_new_cgc + ); + assert_eq!( + cgc_changed.old_custody_group_count, persisted_cgc, + "old_custody_group_count should be {}", + persisted_cgc + ); + assert_eq!( + cgc_changed.effective_epoch, + head_epoch + 1, + "effective epoch should be head_epoch + 1" + ); + assert_eq!( + cgc_changed.sampling_count, + spec.sampling_size_custody_groups(expected_new_cgc) + .expect("should compute sampling size"), + "sampling_count should match expected value" + ); + + // Verify custody_group_count_at_epoch returns correct values + assert_eq!( + custody_context.custody_group_count_at_epoch(head_epoch, spec), + persisted_cgc, + "current epoch should still use old cgc ({})", + persisted_cgc + ); + assert_eq!( + 
custody_context.custody_group_count_at_epoch(head_epoch + 1, spec), + expected_new_cgc, + "next epoch should use new cgc ({})", + expected_new_cgc + ); + } + + /// Helper function to test CGC reduction prevention when switching node custody types. + /// Verifies that CGC stays at the persisted value and CustodyCountChanged is not returned. + fn assert_custody_type_switch_unchanged_cgc( + persisted_cgc: u64, + target_node_custody_type: NodeCustodyType, + head_epoch: Epoch, + spec: &ChainSpec, + ) { + let ssz_context = CustodyContextSsz { + validator_custody_at_head: persisted_cgc, + persisted_is_supernode: false, + epoch_validator_custody_requirements: vec![(Epoch::new(0), persisted_cgc)], + }; + + let (custody_context, custody_count_changed) = + CustodyContext::::new_from_persisted_custody_context( + ssz_context, + target_node_custody_type, + head_epoch, + spec, + ); + + // Verify CGC stays at persisted value (no reduction) + assert_eq!( + custody_context.custody_group_count_at_head(spec), + persisted_cgc, + "cgc should remain at {} (reduction not supported)", + persisted_cgc + ); + + // Verify no CustodyCountChanged is returned (no change occurred) + assert!( + custody_count_changed.is_none(), + "CustodyCountChanged should not be returned when CGC doesn't change" + ); + } + #[test] fn no_validators_supernode_default() { let spec = E::default_spec(); @@ -914,9 +1126,10 @@ mod tests { epoch_validator_custody_requirements: vec![], }; - let custody_context = CustodyContext::::new_from_persisted_custody_context( + let (custody_context, _) = CustodyContext::::new_from_persisted_custody_context( ssz_context, NodeCustodyType::Fullnode, + Epoch::new(0), &spec, ); @@ -927,51 +1140,155 @@ mod tests { ); } + /// Tests CLI flag change: Fullnode (CGC=0) → Supernode (CGC=128) + /// CGC should increase and trigger backfill via CustodyCountChanged. 
#[test] - fn restore_fullnode_then_switch_to_supernode_has_no_effect() { + fn restore_fullnode_then_switch_to_supernode_increases_cgc() { let spec = E::default_spec(); - let ssz_context = CustodyContextSsz { - validator_custody_at_head: 0, // no validators - persisted_is_supernode: false, - epoch_validator_custody_requirements: vec![], - }; + let head_epoch = Epoch::new(10); + let supernode_cgc = spec.number_of_custody_groups; - // Attempt to restore as supernode (wants 128), but should use original persisted value - let custody_context = CustodyContext::::new_from_persisted_custody_context( - ssz_context, + assert_custody_type_switch_increases_cgc( + 0, NodeCustodyType::Supernode, + supernode_cgc, + head_epoch, &spec, ); - - assert_eq!( - custody_context.custody_group_count_at_head(&spec), - spec.custody_requirement, - "should use original fullnode cgc, not supernode cgc" - ); } + /// Tests validator-driven CGC increase: Semi-supernode (CGC=64) → CGC=70 + /// Semi-supernode can exceed 64 when validator effective balance increases CGC. 
#[test] - fn restore_supernode_then_switch_to_fullnode_uses_persisted() { + fn restore_semi_supernode_with_validators_can_exceed_64() { let spec = E::default_spec(); - let supernode_cgc = spec.number_of_custody_groups; // supernode cgc + let semi_supernode_cgc = spec.number_of_custody_groups / 2; // 64 + let custody_context = CustodyContext::::new(NodeCustodyType::SemiSupernode, &spec); - let ssz_context = CustodyContextSsz { - validator_custody_at_head: supernode_cgc, - persisted_is_supernode: false, - epoch_validator_custody_requirements: vec![(Epoch::new(0), supernode_cgc)], - }; + // Verify initial CGC is 64 (semi-supernode) + assert_eq!( + custody_context.custody_group_count_at_head(&spec), + semi_supernode_cgc, + "initial cgc should be 64" + ); - // Attempt to restore as fullnode (wants 8), but should keep persisted value (128) - let custody_context = CustodyContext::::new_from_persisted_custody_context( - ssz_context, - NodeCustodyType::Fullnode, + // Register validators with 70 custody units (exceeding semi-supernode default) + let validator_custody_units = 70; + let current_slot = Slot::new(10); + let cgc_changed = custody_context.register_validators( + vec![( + 0, + validator_custody_units * spec.balance_per_additional_custody_group, + )], + current_slot, &spec, ); + // Verify CGC increased from 64 to 70 + assert!( + cgc_changed.is_some(), + "CustodyCountChanged should be returned" + ); + let cgc_changed = cgc_changed.unwrap(); + assert_eq!( + cgc_changed.new_custody_group_count, validator_custody_units, + "cgc should increase to 70" + ); + assert_eq!( + cgc_changed.old_custody_group_count, semi_supernode_cgc, + "old cgc should be 64" + ); + + // Verify the custody context reflects the new CGC assert_eq!( custody_context.custody_group_count_at_head(&spec), + validator_custody_units, + "custody_group_count_at_head should be 70" + ); + } + + /// Tests CLI flag change prevention: Supernode (CGC=128) → Fullnode (CGC stays 128) + /// CGC reduction is not 
supported - persisted value is retained. + #[test] + fn restore_supernode_then_switch_to_fullnode_uses_persisted() { + let spec = E::default_spec(); + let supernode_cgc = spec.number_of_custody_groups; + + assert_custody_type_switch_unchanged_cgc( supernode_cgc, - "should use persisted supernode cgc, not fullnode cgc" + NodeCustodyType::Fullnode, + Epoch::new(0), + &spec, + ); + } + + /// Tests CLI flag change prevention: Supernode (CGC=128) → Semi-supernode (CGC stays 128) + /// CGC reduction is not supported - persisted value is retained. + #[test] + fn restore_supernode_then_switch_to_semi_supernode_keeps_supernode_cgc() { + let spec = E::default_spec(); + let supernode_cgc = spec.number_of_custody_groups; + let head_epoch = Epoch::new(10); + + assert_custody_type_switch_unchanged_cgc( + supernode_cgc, + NodeCustodyType::SemiSupernode, + head_epoch, + &spec, + ); + } + + /// Tests CLI flag change: Fullnode with validators (CGC=32) → Semi-supernode (CGC=64) + /// CGC should increase and trigger backfill via CustodyCountChanged. + #[test] + fn restore_fullnode_with_validators_then_switch_to_semi_supernode() { + let spec = E::default_spec(); + let persisted_cgc = 32u64; + let semi_supernode_cgc = spec.number_of_custody_groups / 2; + let head_epoch = Epoch::new(10); + + assert_custody_type_switch_increases_cgc( + persisted_cgc, + NodeCustodyType::SemiSupernode, + semi_supernode_cgc, + head_epoch, + &spec, + ); + } + + /// Tests CLI flag change: Semi-supernode (CGC=64) → Supernode (CGC=128) + /// CGC should increase and trigger backfill via CustodyCountChanged. 
+ #[test] + fn restore_semi_supernode_then_switch_to_supernode() { + let spec = E::default_spec(); + let semi_supernode_cgc = spec.number_of_custody_groups / 2; + let supernode_cgc = spec.number_of_custody_groups; + let head_epoch = Epoch::new(10); + + assert_custody_type_switch_increases_cgc( + semi_supernode_cgc, + NodeCustodyType::Supernode, + supernode_cgc, + head_epoch, + &spec, + ); + } + + /// Tests CLI flag change: Fullnode with validators (CGC=32) → Supernode (CGC=128) + /// CGC should increase and trigger backfill via CustodyCountChanged. + #[test] + fn restore_with_cli_flag_increases_cgc_from_nonzero() { + let spec = E::default_spec(); + let persisted_cgc = 32u64; + let supernode_cgc = spec.number_of_custody_groups; + let head_epoch = Epoch::new(10); + + assert_custody_type_switch_increases_cgc( + persisted_cgc, + NodeCustodyType::Supernode, + supernode_cgc, + head_epoch, + &spec, ); } @@ -992,9 +1309,10 @@ mod tests { ], }; - let custody_context = CustodyContext::::new_from_persisted_custody_context( + let (custody_context, _) = CustodyContext::::new_from_persisted_custody_context( ssz_context, NodeCustodyType::Fullnode, + Epoch::new(20), &spec, ); @@ -1033,4 +1351,77 @@ mod tests { "sampling at epoch 25 should match final cgc" ); } + + #[test] + fn backfill_single_cgc_increase_updates_past_epochs() { + let spec = E::default_spec(); + let final_cgc = 32u64; + let default_cgc = spec.custody_requirement; + + // Setup: Node restart after validators were registered, causing CGC increase to 32 at epoch 20 + let head_epoch = Epoch::new(20); + let epoch_and_cgc_tuples = vec![(head_epoch, final_cgc)]; + let custody_context = setup_custody_context(&spec, head_epoch, epoch_and_cgc_tuples); + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(15), &spec), + default_cgc, + ); + + // Backfill from epoch 20 down to 15 (simulating backfill) + complete_backfill_for_epochs(&custody_context, head_epoch, Epoch::new(15)); + + // After backfilling to 
epoch 15, it should use latest CGC (32) + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(15), &spec), + final_cgc, + ); + assert_eq!( + custody_context + .custody_columns_for_epoch(Some(Epoch::new(15)), &spec) + .len(), + final_cgc as usize, + ); + + // Prior epoch should still return the original CGC + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(14), &spec), + default_cgc, + ); + } + + #[test] + fn backfill_with_multiple_cgc_increases_prunes_map_correctly() { + let spec = E::default_spec(); + let initial_cgc = 8u64; + let mid_cgc = 16u64; + let final_cgc = 32u64; + + // Setup: Node restart after multiple validator registrations causing CGC increases + let head_epoch = Epoch::new(20); + let epoch_and_cgc_tuples = vec![ + (Epoch::new(0), initial_cgc), + (Epoch::new(10), mid_cgc), + (head_epoch, final_cgc), + ]; + let custody_context = setup_custody_context(&spec, head_epoch, epoch_and_cgc_tuples); + + // Backfill to epoch 15 (between the two CGC increases) + complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(15)); + + // Verify epochs 15 - 20 return latest CGC (32) + for epoch in 15..=20 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + final_cgc, + ); + } + + // Verify epochs 10-14 still return mid_cgc (16) + for epoch in 10..14 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + mid_cgc, + ); + } + } } From c668cb7d9a913ac9d4ab2ffe0d5c5b854bbfc1c6 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 22 Oct 2025 22:05:08 -0700 Subject: [PATCH 02/44] Only publish reconstructed columns that we need to sample (#8269) N/A We were publishing columns all columns that we didn't already have in the da cache when reconstructing. This is unnecessary outbound bandwidth for the node that is supposed to sample fewer columns. 
This PR changes the behaviour to publish only columns that we are supposed to sample in the topics that we are subscribed to. Co-Authored-By: Pawan Dhananjay --- .../src/data_availability_checker.rs | 45 +++++++++---------- beacon_node/beacon_chain/src/metrics.rs | 2 +- 2 files changed, 22 insertions(+), 25 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index d6cc8d8947..4dbc634b24 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -617,48 +617,45 @@ impl DataAvailabilityChecker { )); }; - let data_columns_to_publish = all_data_columns - .into_iter() - .filter(|d| !existing_column_indices.contains(&d.index())) - .collect::>(); - - let Some(slot) = data_columns_to_publish - .first() - .map(|d| d.as_data_column().slot()) - else { + let Some(slot) = all_data_columns.first().map(|d| d.as_data_column().slot()) else { return Ok(DataColumnReconstructionResult::RecoveredColumnsNotImported( "No new columns to import and publish", )); }; + let columns_to_sample = self + .custody_context() + .sampling_columns_for_epoch(slot.epoch(T::EthSpec::slots_per_epoch()), &self.spec); + + // We only need to import and publish columns that we need to sample + // and columns that we haven't already received + let data_columns_to_import_and_publish = all_data_columns + .into_iter() + .filter(|d| { + columns_to_sample.contains(&d.index()) + && !existing_column_indices.contains(&d.index()) + }) + .collect::>(); + metrics::stop_timer(timer); metrics::inc_counter_by( &metrics::DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS, - data_columns_to_publish.len() as u64, + data_columns_to_import_and_publish.len() as u64, ); debug!( - count = data_columns_to_publish.len(), + count = data_columns_to_import_and_publish.len(), ?block_root, %slot, "Reconstructed columns" ); - let columns_to_sample = self - .custody_context() - 
.sampling_columns_for_epoch(slot.epoch(T::EthSpec::slots_per_epoch()), &self.spec); - let data_columns_to_import: Vec<_> = data_columns_to_publish - .iter() - .filter(|column| columns_to_sample.contains(&column.index())) - .cloned() - .collect(); - self.availability_cache - .put_kzg_verified_data_columns(*block_root, data_columns_to_import) + .put_kzg_verified_data_columns(*block_root, data_columns_to_import_and_publish.clone()) .map(|availability| { DataColumnReconstructionResult::Success(( availability, - data_columns_to_publish + data_columns_to_import_and_publish .into_iter() .map(|d| d.clone_arc()) .collect::>(), @@ -1163,8 +1160,8 @@ mod test { // Remaining 64 columns should be reconstructed assert_eq!( reconstructed_columns.len(), - 64, - "should reconstruct the remaining 64 columns" + sampling_requirement - spec.number_of_custody_groups as usize / 2, + "should reconstruct the remaining 1 columns" ); // Only the columns required for custody (65) should be imported into the cache diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 0d34ffdcd1..8f1da7b67b 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1875,7 +1875,7 @@ pub static DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS: LazyLock> LazyLock::new(|| { try_create_int_counter( "beacon_data_availability_reconstructed_columns_total", - "Total count of reconstructed columns", + "Total count of useful reconstructed columns", ) }); From 2e55a0a9c8eaad2287ae97279d2e1d7161f28470 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 23 Oct 2025 16:54:24 +1100 Subject: [PATCH 03/44] New design for blob/column pruning (#8266) We are seeing some crazy IO utilisation on Holesky now that data columns have started to expire. Our previous approach of _iterating the entire blobs DB_ doesn't seem to be scaling. 
New blob pruning algorithm that uses a backwards block iterator from the epoch we want to prune, stopping early if an already-pruned slot is encountered. Co-Authored-By: Michael Sproul --- beacon_node/store/src/hot_cold_store.rs | 146 ++++++++++++++++-------- 1 file changed, 100 insertions(+), 46 deletions(-) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 895afa4f33..a0a75dbb0d 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -146,9 +146,13 @@ impl BlockCache { pub fn delete_blobs(&mut self, block_root: &Hash256) { let _ = self.blob_cache.pop(block_root); } + pub fn delete_data_columns(&mut self, block_root: &Hash256) { + let _ = self.data_column_cache.pop(block_root); + } pub fn delete(&mut self, block_root: &Hash256) { - let _ = self.block_cache.pop(block_root); - let _ = self.blob_cache.pop(block_root); + self.delete_block(block_root); + self.delete_blobs(block_root); + self.delete_data_columns(block_root); } } @@ -2553,6 +2557,16 @@ impl, Cold: ItemStore> HotColdDB .collect() } + /// Fetch all possible data column keys for a given `block_root`. + /// + /// Unlike `get_data_column_keys`, these keys are not necessarily all present in the database, + /// due to the node's custody requirements many just store a subset. + pub fn get_all_data_column_keys(&self, block_root: Hash256) -> Vec> { + (0..E::number_of_columns() as u64) + .map(|column_index| get_data_column_key(&block_root, &column_index)) + .collect() + } + /// Fetch a single data_column for a given block from the store. pub fn get_data_column( &self, @@ -3228,13 +3242,14 @@ impl, Cold: ItemStore> HotColdDB return Err(HotColdDBError::BlobPruneLogicError.into()); }; - // Start pruning from the epoch of the oldest blob stored. - // The start epoch is inclusive (blobs in this epoch will be pruned). 
+ // The start epoch is not necessarily iterated back to, but is used for deciding whether we + // should attempt pruning. We could probably refactor it out eventually (while reducing our + // dependence on BlobInfo). let start_epoch = oldest_blob_slot.epoch(E::slots_per_epoch()); // Prune blobs up until the `data_availability_boundary - margin` or the split // slot's epoch, whichever is older. We can't prune blobs newer than the split. - // The end epoch is also inclusive (blobs in this epoch will be pruned). + // The end epoch is inclusive (blobs in this epoch will be pruned). let split = self.get_split_info(); let end_epoch = std::cmp::min( data_availability_boundary - margin_epochs - 1, @@ -3257,20 +3272,30 @@ impl, Cold: ItemStore> HotColdDB return Ok(()); } - // Sanity checks. - let anchor = self.get_anchor_info(); - if oldest_blob_slot < anchor.oldest_block_slot { - error!( - %oldest_blob_slot, - oldest_block_slot = %anchor.oldest_block_slot, - "Oldest blob is older than oldest block" + // Iterate blocks backwards from the `end_epoch` (usually the data availability boundary). + let Some((end_block_root, _)) = self + .forwards_block_roots_iterator_until(end_slot, end_slot, || { + self.get_hot_state(&split.state_root, true)? + .ok_or(HotColdDBError::MissingSplitState( + split.state_root, + split.slot, + )) + .map(|state| (state, split.state_root)) + .map_err(Into::into) + })? + .next() + .transpose()? + else { + // Can't prune blobs if we don't know the block at `end_slot`. This is expected if we + // have checkpoint synced and haven't backfilled to the DA boundary yet. + debug!( + %end_epoch, + %data_availability_boundary, + "No blobs to prune" ); - return Err(HotColdDBError::BlobPruneLogicError.into()); - } - - // Iterate block roots forwards from the oldest blob slot. 
+ return Ok(()); + }; debug!( - %start_epoch, %end_epoch, %data_availability_boundary, "Pruning blobs" @@ -3279,48 +3304,77 @@ impl, Cold: ItemStore> HotColdDB // We collect block roots of deleted blobs in memory. Even for 10y of blob history this // vec won't go beyond 1GB. We can probably optimise this out eventually. let mut removed_block_roots = vec![]; + let mut blobs_db_ops = vec![]; - let remove_blob_if = |blobs_bytes: &[u8]| { - let blobs = Vec::from_ssz_bytes(blobs_bytes)?; - let Some(blob): Option<&Arc>> = blobs.first() else { - return Ok(false); + // Iterate blocks backwards until we reach a block for which we've already pruned + // blobs/columns. + for tuple in ParentRootBlockIterator::new(self, end_block_root) { + let (block_root, blinded_block) = tuple?; + let slot = blinded_block.slot(); + + // If the block has no blobs we can't tell if they've been pruned, and there is nothing + // to prune, so we just skip. + if !blinded_block.message().body().has_blobs() { + continue; + } + + // Check if we have blobs or columns stored. If not, we assume pruning has already + // reached this point. + let (db_column, db_keys) = if blinded_block.fork_name_unchecked().fulu_enabled() { + ( + DBColumn::BeaconDataColumn, + self.get_all_data_column_keys(block_root), + ) + } else { + (DBColumn::BeaconBlob, vec![block_root.as_slice().to_vec()]) }; - if blob.slot() <= end_slot { - // Store the block root so we can delete from the blob cache - removed_block_roots.push(blob.block_root()); - // Delete from the on-disk db - return Ok(true); - }; - Ok(false) - }; + // For data columns, consider a block pruned if ALL column indices are absent. + // In future we might want to refactor this to read the data column indices that *exist* + // from the DB, which could be slightly more efficient than checking existence for every + // possible column. + let mut data_stored_for_block = false; + for db_key in db_keys { + if self.blobs_db.key_exists(db_column, &db_key)? 
{ + data_stored_for_block = true; + blobs_db_ops.push(KeyValueStoreOp::DeleteKey(db_column, db_key)); + } + } - self.blobs_db - .delete_if(DBColumn::BeaconBlob, remove_blob_if)?; - - if self.spec.is_peer_das_enabled_for_epoch(start_epoch) { - let remove_data_column_if = |blobs_bytes: &[u8]| { - let data_column: DataColumnSidecar = - DataColumnSidecar::from_ssz_bytes(blobs_bytes)?; - - if data_column.slot() <= end_slot { - return Ok(true); - }; - - Ok(false) - }; - - self.blobs_db - .delete_if(DBColumn::BeaconDataColumn, remove_data_column_if)?; + if data_stored_for_block { + debug!( + ?block_root, + %slot, + "Pruning blobs or columns for block" + ); + removed_block_roots.push(block_root); + } else { + debug!( + %slot, + ?block_root, + "Reached slot with blobs or columns already pruned" + ); + break; + } } // Remove deleted blobs from the cache. if let Some(mut block_cache) = self.block_cache.as_ref().map(|cache| cache.lock()) { for block_root in removed_block_roots { block_cache.delete_blobs(&block_root); + block_cache.delete_data_columns(&block_root); } } + // Remove from disk. 
+ if !blobs_db_ops.is_empty() { + debug!( + num_deleted = blobs_db_ops.len(), + "Deleting blobs and data columns from disk" + ); + self.blobs_db.do_atomically(blobs_db_ops)?; + } + self.update_blob_or_data_column_info(start_epoch, end_slot, blob_info, data_column_info)?; debug!("Blob pruning complete"); From b59feb042c13fa74304acb920e720efde885d3bd Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 23 Oct 2025 18:05:49 +1100 Subject: [PATCH 04/44] Release v8.0.0 rc.2 (#8255) Open PRs to include for the release - #7907 - #8247 - #8251 - #8253 - #8254 - #8265 - #8269 - #8266 Co-Authored-By: Jimmy Chen Co-Authored-By: Jimmy Chen --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 6 +++--- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 516d0df358..1efb1fbc70 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -918,7 +918,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "8.0.0-rc.1" +version = "8.0.0-rc.2" dependencies = [ "account_utils", "beacon_chain", @@ -1193,7 +1193,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "8.0.0-rc.1" +version = "8.0.0-rc.2" dependencies = [ "beacon_node", "bytes", @@ -5064,7 +5064,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "8.0.0-rc.1" +version = "8.0.0-rc.2" dependencies = [ "account_utils", "beacon_chain", @@ -5574,7 +5574,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "8.0.0-rc.1" +version = "8.0.0-rc.2" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 985f4c1752..56c2fb410c 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "8.0.0-rc.1" +version = "8.0.0-rc.2" authors = [ "Paul Hauner ", "Age Manning "] 
edition = { workspace = true } diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index a3f0ca404f..bd7b37926f 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v8.0.0-rc.1-", - fallback = "Lighthouse/v8.0.0-rc.1" + prefix = "Lighthouse/v8.0.0-rc.2-", + fallback = "Lighthouse/v8.0.0-rc.2" ); /// Returns the first eight characters of the latest commit hash for this build. @@ -54,7 +54,7 @@ pub fn version_with_platform() -> String { /// /// `1.5.1` pub fn version() -> &'static str { - "8.0.0-rc.1" + "8.0.0-rc.2" } /// Returns the name of the current client running. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index e5ed7a8926..2698073b5f 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "8.0.0-rc.1" +version = "8.0.0-rc.2" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 82bfc5056e..a3240c6d7c 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "8.0.0-rc.1" +version = "8.0.0-rc.2" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false From 4b522d760b896345070276c078a87afc735a5aea Mon Sep 17 00:00:00 2001 From: 0x19dG87 <75646722+0x19dG87@users.noreply.github.com> Date: Fri, 24 Oct 2025 04:13:39 +0300 Subject: [PATCH 05/44] Remove deprecated flag --disable-deposit-contract-sync from doc (#8124) Co-Authored-By: 0x19dG87 --- book/src/run_a_node.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md index 6c43ef5e32..bd234ff257 100644 --- a/book/src/run_a_node.md +++ 
b/book/src/run_a_node.md @@ -78,12 +78,9 @@ lighthouse bn \ --network mainnet \ --execution-endpoint http://localhost:8551 \ --execution-jwt /secrets/jwt.hex \ - --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \ - --disable-deposit-contract-sync + --checkpoint-sync-url https://mainnet.checkpoint.sigp.io ``` -Since we are not staking, we can use the `--disable-deposit-contract-sync` flag to disable syncing of deposit logs from the execution node. - Once Lighthouse runs, we can monitor the logs to see if it is syncing correctly. ## Step 4: Check logs for sync status From da5b2317205efc72b98cdc922289acefe3f76a13 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 27 Oct 2025 00:47:25 +0100 Subject: [PATCH 06/44] Prevent dropping large binary data to logs (#8290) Testing non finalized checkpoint sync noticed this log that dumps blob data in Debug format to the logs. Log only block root and commitment of each blob Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> --- beacon_node/network/src/sync/block_sidecar_coupling.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index cd9276f7e3..d5858c23f1 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -8,7 +8,7 @@ use lighthouse_network::{ }, }; use std::{collections::HashMap, sync::Arc}; -use tracing::Span; +use tracing::{Span, debug}; use types::{ BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, Hash256, RuntimeVariableList, SignedBeaconBlock, @@ -323,10 +323,10 @@ impl RangeBlockComponentsRequest { // if accumulated sidecars is not empty, log an error but return the responses // as we can still make progress. 
if blob_iter.next().is_some() { - tracing::debug!( - remaining_blobs=?blob_iter.collect::>(), - "Received sidecars that don't pair well", - ); + let remaining_blobs = blob_iter + .map(|b| (b.index, b.block_root())) + .collect::>(); + debug!(?remaining_blobs, "Received sidecars that don't pair well",); } Ok(responses) From ba706ce3bfbae506a90adc897052d70c1f8cdb39 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Mon, 27 Oct 2025 16:48:10 +0800 Subject: [PATCH 07/44] Revise logging in BlobsByRoot requests (#8296) #7756 introduces a logging issue, where the relevant log: https://github.com/sigp/lighthouse/blob/da5b2317205efc72b98cdc922289acefe3f76a13/beacon_node/network/src/network_beacon_processor/rpc_methods.rs#L380-L385 obtains the `block_root` from `slots_by_block_root.keys()`. If the `block_root` is empty (block not found in the data availability checker), then the log will not show any block root: `DEBUG BlobsByRoot outgoing response processed peer_id: 16Uiu2HAmCBxs1ZFfsbAfhSA98rUUL8Q1egLPb6WpGdKZxX6HqQYX, block_root: [], returned: 4` This PR revises to return the `block_root` in the request as a vector of block root Co-Authored-By: Tan Chee Keong --- .../network/src/network_beacon_processor/rpc_methods.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index a81595322b..ac24b648e0 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -19,7 +19,7 @@ use lighthouse_tracing::{ }; use methods::LightClientUpdatesByRangeRequest; use slot_clock::SlotClock; -use std::collections::{HashMap, hash_map::Entry}; +use std::collections::{HashMap, HashSet, hash_map::Entry}; use std::sync::Arc; use tokio_stream::StreamExt; use tracing::{Span, debug, error, field, instrument, warn}; @@ -293,6 
+293,9 @@ impl NetworkBeaconProcessor { inbound_request_id: InboundRequestId, request: BlobsByRootRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { + let requested_roots: HashSet = + request.blob_ids.iter().map(|id| id.block_root).collect(); + let mut send_blob_count = 0; let fulu_start_slot = self @@ -379,7 +382,7 @@ impl NetworkBeaconProcessor { debug!( %peer_id, - block_root = ?slots_by_block_root.keys(), + ?requested_roots, returned = send_blob_count, "BlobsByRoot outgoing response processed" ); From d67ae921125dd22e15d91aa324a27725ac03df41 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 27 Oct 2025 19:48:12 +1100 Subject: [PATCH 08/44] Implement `/lighthouse/custody/info` API (#8276) Closes: - https://github.com/sigp/lighthouse/issues/8249 New `/lighthouse/custody` API including: - [x] Earliest custodied data column slot - [x] Node CGC - [x] Custodied columns Co-Authored-By: Michael Sproul --- beacon_node/http_api/src/custody.rs | 53 +++++++++++++ beacon_node/http_api/src/lib.rs | 15 ++++ .../http_api/tests/interactive_tests.rs | 78 ++++++++++++++++++- common/eth2/src/lighthouse.rs | 15 ++++ common/eth2/src/lighthouse/custody.rs | 11 +++ consensus/types/src/chain_spec.rs | 2 +- 6 files changed, 172 insertions(+), 2 deletions(-) create mode 100644 beacon_node/http_api/src/custody.rs create mode 100644 common/eth2/src/lighthouse/custody.rs diff --git a/beacon_node/http_api/src/custody.rs b/beacon_node/http_api/src/custody.rs new file mode 100644 index 0000000000..a43b55ceca --- /dev/null +++ b/beacon_node/http_api/src/custody.rs @@ -0,0 +1,53 @@ +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::lighthouse::CustodyInfo; +use std::sync::Arc; +use types::EthSpec; +use warp_utils::reject::{custom_bad_request, custom_server_error}; + +pub fn info( + chain: Arc>, +) -> Result { + if !chain.spec.is_fulu_scheduled() { + return Err(custom_bad_request("Fulu is not scheduled".to_string())); + } + + let opt_data_column_custody_info = chain + 
.store + .get_data_column_custody_info() + .map_err(|e| custom_server_error(format!("error reading DataColumnCustodyInfo: {e:?}")))?; + + let column_data_availability_boundary = chain + .column_data_availability_boundary() + .ok_or_else(|| custom_server_error("unreachable: Fulu should be enabled".to_string()))?; + + let earliest_custodied_data_column_slot = opt_data_column_custody_info + .and_then(|info| info.earliest_data_column_slot) + .unwrap_or_else(|| { + // If there's no data column custody info/earliest data column slot, it means *column* + // backfill is not running. Block backfill could still be running, so our earliest + // available column is either the oldest block slot or the DA boundary, whichever is + // more recent. + let oldest_block_slot = chain.store.get_anchor_info().oldest_block_slot; + column_data_availability_boundary + .start_slot(T::EthSpec::slots_per_epoch()) + .max(oldest_block_slot) + }); + let earliest_custodied_data_column_epoch = + earliest_custodied_data_column_slot.epoch(T::EthSpec::slots_per_epoch()); + + // Compute the custody columns and the CGC *at the earliest custodied slot*. The node might + // have some columns prior to this, but this value is the most up-to-date view of the data the + // node is custodying. 
+ let custody_context = chain.data_availability_checker.custody_context(); + let custody_columns = custody_context + .custody_columns_for_epoch(Some(earliest_custodied_data_column_epoch), &chain.spec) + .to_vec(); + let custody_group_count = custody_context + .custody_group_count_at_epoch(earliest_custodied_data_column_epoch, &chain.spec); + + Ok(CustodyInfo { + earliest_custodied_data_column_slot, + custody_group_count, + custody_columns, + }) +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index f6d8dbc157..41cd729a68 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -13,6 +13,7 @@ mod block_packing_efficiency; mod block_rewards; mod build_block_contents; mod builder_states; +mod custody; mod database; mod light_client; mod metrics; @@ -4590,6 +4591,19 @@ pub fn serve( }, ); + // GET lighthouse/custody/info + let get_lighthouse_custody_info = warp::path("lighthouse") + .and(warp::path("custody")) + .and(warp::path("info")) + .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || custody::info(chain)) + }, + ); + // GET lighthouse/analysis/block_rewards let get_lighthouse_block_rewards = warp::path("lighthouse") .and(warp::path("analysis")) @@ -4891,6 +4905,7 @@ pub fn serve( .uor(get_lighthouse_validator_inclusion) .uor(get_lighthouse_staking) .uor(get_lighthouse_database_info) + .uor(get_lighthouse_custody_info) .uor(get_lighthouse_block_rewards) .uor(get_lighthouse_attestation_performance) .uor(get_beacon_light_client_optimistic_update) diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 94b773c32d..5b016a7de4 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -2,7 +2,9 @@ use beacon_chain::{ ChainConfig, 
chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, - test_utils::{AttestationStrategy, BlockStrategy, LightClientStrategy, SyncCommitteeStrategy}, + test_utils::{ + AttestationStrategy, BlockStrategy, LightClientStrategy, SyncCommitteeStrategy, test_spec, + }, }; use beacon_processor::{Work, WorkEvent, work_reprocessing_queue::ReprocessQueueMessage}; use eth2::types::ProduceBlockV3Response; @@ -1047,3 +1049,77 @@ async fn proposer_duties_with_gossip_tolerance() { proposer_duties_current_epoch ); } + +// Test that the `/lighthouse/custody/info` API returns the expected custody info, including the +// earliest custodied data column slot, custody group count and custody columns. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn lighthouse_custody_info() { + let mut spec = test_spec::(); + + // Skip pre-Fulu. + if !spec.is_fulu_scheduled() { + return; + } + + // Use a short DA expiry period so we can observe non-zero values for the oldest data column + // slot. + spec.min_epochs_for_blob_sidecars_requests = 2; + spec.min_epochs_for_data_column_sidecars_requests = 2; + + let validator_count = 24; + + let tester = InteractiveTester::::new(Some(spec), validator_count).await; + let harness = &tester.harness; + let spec = &harness.spec; + let client = &tester.client; + + let num_initial = 2 * E::slots_per_epoch(); + let num_secondary = 2 * E::slots_per_epoch(); + + harness.advance_slot(); + harness + .extend_chain_with_sync( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + SyncCommitteeStrategy::NoValidators, + LightClientStrategy::Disabled, + ) + .await; + + assert_eq!(harness.chain.slot().unwrap(), num_initial); + + let info = client.get_lighthouse_custody_info().await.unwrap(); + assert_eq!(info.earliest_custodied_data_column_slot, 0); + assert_eq!(info.custody_group_count, spec.custody_requirement); + assert_eq!( + info.custody_columns.len(), + info.custody_group_count as usize + ); + + // Advance the chain some more to 
expire some blobs. + harness.advance_slot(); + harness + .extend_chain_with_sync( + num_secondary as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + SyncCommitteeStrategy::NoValidators, + LightClientStrategy::Disabled, + ) + .await; + + assert_eq!(harness.chain.slot().unwrap(), num_initial + num_secondary); + + let info = client.get_lighthouse_custody_info().await.unwrap(); + assert_eq!( + info.earliest_custodied_data_column_slot, + num_initial + num_secondary + - spec.min_epochs_for_data_column_sidecars_requests * E::slots_per_epoch() + ); + assert_eq!(info.custody_group_count, spec.custody_requirement); + assert_eq!( + info.custody_columns.len(), + info.custody_group_count as usize + ); +} diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 4349b48796..f65b5a07b6 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -3,6 +3,7 @@ mod attestation_performance; mod block_packing_efficiency; mod block_rewards; +mod custody; pub mod sync_state; use crate::{ @@ -22,6 +23,7 @@ pub use block_packing_efficiency::{ BlockPackingEfficiency, BlockPackingEfficiencyQuery, ProposerInfo, UniqueAttestation, }; pub use block_rewards::{AttestationRewards, BlockReward, BlockRewardMeta, BlockRewardsQuery}; +pub use custody::CustodyInfo; // Define "legacy" implementations of `Option` which use four bytes for encoding the union // selector. @@ -193,6 +195,19 @@ impl BeaconNodeHttpClient { self.get(path).await } + /// `GET lighthouse/custody/info` + pub async fn get_lighthouse_custody_info(&self) -> Result { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("lighthouse") + .push("custody") + .push("info"); + + self.get(path).await + } + /* * Note: * diff --git a/common/eth2/src/lighthouse/custody.rs b/common/eth2/src/lighthouse/custody.rs new file mode 100644 index 0000000000..c9f9c16520 --- /dev/null +++ b/common/eth2/src/lighthouse/custody.rs @@ -0,0 +1,11 @@ +use serde::{Deserialize, Serialize}; +use types::Slot; + +#[derive(Debug, PartialEq, Deserialize, Serialize)] +pub struct CustodyInfo { + pub earliest_custodied_data_column_slot: Slot, + #[serde(with = "serde_utils::quoted_u64")] + pub custody_group_count: u64, + #[serde(with = "serde_utils::quoted_u64_vec")] + pub custody_columns: Vec, +} diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 421655777e..93f5140383 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -255,7 +255,7 @@ pub struct ChainSpec { * Networking Fulu */ pub(crate) blob_schedule: BlobSchedule, - min_epochs_for_data_column_sidecars_requests: u64, + pub min_epochs_for_data_column_sidecars_requests: u64, /* * Networking Gloas From 9baef8b8494db145802c3bea881d5ae2fb0068c6 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Mon, 27 Oct 2025 17:09:54 +0800 Subject: [PATCH 09/44] Update Lighthouse book (#8284) Co-Authored-By: Tan Chee Keong Co-Authored-By: chonghe <44791194+chong-he@users.noreply.github.com> --- book/src/advanced.md | 2 +- book/src/advanced_blobs.md | 25 ++++++++++++++++++++++++- book/src/advanced_checkpoint_sync.md | 8 ++++---- book/src/api_lighthouse.md | 2 +- book/src/faq.md | 28 +--------------------------- book/src/installation_binaries.md | 3 +-- book/src/run_a_node.md | 2 +- book/src/ui_faqs.md | 4 ++-- book/src/ui_installation.md | 2 +- book/src/validator_voluntary_exit.md | 21 ++------------------- wordlist.txt | 7 ++++++- 11 files changed, 44 insertions(+), 60 deletions(-) diff --git a/book/src/advanced.md b/book/src/advanced.md index 
76a7fed202..650b99d456 100644 --- a/book/src/advanced.md +++ b/book/src/advanced.md @@ -19,4 +19,4 @@ tips about how things work under the hood. * [Release Candidates](./advanced_release_candidates.md): latest release of Lighthouse to get feedback from users. * [Maximal Extractable Value](./advanced_builders.md): use external builders for a potential higher rewards during block proposals * [Late Block Re-orgs](./advanced_re-orgs.md): read information about Lighthouse late block re-orgs. -* [Blobs](./advanced_blobs.md): information about blobs in Deneb upgrade +* [Blobs](./advanced_blobs.md): information about blobs diff --git a/book/src/advanced_blobs.md b/book/src/advanced_blobs.md index 524f70219f..ccc29acf26 100644 --- a/book/src/advanced_blobs.md +++ b/book/src/advanced_blobs.md @@ -1,4 +1,27 @@ -# Blobs +# Data columns + +With the [Fusaka](https://ethereum.org/roadmap/fusaka) upgrade, the main feature [PeerDAS](https://ethereum.org/roadmap/fusaka#peerdas) allows storing only a portion of blob data, known as data columns, thus reducing the storage and bandwidth requirements of a full node. This however also means that a full node will not be able to serve blobs after Fusaka. To continue serving blobs, run the beacon node with `--semi-supernode` or `--supernode`. Note that this comes at a significant increase in storage and bandwidth requirements, see [this blog post about PeerDAS](https://blog.sigmaprime.io/peerdas-distributed-blob-building.html) and [Fusaka bandwidth estimation](https://ethpandaops.io/posts/fusaka-bandwidth-estimation/) for more details. + +> Note: the above assumes that the beacon node has no attached validators. If the beacon node has attached validators, then it is required to custody (store) a certain number of data columns which increases with the number of staked ETH. 
For example, if the staked ETH is `$\geq$` 2048 ETH, then due to custody requirement, it will make the beacon node a semi-supernode ; if `$\geq$` 4096 ETH, the beacon node will be a supernode without needing the flag. + +Table below summarizes the role of relevant flags in Lighthouse beacon node: + +| | Post-Deneb, Pre-Fulu || Post-Fulu || +|-------|----------|----------|-----------|----------| +| Flag | Usage | Can serve blobs? | Usage | Can serve blobs? | +| --prune-blobs false | Does not prune blobs since using the flag | Yes, for blobs since using the flag and for the past 18 days | Does not prune data columns since using the flag | No | +| --semi-supernode | - | - | Store half data columns | Yes, for blobs since using the flag for a max of 18 days | +| --supernode | - | - | Store all data columns | Yes, for blobs since using the flag for a max of 18 days | + +While both `--supernode` and `--semi-supernode` can serve blobs, a supernode will be faster to respond to blobs queries as it skips the blob reconstruction step. Running a supernode also helps the network by serving the data columns to its peers. + +Combining `--prune-blobs false` and `--supernode` (or `--semi-supernode`) implies that no data columns will be pruned, and the node will be able to serve blobs since using the flag. + +If you want historical blob data beyond the data availability period (18 days), you can backfill blobs or data columns with the experimental flag `--complete-blobs-backfill`. However, do note that this is an experimental feature and it may cause some issues, e.g., the node may block most of its peers. + +**⚠️ The following section on Blobs is archived and not maintained as blobs are stored in the form of data columns after the Fulu fork ⚠️** + +## Blobs In the Deneb network upgrade, one of the changes is the implementation of EIP-4844, also known as [Proto-danksharding](https://blog.ethereum.org/2024/02/27/dencun-mainnet-announcement). 
Alongside with this, a new term named `blob` (binary large object) is introduced. Blobs are "side-cars" carrying transaction data in a block. They are mainly used by Ethereum layer 2 operators. As far as stakers are concerned, the main difference with the introduction of blobs is the increased storage requirement. diff --git a/book/src/advanced_checkpoint_sync.md b/book/src/advanced_checkpoint_sync.md index 45aed6ef58..9cc18dda8c 100644 --- a/book/src/advanced_checkpoint_sync.md +++ b/book/src/advanced_checkpoint_sync.md @@ -82,7 +82,7 @@ Once backfill is complete, a `INFO Historical block download complete` log will 1. What if I have an existing database? How can I use checkpoint sync? The existing beacon database needs to be deleted before Lighthouse will attempt checkpoint sync. - You can do this by providing the `--purge-db` flag, or by manually deleting `/beacon`. + You can do this by providing the `--purge-db-force` flag, or by manually deleting `/beacon`. 1. Why is checkpoint sync faster? @@ -92,7 +92,7 @@ Once backfill is complete, a `INFO Historical block download complete` log will No, in fact it is more secure! Checkpoint sync guards against long-range attacks that genesis sync does not. This is due to a property of Proof of Stake consensus known as [Weak Subjectivity][weak-subj]. -## Reconstructing States +## How to run an archived node > This section is only relevant if you are interested in running an archival node for analysis > purposes. @@ -101,7 +101,7 @@ After completing backfill sync the node's database will differ from a genesis-sy lack of historic states. _You do not need these states to run a staking node_, but they are required for historical API calls (as used by block explorers and researchers). 
-You can opt-in to reconstructing all of the historic states by providing the +To run an archived node, you can opt-in to reconstructing all of the historic states by providing the `--reconstruct-historic-states` flag to the beacon node at any point (before, during or after sync). The database keeps track of three markers to determine the availability of historic blocks and @@ -155,7 +155,7 @@ The command is as following: ```bash curl -H "Accept: application/octet-stream" "http://localhost:5052/eth/v2/debug/beacon/states/$SLOT" > state.ssz curl -H "Accept: application/octet-stream" "http://localhost:5052/eth/v2/beacon/blocks/$SLOT" > block.ssz -curl -H "Accept: application/octet-stream" "http://localhost:5052/eth/v1/beacon/blob_sidecars/$SLOT" > blobs.ssz +curl -H "Accept: application/octet-stream" "http://localhost:5052/eth/v1/beacon/blobs/$SLOT" > blobs.ssz ``` where `$SLOT` is the slot number. A slot which is an epoch boundary slot (i.e., first slot of an epoch) should always be used for manual checkpoint sync. diff --git a/book/src/api_lighthouse.md b/book/src/api_lighthouse.md index 2eee8356b1..2e694989f9 100644 --- a/book/src/api_lighthouse.md +++ b/book/src/api_lighthouse.md @@ -445,7 +445,7 @@ For archive nodes, the `anchor` will be: indicating that all states with slots `>= 0` are available, i.e., full state history. For more information on the specific meanings of these fields see the docs on [Checkpoint -Sync](./advanced_checkpoint_sync.md#reconstructing-states). +Sync](./advanced_checkpoint_sync.md#how-to-run-an-archived-node). 
## `/lighthouse/merge_readiness` diff --git a/book/src/faq.md b/book/src/faq.md index 87ef288900..c9bc53533f 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -2,7 +2,6 @@ ## [Beacon Node](#beacon-node-1) -- [I see a warning about "Syncing deposit contract block cache" or an error about "updating deposit contract cache", what should I do?](#bn-deposit-contract) - [I see beacon logs showing `WARN: Execution engine called failed`, what should I do?](#bn-ee) - [I see beacon logs showing `Error during execution engine upcheck`, what should I do?](#bn-upcheck) - [My beacon node is stuck at downloading historical block using checkpoint sync. What should I do?](#bn-download-historical) @@ -51,31 +50,6 @@ ## Beacon Node -### I see a warning about "Syncing deposit contract block cache" or an error about "updating deposit contract cache", what should I do? - -The error can be a warning: - -```text -Nov 30 21:04:28.268 WARN Syncing deposit contract block cache est_blocks_remaining: initializing deposits, service: slot_notifier -``` - -or an error: - -```text -ERRO Error updating deposit contract cache error: Failed to get remote head and new block ranges: EndpointError(FarBehind), retry_millis: 60000, service: deposit_contract_rpc -``` - -This log indicates that your beacon node is downloading blocks and deposits -from your execution node. When the `est_blocks_remaining` is -`initializing_deposits`, your node is downloading deposit logs. It may stay in -this stage for several minutes. Once the deposits logs are finished -downloading, the `est_blocks_remaining` value will start decreasing. - -It is perfectly normal to see this log when starting a node for the first time -or after being off for more than several minutes. - -If this log continues appearing during operation, it means your execution client is still syncing and it cannot provide Lighthouse the information about the deposit contract yet. 
What you need to do is to make sure that the execution client is up and syncing. Once the execution client is synced, the error will disappear. - ### I see beacon logs showing `WARN: Execution engine called failed`, what should I do? The `WARN Execution engine called failed` log is shown when the beacon node cannot reach the execution engine. When this warning occurs, it will be followed by a detailed message. A frequently encountered example of the error message is: @@ -335,7 +309,7 @@ expect, there are a few things to check on: If you have incoming peers, it should return a lot of data containing information of peers. If the response is empty, it means that you have no incoming peers and there the ports are not open. You may want to double check if the port forward was correctly set up. -1. Check that you do not lower the number of peers using the flag `--target-peers`. The default is 100. A lower value set will lower the maximum number of peers your node can connect to, which may potentially interrupt the validator performance. We recommend users to leave the `--target peers` untouched to keep a diverse set of peers. +1. Check that you do not lower the number of peers using the flag `--target-peers`. The default is 200. A lower value set will lower the maximum number of peers your node can connect to, which may potentially interrupt the validator performance. We recommend users to leave the `--target peers` untouched to keep a diverse set of peers. 1. Ensure that you have a quality router for the internet connection. For example, if you connect the router to many devices including the node, it may be possible that the router cannot handle all routing tasks, hence struggling to keep up the number of peers. Therefore, using a quality router for the node is important to keep a healthy number of peers. 
diff --git a/book/src/installation_binaries.md b/book/src/installation_binaries.md index 1d5477dcf7..67a629e5c3 100644 --- a/book/src/installation_binaries.md +++ b/book/src/installation_binaries.md @@ -6,11 +6,10 @@ on Github](https://github.com/sigp/lighthouse/releases). ## Platforms -Binaries are supplied for five platforms: +Binaries are supplied for the following platforms: - `x86_64-unknown-linux-gnu`: AMD/Intel 64-bit processors (most desktops, laptops, servers) - `aarch64-unknown-linux-gnu`: 64-bit ARM processors (Raspberry Pi 4) -- `x86_64-apple-darwin`: macOS with Intel chips - `aarch64-apple-darwin`: macOS with ARM chips - `x86_64-windows`: Windows with 64-bit processors diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md index bd234ff257..4d1f917fcb 100644 --- a/book/src/run_a_node.md +++ b/book/src/run_a_node.md @@ -106,7 +106,7 @@ Once the checkpoint is loaded, Lighthouse will sync forwards to the head of the If a validator client is connected to the beacon node it will be able to start its duties as soon as forwards sync completes, which typically takes 1-2 minutes. -> Note: If you have an existing Lighthouse database, you will need to delete the database by using the `--purge-db` flag or manually delete the database with `sudo rm -r /path_to_database/beacon`. If you do use a `--purge-db` flag, once checkpoint sync is complete, you can remove the flag upon a restart. +> Note: If you have an existing Lighthouse database, you will need to delete the database by using the `--purge-db-force` flag or manually delete the database with `sudo rm -r /path_to_database/beacon`. If you do use a `--purge-db-force` flag, once checkpoint sync is complete, you can remove the flag upon a restart. 
> **Security Note**: You should cross-reference the `block_root` and `slot` of the loaded checkpoint > against a trusted source like another [public endpoint](https://eth-clients.github.io/checkpoint-sync-endpoints/), diff --git a/book/src/ui_faqs.md b/book/src/ui_faqs.md index cbfaa2c430..d6b93e6012 100644 --- a/book/src/ui_faqs.md +++ b/book/src/ui_faqs.md @@ -30,9 +30,9 @@ Yes, if you need to access your beacon or validator from an address such as `htt If your graph is not showing data, it usually means your validator node is still caching data. The application must wait at least 3 epochs before it can render any graphical visualizations. This could take up to 20min. -## 8. How can I connect to Siren using Wallet Connect? +## 8. How can I connect to Siren using Reown (previously WalletConnect)? -Depending on your configuration, building with Docker or Local, you will need to include the `NEXT_PUBLIC_WALLET_CONNECT_ID` variable in your `.env` file. To obtain your Wallet Connect project ID, please follow the instructions on their [website](https://cloud.walletconnect.com/sign-in). After providing a valid project ID, the Wallet Connect option should appear in the wallet connector dropdown. +Depending on your configuration, building with Docker or Local, you will need to include the `NEXT_PUBLIC_WALLET_CONNECT_ID` variable in your `.env` file. To obtain your Wallet Connect project ID, please follow the instructions on their [website](https://dashboard.reown.com/sign-in). After providing a valid project ID, the Wallet Connect option should appear in the wallet connector dropdown. ## 9. I can't log in to Siren even with correct credentials? 
diff --git a/book/src/ui_installation.md b/book/src/ui_installation.md index df0522f07a..5a78565004 100644 --- a/book/src/ui_installation.md +++ b/book/src/ui_installation.md @@ -13,7 +13,7 @@ Siren requires a connection to both a Lighthouse Validator Client and a Lighthou Both the Beacon node and the Validator client need to have their HTTP APIs enabled. These ports should be accessible from Siren. This means adding the flag `--http` on both beacon node and validator client. -To enable the HTTP API for the beacon node, utilize the `--gui` CLI flag. This action ensures that the HTTP API can be accessed by other software on the same machine. +To enable the HTTP API for the beacon node, utilize the `--gui` CLI flag. This action ensures that the HTTP API can be accessed by other software on the same machine. It also enables validator monitoring. > The Beacon Node must be run with the `--gui` flag set. diff --git a/book/src/validator_voluntary_exit.md b/book/src/validator_voluntary_exit.md index ff404518b7..3b660efe70 100644 --- a/book/src/validator_voluntary_exit.md +++ b/book/src/validator_voluntary_exit.md @@ -120,26 +120,9 @@ There are two types of withdrawal credentials, `0x00` and `0x01`. To check which - A fixed waiting period of 256 epochs (27.3 hours) for the validator's status to become withdrawable. -- A varying time of "validator sweep" that can take up to _n_ days with _n_ listed in the table below. The "validator sweep" is the process of skimming through all eligible validators by index number for withdrawals (those with type `0x01` and balance above 32ETH). Once the "validator sweep" reaches your validator's index, your staked fund will be fully withdrawn to the withdrawal address set. +- A varying time of "validator sweep" that takes a few days. The "validator sweep" is the process of skimming through all eligible validators by index number for withdrawals (those with type `0x01` and balance above 32ETH).
Once the "validator sweep" reaches your validator's index, your staked fund will be fully withdrawn to the withdrawal address set. -
- -| Number of eligible validators | Ideal scenario _n_ | Practical scenario _n_ | -|:----------------:|:---------------------:|:----:| -| 300000 | 2.60 | 2.63 | -| 400000 | 3.47 | 3.51 | -| 500000 | 4.34 | 4.38 | -| 600000 | 5.21 | 5.26 | -| 700000 | 6.08 | 6.14 | -| 800000 | 6.94 | 7.01 | -| 900000 | 7.81 | 7.89 | -| 1000000 | 8.68 | 8.77 | - -
- -> Note: Ideal scenario assumes no block proposals are missed. This means a total of withdrawals of 7200 blocks/day * 16 withdrawals/block = 115200 withdrawals/day. Practical scenario assumes 1% of blocks are missed per day. As an example, if there are 700000 eligible validators, one would expect a waiting time of slightly more than 6 days. - - The total time taken is the summation of the above 3 waiting periods. After these waiting periods, you will receive the staked funds in your withdrawal address. +The total time taken is the summation of the above 3 waiting periods. After these waiting periods, you will receive the staked funds in your withdrawal address. The voluntary exit and full withdrawal process is summarized in the Figure below. diff --git a/wordlist.txt b/wordlist.txt index 58c4cf6db1..6d6906f6a7 100644 --- a/wordlist.txt +++ b/wordlist.txt @@ -40,6 +40,7 @@ Exercism Extractable FFG Fulu +Fusaka Geth GiB Gitcoin @@ -70,6 +71,7 @@ NodeJS NullLogger PathBuf Pectra +PeerDAS PowerShell PPA Pre @@ -78,8 +80,9 @@ PRs Prysm QUIC QuickNode -RasPi README +RasPi +Reown RESTful Reth RHEL @@ -107,6 +110,7 @@ Validator VC VCs VPN +WalletConnect Withdrawable WSL XFS @@ -222,6 +226,7 @@ src stakers subnet subnets +supernode systemd testnet testnets From 613ce3c011902d7b939e36e2f041f30d374698b9 Mon Sep 17 00:00:00 2001 From: kevaundray Date: Mon, 27 Oct 2025 11:23:45 +0000 Subject: [PATCH 10/44] chore!: remove pub visibility on `OVERFLOW_LRU_CAPACITY` and `STATE_LRU_CAPACITY_NON_ZERO` (#8234) - Renames `OVERFLOW_LRU_CAPACITY` to `OVERFLOW_LRU_CAPACITY_NON_ZERO` to follow naming convention of `STATE_LRU_CAPACITY_NON_ZERO` - Makes `OVERFLOW_LRU_CAPACITY_NON_ZERO` and `STATE_LRU_CAPACITY_NON_ZERO` private since they are only used in this module - Moves `STATE_LRU_CAPACITY` into test module since it is only used for tests Co-Authored-By: Kevaundray Wedderburn --- beacon_node/beacon_chain/src/data_availability_checker.rs | 7 +++---- 
.../src/data_availability_checker/overflow_lru_cache.rs | 3 ++- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 4dbc634b24..644c471698 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -52,9 +52,8 @@ use types::non_zero_usize::new_non_zero_usize; /// /// `PendingComponents` are now never removed from the cache manually are only removed via LRU /// eviction to prevent race conditions (#7961), so we expect this cache to be full all the time. -pub const OVERFLOW_LRU_CAPACITY: NonZeroUsize = new_non_zero_usize(32); -pub const STATE_LRU_CAPACITY_NON_ZERO: NonZeroUsize = new_non_zero_usize(32); -pub const STATE_LRU_CAPACITY: usize = STATE_LRU_CAPACITY_NON_ZERO.get(); +const OVERFLOW_LRU_CAPACITY_NON_ZERO: NonZeroUsize = new_non_zero_usize(32); +const STATE_LRU_CAPACITY_NON_ZERO: NonZeroUsize = new_non_zero_usize(32); /// Cache to hold fully valid data that can't be imported to fork-choice yet. After Dencun hard-fork /// blocks have a sidecar of data that is received separately from the network. 
We call the concept @@ -128,7 +127,7 @@ impl DataAvailabilityChecker { spec: Arc, ) -> Result { let inner = DataAvailabilityCheckerInner::new( - OVERFLOW_LRU_CAPACITY, + OVERFLOW_LRU_CAPACITY_NON_ZERO, store, custody_context.clone(), spec.clone(), diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index b842a1a3f9..402dac1fa8 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -828,7 +828,7 @@ mod test { block_verification::PayloadVerificationOutcome, block_verification_types::{AsBlock, BlockImportData}, custody_context::NodeCustodyType, - data_availability_checker::STATE_LRU_CAPACITY, + data_availability_checker::STATE_LRU_CAPACITY_NON_ZERO, test_utils::{BaseHarnessType, BeaconChainHarness, DiskHarnessType}, }; use fork_choice::PayloadVerificationStatus; @@ -842,6 +842,7 @@ mod test { use types::{ExecPayload, MinimalEthSpec}; const LOW_VALIDATOR_COUNT: usize = 32; + const STATE_LRU_CAPACITY: usize = STATE_LRU_CAPACITY_NON_ZERO.get(); fn get_store_with_spec( db_path: &TempDir, From 5db1dff8a68c5506860ecfebede5d8507947de3c Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 28 Oct 2025 00:33:58 +0100 Subject: [PATCH 11/44] Downgrade gossip logs set to INFO level (#8288) Testing non-finality checkpoint synced these logs showed up in my INFO grep and were noisy. INFO should only include the notifier and exceptional events. I don't see why the user would care about this info. 
Downgrade to debug Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> --- .../network/src/network_beacon_processor/gossip_methods.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index fa6b5fd243..a492ece508 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -962,7 +962,7 @@ impl NetworkBeaconProcessor { match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { - info!( + debug!( %block_root, "Gossipsub blob processed - imported fully available block" ); @@ -1035,7 +1035,7 @@ impl NetworkBeaconProcessor { match &result { Ok(availability) => match availability { AvailabilityProcessingStatus::Imported(block_root) => { - info!( + debug!( %block_root, "Gossipsub data column processed, imported fully available block" ); From 6e71fd7c19c9ea54f546cebfd9aeec192070e1a1 Mon Sep 17 00:00:00 2001 From: kevaundray Date: Tue, 28 Oct 2025 01:20:43 +0000 Subject: [PATCH 12/44] chore: fix typo (#8292) Co-Authored-By: kevaundray --- consensus/types/src/fork_name.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index 338e2b1e75..1d7bf3795b 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -144,7 +144,7 @@ impl ForkName { /// Return the name of the fork immediately prior to the current one. /// - /// If `self` is `ForkName::Base` then `Base` is returned. + /// If `self` is `ForkName::Base` then `None` is returned. 
pub fn previous_fork(self) -> Option { match self { ForkName::Base => None, From 5840004c368e9b1fa689d2307adda3cef555d749 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Tue, 28 Oct 2025 11:41:08 +0800 Subject: [PATCH 13/44] Add `/lighthouse/custody/info` to Lighthouse book (#8305) Co-Authored-By: Tan Chee Keong --- book/src/api_lighthouse.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/book/src/api_lighthouse.md b/book/src/api_lighthouse.md index 2e694989f9..fa093be3f6 100644 --- a/book/src/api_lighthouse.md +++ b/book/src/api_lighthouse.md @@ -447,6 +447,27 @@ indicating that all states with slots `>= 0` are available, i.e., full state his on the specific meanings of these fields see the docs on [Checkpoint Sync](./advanced_checkpoint_sync.md#how-to-run-an-archived-node). +## `/lighthouse/custody/info` + +Information about data columns custody info. + +```bash +curl "http://localhost:5052/lighthouse/custody/info" | jq +``` + +```json +{ + "earliest_custodied_data_column_slot": "8823040", + "custody_group_count": "4", + "custody_columns": [ + "117", + "72", + "31", + "79" + ] +} +``` + ## `/lighthouse/merge_readiness` Returns the current difficulty and terminal total difficulty of the network. Before [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, you will see that the current difficulty is less than the terminal total difficulty, An example is shown below: From f5809aff87959ed39c9d9f5716dbeebb82de7b87 Mon Sep 17 00:00:00 2001 From: Mac L Date: Tue, 28 Oct 2025 08:01:09 +0400 Subject: [PATCH 14/44] Bump `ssz_types` to `v0.12.2` (#8032) https://github.com/sigp/lighthouse/issues/8012 Replace all instances of `VariableList::from` and `FixedVector::from` to their `try_from` variants. 
While I tried to use proper error handling in most cases, there were certain situations where adding an `expect` for situations where `try_from` can trivially never fail avoided adding a lot of extra complexity. Co-Authored-By: Mac L Co-Authored-By: Michael Sproul Co-Authored-By: Michael Sproul --- Cargo.lock | 6 +- Cargo.toml | 2 +- beacon_node/beacon_chain/benches/benches.rs | 7 +- .../src/attestation_verification.rs | 33 +- beacon_node/beacon_chain/src/beacon_chain.rs | 180 +++++++--- .../src/data_column_verification.rs | 12 +- beacon_node/beacon_chain/src/errors.rs | 1 + .../beacon_chain/src/fetch_blobs/tests.rs | 2 +- beacon_node/beacon_chain/src/kzg_utils.rs | 31 +- beacon_node/beacon_chain/src/test_utils.rs | 4 +- .../beacon_chain/tests/block_verification.rs | 12 +- beacon_node/beacon_chain/tests/store_tests.rs | 2 +- .../execution_layer/src/engine_api/http.rs | 74 +++-- .../src/engine_api/json_structures.rs | 310 ++++++++++-------- .../src/engine_api/new_payload_request.rs | 2 +- beacon_node/execution_layer/src/lib.rs | 17 +- .../test_utils/execution_block_generator.rs | 59 ++-- .../src/test_utils/handle_rpc.rs | 190 ++++++----- .../src/test_utils/mock_builder.rs | 2 +- beacon_node/genesis/src/common.rs | 15 +- .../lighthouse_network/src/rpc/codec.rs | 12 +- .../lighthouse_network/src/rpc/methods.rs | 18 +- .../lighthouse_network/src/rpc/protocol.rs | 6 +- .../lighthouse_network/tests/rpc_tests.rs | 30 +- .../gossip_methods.rs | 14 + common/eth2/src/types.rs | 4 +- consensus/fork_choice/tests/tests.rs | 4 +- .../src/per_block_processing.rs | 7 +- .../src/per_block_processing/tests.rs | 24 +- consensus/types/src/attestation.rs | 17 +- consensus/types/src/chain_spec.rs | 2 +- consensus/types/src/eth_spec.rs | 5 + consensus/types/src/light_client_bootstrap.rs | 48 ++- .../types/src/light_client_finality_update.rs | 12 +- consensus/types/src/light_client_update.rs | 36 +- consensus/types/src/test_utils/test_random.rs | 2 +- lcli/src/http_sync.rs | 4 +- 
slasher/src/test_utils.rs | 4 +- testing/ef_tests/src/cases/ssz_generic.rs | 13 +- 39 files changed, 758 insertions(+), 465 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1efb1fbc70..8cc058b615 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8939,14 +8939,14 @@ dependencies = [ [[package]] name = "ssz_types" -version = "0.11.0" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b55bedc9a18ed2860a46d6beb4f4082416ee1d60be0cc364cebdcdddc7afd4" +checksum = "704671195db617afa3d919da8f220f2535f20d0fa8dad96a1c27a38a5f8f6e9c" dependencies = [ "arbitrary", "ethereum_serde_utils", "ethereum_ssz", - "itertools 0.13.0", + "itertools 0.14.0", "serde", "serde_derive", "smallvec", diff --git a/Cargo.toml b/Cargo.toml index ae84d645bb..721102bd06 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -239,7 +239,7 @@ slashing_protection = { path = "validator_client/slashing_protection" } slot_clock = { path = "common/slot_clock" } smallvec = { version = "1.11.2", features = ["arbitrary"] } snap = "1" -ssz_types = "0.11.0" +ssz_types = "0.12.2" state_processing = { path = "consensus/state_processing" } store = { path = "beacon_node/store" } strum = { version = "0.24", features = ["derive"] } diff --git a/beacon_node/beacon_chain/benches/benches.rs b/beacon_node/beacon_chain/benches/benches.rs index d090fc35f7..de3ced3be1 100644 --- a/beacon_node/beacon_chain/benches/benches.rs +++ b/beacon_node/beacon_chain/benches/benches.rs @@ -26,8 +26,11 @@ fn create_test_block_and_blobs( let blobs = (0..num_of_blobs) .map(|_| Blob::::default()) .collect::>() - .into(); - let proofs = vec![KzgProof::empty(); num_of_blobs * E::number_of_columns()].into(); + .try_into() + .unwrap(); + let proofs = vec![KzgProof::empty(); num_of_blobs * E::number_of_columns()] + .try_into() + .unwrap(); (signed_block, blobs, proofs) } diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 
470664d442..9dc10a6be5 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -57,7 +57,7 @@ use state_processing::{ }; use std::borrow::Cow; use strum::AsRefStr; -use tracing::debug; +use tracing::{debug, error}; use tree_hash::TreeHash; use types::{ Attestation, AttestationData, AttestationRef, BeaconCommittee, @@ -267,6 +267,14 @@ pub enum Error { /// We were unable to process this attestation due to an internal error. It's unclear if the /// attestation is valid. BeaconChainError(Box), + /// A critical error occurred while converting SSZ types. + /// This can only occur when a VariableList was not able to be constructed from a single + /// attestation. + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. + SszTypesError(ssz_types::Error), } impl From for Error { @@ -275,6 +283,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: ssz_types::Error) -> Self { + Self::SszTypesError(e) + } +} + /// Used to avoid double-checking signatures. #[derive(Copy, Clone)] enum CheckAttestationSignature { @@ -442,7 +456,18 @@ fn process_slash_info( .spec .fork_name_at_slot::(attestation.data.slot); - let indexed_attestation = attestation.to_indexed(fork_name); + let indexed_attestation = match attestation.to_indexed(fork_name) { + Ok(indexed) => indexed, + Err(e) => { + error!( + attestation_root = ?attestation.data.tree_hash_root(), + error = ?e, + "Unable to construct VariableList from a single attestation. 
\ + This indicates a serious bug in SSZ handling" + ); + return Error::SszTypesError(e); + } + }; (indexed_attestation, true, err) } SignatureNotCheckedIndexed(indexed, err) => (indexed, true, err), @@ -932,7 +957,9 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { .spec .fork_name_at_slot::(attestation.data.slot); - let indexed_attestation = attestation.to_indexed(fork_name); + let indexed_attestation = attestation + .to_indexed(fork_name) + .map_err(|e| SignatureNotCheckedSingle(attestation, Error::SszTypesError(e)))?; let validator_index = match Self::verify_middle_checks(attestation, chain) { Ok(t) => t, diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3e02baf901..58532116e6 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -5483,11 +5483,21 @@ impl BeaconChain { randao_reveal, eth1_data, graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings_base.into(), - attestations: attestations_base.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), + proposer_slashings: proposer_slashings + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attester_slashings: attester_slashings_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attestations: attestations_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + deposits: deposits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + voluntary_exits: voluntary_exits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, _phantom: PhantomData, }, }), @@ -5504,11 +5514,21 @@ impl BeaconChain { randao_reveal, eth1_data, graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings_base.into(), - attestations: attestations_base.into(), - deposits: deposits.into(), - voluntary_exits: 
voluntary_exits.into(), + proposer_slashings: proposer_slashings + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attester_slashings: attester_slashings_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attestations: attestations_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + deposits: deposits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + voluntary_exits: voluntary_exits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, _phantom: PhantomData, @@ -5531,11 +5551,21 @@ impl BeaconChain { randao_reveal, eth1_data, graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings_base.into(), - attestations: attestations_base.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), + proposer_slashings: proposer_slashings + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attester_slashings: attester_slashings_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attestations: attestations_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + deposits: deposits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + voluntary_exits: voluntary_exits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: block_proposal_contents @@ -5563,18 +5593,30 @@ impl BeaconChain { randao_reveal, eth1_data, graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings_base.into(), - attestations: attestations_base.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), + proposer_slashings: proposer_slashings + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attester_slashings: attester_slashings_base + 
.try_into() + .map_err(BlockProductionError::SszTypesError)?, + attestations: attestations_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + deposits: deposits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + voluntary_exits: voluntary_exits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: block_proposal_contents .to_payload() .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - bls_to_execution_changes: bls_to_execution_changes.into(), + bls_to_execution_changes: bls_to_execution_changes + .try_into() + .map_err(BlockProductionError::SszTypesError)?, }, }), None, @@ -5602,17 +5644,29 @@ impl BeaconChain { randao_reveal, eth1_data, graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings_base.into(), - attestations: attestations_base.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), + proposer_slashings: proposer_slashings + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attester_slashings: attester_slashings_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attestations: attestations_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + deposits: deposits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + voluntary_exits: voluntary_exits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: payload .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - bls_to_execution_changes: bls_to_execution_changes.into(), + bls_to_execution_changes: bls_to_execution_changes + .try_into() + .map_err(BlockProductionError::SszTypesError)?, blob_kzg_commitments: kzg_commitments.ok_or( BlockProductionError::MissingKzgCommitment( "Kzg 
commitments missing from block contents".to_string(), @@ -5645,17 +5699,29 @@ impl BeaconChain { randao_reveal, eth1_data, graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings_electra.into(), - attestations: attestations_electra.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), + proposer_slashings: proposer_slashings + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attester_slashings: attester_slashings_electra + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attestations: attestations_electra + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + deposits: deposits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + voluntary_exits: voluntary_exits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: payload .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - bls_to_execution_changes: bls_to_execution_changes.into(), + bls_to_execution_changes: bls_to_execution_changes + .try_into() + .map_err(BlockProductionError::SszTypesError)?, blob_kzg_commitments: kzg_commitments .ok_or(BlockProductionError::InvalidPayloadFork)?, execution_requests: maybe_requests @@ -5687,17 +5753,29 @@ impl BeaconChain { randao_reveal, eth1_data, graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings_electra.into(), - attestations: attestations_electra.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), + proposer_slashings: proposer_slashings + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attester_slashings: attester_slashings_electra + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attestations: attestations_electra + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + deposits: deposits + .try_into() + 
.map_err(BlockProductionError::SszTypesError)?, + voluntary_exits: voluntary_exits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: payload .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - bls_to_execution_changes: bls_to_execution_changes.into(), + bls_to_execution_changes: bls_to_execution_changes + .try_into() + .map_err(BlockProductionError::SszTypesError)?, blob_kzg_commitments: kzg_commitments .ok_or(BlockProductionError::InvalidPayloadFork)?, execution_requests: maybe_requests @@ -5729,17 +5807,29 @@ impl BeaconChain { randao_reveal, eth1_data, graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings_electra.into(), - attestations: attestations_electra.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), + proposer_slashings: proposer_slashings + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attester_slashings: attester_slashings_electra + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attestations: attestations_electra + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + deposits: deposits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + voluntary_exits: voluntary_exits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: payload .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - bls_to_execution_changes: bls_to_execution_changes.into(), + bls_to_execution_changes: bls_to_execution_changes + .try_into() + .map_err(BlockProductionError::SszTypesError)?, blob_kzg_commitments: kzg_commitments .ok_or(BlockProductionError::InvalidPayloadFork)?, execution_requests: maybe_requests diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs 
b/beacon_node/beacon_chain/src/data_column_verification.rs index 07f85b045a..7a8066351a 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -868,16 +868,16 @@ mod test { let state = harness.get_current_state(); let ((block, _blobs_opt), _state) = harness .make_block_with_modifier(state, slot, |block| { - *block.body_mut().blob_kzg_commitments_mut().unwrap() = vec![].into(); + *block.body_mut().blob_kzg_commitments_mut().unwrap() = vec![].try_into().unwrap(); }) .await; let index = 0; let column_sidecar = DataColumnSidecar:: { index, - column: vec![].into(), - kzg_commitments: vec![].into(), - kzg_proofs: vec![].into(), + column: vec![].try_into().unwrap(), + kzg_commitments: vec![].try_into().unwrap(), + kzg_proofs: vec![].try_into().unwrap(), signed_block_header: block.signed_block_header(), kzg_commitments_inclusion_proof: block .message() @@ -914,7 +914,9 @@ mod test { let ((block, _blobs_opt), _state) = harness .make_block_with_modifier(state, slot, |block| { *block.body_mut().blob_kzg_commitments_mut().unwrap() = - vec![preloaded_commitments_single[0]; blob_count].into(); + vec![preloaded_commitments_single[0]; blob_count] + .try_into() + .unwrap(); }) .await; diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index d4eba2b0ea..9dc6e897fb 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -318,6 +318,7 @@ pub enum BlockProductionError { KzgError(kzg::Error), FailedToBuildBlobSidecars(String), MissingExecutionRequests, + SszTypesError(ssz_types::Error), } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs index e4855dd559..cbe2f78fbd 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs @@ -576,7 +576,7 
@@ fn create_test_block_and_blobs( .map(|(blob, proofs)| { BlobAndProof::V2(BlobAndProofV2 { blob, - proofs: proofs.to_vec().into(), + proofs: proofs.to_vec().try_into().unwrap(), }) }) .collect() diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 382775ab50..18e14587a5 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -258,7 +258,8 @@ pub(crate) fn build_data_column_sidecars( .get(col) .ok_or(format!("Missing blob cell at index {col}"))?; let cell: Vec = cell.to_vec(); - let cell = Cell::::from(cell); + let cell = + Cell::::try_from(cell).map_err(|e| format!("BytesPerCell exceeded: {e:?}"))?; let proof = blob_cell_proofs .get(col) @@ -276,23 +277,27 @@ pub(crate) fn build_data_column_sidecars( } } - let sidecars: Vec>> = columns + let sidecars: Result>>, String> = columns .into_iter() .zip(column_kzg_proofs) .enumerate() - .map(|(index, (col, proofs))| { - Arc::new(DataColumnSidecar { - index: index as u64, - column: DataColumn::::from(col), - kzg_commitments: kzg_commitments.clone(), - kzg_proofs: VariableList::from(proofs), - signed_block_header: signed_block_header.clone(), - kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), - }) - }) + .map( + |(index, (col, proofs))| -> Result>, String> { + Ok(Arc::new(DataColumnSidecar { + index: index as u64, + column: DataColumn::::try_from(col) + .map_err(|e| format!("MaxBlobCommitmentsPerBlock exceeded: {e:?}"))?, + kzg_commitments: kzg_commitments.clone(), + kzg_proofs: VariableList::try_from(proofs) + .map_err(|e| format!("MaxBlobCommitmentsPerBlock exceeded: {e:?}"))?, + signed_block_header: signed_block_header.clone(), + kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), + })) + }, + ) .collect(); - Ok(sidecars) + sidecars } /// Reconstruct blobs from a subset of data column sidecars (requires at least 50%). 
diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 38797d0264..c1d1d9de67 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -2324,7 +2324,7 @@ where .collect::>(); // Building a VarList from leaves - let deposit_data_list = VariableList::<_, U4294967296>::from(leaves.clone()); + let deposit_data_list = VariableList::<_, U4294967296>::try_from(leaves.clone()).unwrap(); // Setting the deposit_root to be the tree_hash_root of the VarList state.eth1_data_mut().deposit_root = deposit_data_list.tree_hash_root(); @@ -2348,7 +2348,7 @@ where let deposits = datas .into_par_iter() .zip(proofs.into_par_iter()) - .map(|(data, proof)| (data, proof.into())) + .map(|(data, proof)| (data, proof.try_into().unwrap())) .map(|(data, proof)| Deposit { proof, data }) .collect::>(); diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 7dfef50ea1..3d1fa8f4af 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -707,7 +707,7 @@ async fn invalid_signature_attester_slashing() { let attester_slashing = if fork_name.electra_enabled() { let indexed_attestation = IndexedAttestationElectra { - attesting_indices: vec![0].into(), + attesting_indices: vec![0].try_into().unwrap(), data: AttestationData { slot: Slot::new(0), index: 0, @@ -731,7 +731,7 @@ async fn invalid_signature_attester_slashing() { AttesterSlashing::Electra(attester_slashing) } else { let indexed_attestation = IndexedAttestationBase { - attesting_indices: vec![0].into(), + attesting_indices: vec![0].try_into().unwrap(), data: AttestationData { slot: Slot::new(0), index: 0, @@ -898,7 +898,9 @@ async fn invalid_signature_deposit() { let harness = get_invalid_sigs_harness(&chain_segment).await; let mut snapshots = chain_segment.clone(); let deposit = Deposit { - proof: 
vec![Hash256::zero(); DEPOSIT_TREE_DEPTH + 1].into(), + proof: vec![Hash256::zero(); DEPOSIT_TREE_DEPTH + 1] + .try_into() + .unwrap(), data: DepositData { pubkey: Keypair::random().pk.into(), withdrawal_credentials: Hash256::zero(), @@ -1270,7 +1272,9 @@ async fn block_gossip_verification() { as usize; if let Ok(kzg_commitments) = block.body_mut().blob_kzg_commitments_mut() { - *kzg_commitments = vec![KzgCommitment::empty_for_testing(); kzg_commitments_len + 1].into(); + *kzg_commitments = vec![KzgCommitment::empty_for_testing(); kzg_commitments_len + 1] + .try_into() + .unwrap(); assert!( matches!( unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 53e841692e..0a261e36ce 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1415,7 +1415,7 @@ async fn proposer_shuffling_changing_with_lookahead() { let execution_requests = ExecutionRequests:: { deposits: VariableList::new(vec![deposit_request]).unwrap(), - withdrawals: vec![].into(), + withdrawals: vec![].try_into().unwrap(), consolidations: VariableList::new(vec![consolidation_request]).unwrap(), }; diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index bc927e19b4..74fb078510 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -768,7 +768,7 @@ impl HttpJsonRpc { &self, execution_payload: ExecutionPayload, ) -> Result { - let params = json!([JsonExecutionPayload::from(execution_payload)]); + let params = json!([JsonExecutionPayload::try_from(execution_payload)?]); let response: JsonPayloadStatusV1 = self .rpc_request( @@ -785,7 +785,7 @@ impl HttpJsonRpc { &self, execution_payload: ExecutionPayload, ) -> Result { - let params = 
json!([JsonExecutionPayload::from(execution_payload)]); + let params = json!([JsonExecutionPayload::try_from(execution_payload)?]); let response: JsonPayloadStatusV1 = self .rpc_request( @@ -803,7 +803,12 @@ impl HttpJsonRpc { new_payload_request_deneb: NewPayloadRequestDeneb<'_, E>, ) -> Result { let params = json!([ - JsonExecutionPayload::Deneb(new_payload_request_deneb.execution_payload.clone().into()), + JsonExecutionPayload::Deneb( + new_payload_request_deneb + .execution_payload + .clone() + .try_into()? + ), new_payload_request_deneb.versioned_hashes, new_payload_request_deneb.parent_beacon_block_root, ]); @@ -825,7 +830,10 @@ impl HttpJsonRpc { ) -> Result { let params = json!([ JsonExecutionPayload::Electra( - new_payload_request_electra.execution_payload.clone().into() + new_payload_request_electra + .execution_payload + .clone() + .try_into()? ), new_payload_request_electra.versioned_hashes, new_payload_request_electra.parent_beacon_block_root, @@ -850,7 +858,12 @@ impl HttpJsonRpc { new_payload_request_fulu: NewPayloadRequestFulu<'_, E>, ) -> Result { let params = json!([ - JsonExecutionPayload::Fulu(new_payload_request_fulu.execution_payload.clone().into()), + JsonExecutionPayload::Fulu( + new_payload_request_fulu + .execution_payload + .clone() + .try_into()? + ), new_payload_request_fulu.versioned_hashes, new_payload_request_fulu.parent_beacon_block_root, new_payload_request_fulu @@ -874,7 +887,12 @@ impl HttpJsonRpc { new_payload_request_gloas: NewPayloadRequestGloas<'_, E>, ) -> Result { let params = json!([ - JsonExecutionPayload::Gloas(new_payload_request_gloas.execution_payload.clone().into()), + JsonExecutionPayload::Gloas( + new_payload_request_gloas + .execution_payload + .clone() + .try_into()? 
+ ), new_payload_request_gloas.versioned_hashes, new_payload_request_gloas.parent_beacon_block_root, new_payload_request_gloas @@ -1125,10 +1143,14 @@ impl HttpJsonRpc { ) .await?; - Ok(response + response .into_iter() - .map(|opt_json| opt_json.map(From::from)) - .collect()) + .map(|opt_json| { + opt_json + .map(|json| json.try_into().map_err(Error::from)) + .transpose() + }) + .collect::, _>>() } pub async fn get_payload_bodies_by_range_v1( @@ -1149,10 +1171,14 @@ impl HttpJsonRpc { ) .await?; - Ok(response + response .into_iter() - .map(|opt_json| opt_json.map(From::from)) - .collect()) + .map(|opt_json| { + opt_json + .map(|json| json.try_into().map_err(Error::from)) + .transpose() + }) + .collect::, _>>() } pub async fn exchange_capabilities(&self) -> Result { @@ -1814,16 +1840,16 @@ mod test { fee_recipient: Address::repeat_byte(1), state_root: Hash256::repeat_byte(1), receipts_root: Hash256::repeat_byte(0), - logs_bloom: vec![1; 256].into(), + logs_bloom: vec![1; 256].try_into().unwrap(), prev_randao: Hash256::repeat_byte(1), block_number: 0, gas_limit: 1, gas_used: 2, timestamp: 42, - extra_data: vec![].into(), + extra_data: vec![].try_into().unwrap(), base_fee_per_gas: Uint256::from(1), block_hash: ExecutionBlockHash::repeat_byte(1), - transactions: vec![].into(), + transactions: vec![].try_into().unwrap(), }, )) .await; @@ -1861,16 +1887,16 @@ mod test { fee_recipient: Address::repeat_byte(1), state_root: Hash256::repeat_byte(1), receipts_root: Hash256::repeat_byte(0), - logs_bloom: vec![1; 256].into(), + logs_bloom: vec![1; 256].try_into().unwrap(), prev_randao: Hash256::repeat_byte(1), block_number: 0, gas_limit: 1, gas_used: 2, timestamp: 42, - extra_data: vec![].into(), + extra_data: vec![].try_into().unwrap(), base_fee_per_gas: Uint256::from(1), block_hash: ExecutionBlockHash::repeat_byte(1), - transactions: vec![].into(), + transactions: vec![].try_into().unwrap(), }, )) .await @@ -2071,16 +2097,16 @@ mod test { fee_recipient: 
Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), receipts_root: Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), - logs_bloom: vec![0; 256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: Hash256::zero(), block_number: 1, gas_limit: u64::from_str_radix("1c95111",16).unwrap(), gas_used: 0, timestamp: 5, - extra_data: vec![].into(), + extra_data: vec![].try_into().unwrap(), base_fee_per_gas: Uint256::from(7), block_hash: ExecutionBlockHash::from_str("0x6359b8381a370e2f54072a5784ddd78b6ed024991558c511d4452eb4f6ac898c").unwrap(), - transactions: vec![].into(), + transactions: vec![].try_into().unwrap(), }); assert_eq!(payload, expected); @@ -2096,16 +2122,16 @@ mod test { fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), receipts_root: Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), - logs_bloom: vec![0; 256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: Hash256::zero(), block_number: 1, gas_limit: u64::from_str_radix("1c9c380",16).unwrap(), gas_used: 0, timestamp: 5, - extra_data: vec![].into(), + extra_data: vec![].try_into().unwrap(), base_fee_per_gas: Uint256::from(7), block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), - transactions: vec![].into(), + transactions: vec![].try_into().unwrap(), })) .await; }, diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 33decd4ec8..cc46070325 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ 
b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,7 +1,8 @@ use super::*; use alloy_rlp::RlpEncodable; use serde::{Deserialize, Serialize}; -use ssz::Decode; +use ssz::{Decode, TryFromIter}; +use ssz_types::{FixedVector, VariableList, typenum::Unsigned}; use strum::EnumString; use superstruct::superstruct; use types::beacon_block_body::KzgCommitments; @@ -9,7 +10,7 @@ use types::blob_sidecar::BlobsList; use types::execution_requests::{ ConsolidationRequests, DepositRequests, RequestType, WithdrawalRequests, }; -use types::{Blob, FixedVector, KzgProof, Unsigned}; +use types::{Blob, KzgProof}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -130,9 +131,11 @@ impl From> for JsonExecutionPayloadBell } } } -impl From> for JsonExecutionPayloadCapella { - fn from(payload: ExecutionPayloadCapella) -> Self { - JsonExecutionPayloadCapella { +impl TryFrom> for JsonExecutionPayloadCapella { + type Error = ssz_types::Error; + + fn try_from(payload: ExecutionPayloadCapella) -> Result { + Ok(JsonExecutionPayloadCapella { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -147,18 +150,15 @@ impl From> for JsonExecutionPayloadCapell base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), - } + withdrawals: withdrawals_to_json(payload.withdrawals)?, + }) } } -impl From> for JsonExecutionPayloadDeneb { - fn from(payload: ExecutionPayloadDeneb) -> Self { - JsonExecutionPayloadDeneb { +impl TryFrom> for JsonExecutionPayloadDeneb { + type Error = ssz_types::Error; + + fn try_from(payload: ExecutionPayloadDeneb) -> Result { + Ok(JsonExecutionPayloadDeneb { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -173,21 +173,18 @@ impl From> for JsonExecutionPayloadDeneb 
base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), + withdrawals: withdrawals_to_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - } + }) } } -impl From> for JsonExecutionPayloadElectra { - fn from(payload: ExecutionPayloadElectra) -> Self { - JsonExecutionPayloadElectra { +impl TryFrom> for JsonExecutionPayloadElectra { + type Error = ssz_types::Error; + + fn try_from(payload: ExecutionPayloadElectra) -> Result { + Ok(JsonExecutionPayloadElectra { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -202,21 +199,18 @@ impl From> for JsonExecutionPayloadElectr base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), + withdrawals: withdrawals_to_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - } + }) } } -impl From> for JsonExecutionPayloadFulu { - fn from(payload: ExecutionPayloadFulu) -> Self { - JsonExecutionPayloadFulu { +impl TryFrom> for JsonExecutionPayloadFulu { + type Error = ssz_types::Error; + + fn try_from(payload: ExecutionPayloadFulu) -> Result { + Ok(JsonExecutionPayloadFulu { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -231,21 +225,18 @@ impl From> for JsonExecutionPayloadFulu { base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), + withdrawals: withdrawals_to_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: 
payload.excess_blob_gas, - } + }) } } -impl From> for JsonExecutionPayloadGloas { - fn from(payload: ExecutionPayloadGloas) -> Self { - JsonExecutionPayloadGloas { +impl TryFrom> for JsonExecutionPayloadGloas { + type Error = ssz_types::Error; + + fn try_from(payload: ExecutionPayloadGloas) -> Result { + Ok(JsonExecutionPayloadGloas { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -260,27 +251,34 @@ impl From> for JsonExecutionPayloadGloas base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), + withdrawals: withdrawals_to_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - } + }) } } -impl From> for JsonExecutionPayload { - fn from(execution_payload: ExecutionPayload) -> Self { +impl TryFrom> for JsonExecutionPayload { + type Error = ssz_types::Error; + + fn try_from(execution_payload: ExecutionPayload) -> Result { match execution_payload { - ExecutionPayload::Bellatrix(payload) => JsonExecutionPayload::Bellatrix(payload.into()), - ExecutionPayload::Capella(payload) => JsonExecutionPayload::Capella(payload.into()), - ExecutionPayload::Deneb(payload) => JsonExecutionPayload::Deneb(payload.into()), - ExecutionPayload::Electra(payload) => JsonExecutionPayload::Electra(payload.into()), - ExecutionPayload::Fulu(payload) => JsonExecutionPayload::Fulu(payload.into()), - ExecutionPayload::Gloas(payload) => JsonExecutionPayload::Gloas(payload.into()), + ExecutionPayload::Bellatrix(payload) => { + Ok(JsonExecutionPayload::Bellatrix(payload.into())) + } + ExecutionPayload::Capella(payload) => { + Ok(JsonExecutionPayload::Capella(payload.try_into()?)) + } + ExecutionPayload::Deneb(payload) => { + Ok(JsonExecutionPayload::Deneb(payload.try_into()?)) + } + ExecutionPayload::Electra(payload) => { + 
Ok(JsonExecutionPayload::Electra(payload.try_into()?)) + } + ExecutionPayload::Fulu(payload) => Ok(JsonExecutionPayload::Fulu(payload.try_into()?)), + ExecutionPayload::Gloas(payload) => { + Ok(JsonExecutionPayload::Gloas(payload.try_into()?)) + } } } } @@ -305,9 +303,11 @@ impl From> for ExecutionPayloadBell } } } -impl From> for ExecutionPayloadCapella { - fn from(payload: JsonExecutionPayloadCapella) -> Self { - ExecutionPayloadCapella { +impl TryFrom> for ExecutionPayloadCapella { + type Error = ssz_types::Error; + + fn try_from(payload: JsonExecutionPayloadCapella) -> Result { + Ok(ExecutionPayloadCapella { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -322,19 +322,16 @@ impl From> for ExecutionPayloadCapell base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), - } + withdrawals: withdrawals_from_json(payload.withdrawals)?, + }) } } -impl From> for ExecutionPayloadDeneb { - fn from(payload: JsonExecutionPayloadDeneb) -> Self { - ExecutionPayloadDeneb { +impl TryFrom> for ExecutionPayloadDeneb { + type Error = ssz_types::Error; + + fn try_from(payload: JsonExecutionPayloadDeneb) -> Result { + Ok(ExecutionPayloadDeneb { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -349,21 +346,18 @@ impl From> for ExecutionPayloadDeneb base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), + withdrawals: withdrawals_from_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - } + }) } } -impl From> for ExecutionPayloadElectra { - fn from(payload: JsonExecutionPayloadElectra) -> Self { - 
ExecutionPayloadElectra { +impl TryFrom> for ExecutionPayloadElectra { + type Error = ssz_types::Error; + + fn try_from(payload: JsonExecutionPayloadElectra) -> Result { + Ok(ExecutionPayloadElectra { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -378,21 +372,18 @@ impl From> for ExecutionPayloadElectr base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), + withdrawals: withdrawals_from_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - } + }) } } -impl From> for ExecutionPayloadFulu { - fn from(payload: JsonExecutionPayloadFulu) -> Self { - ExecutionPayloadFulu { +impl TryFrom> for ExecutionPayloadFulu { + type Error = ssz_types::Error; + + fn try_from(payload: JsonExecutionPayloadFulu) -> Result { + Ok(ExecutionPayloadFulu { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -407,21 +398,18 @@ impl From> for ExecutionPayloadFulu { base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), + withdrawals: withdrawals_from_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - } + }) } } -impl From> for ExecutionPayloadGloas { - fn from(payload: JsonExecutionPayloadGloas) -> Self { - ExecutionPayloadGloas { +impl TryFrom> for ExecutionPayloadGloas { + type Error = ssz_types::Error; + + fn try_from(payload: JsonExecutionPayloadGloas) -> Result { + Ok(ExecutionPayloadGloas { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -436,27 +424,34 @@ impl From> for ExecutionPayloadGloas 
base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), + withdrawals: withdrawals_from_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - } + }) } } -impl From> for ExecutionPayload { - fn from(json_execution_payload: JsonExecutionPayload) -> Self { +impl TryFrom> for ExecutionPayload { + type Error = ssz_types::Error; + + fn try_from(json_execution_payload: JsonExecutionPayload) -> Result { match json_execution_payload { - JsonExecutionPayload::Bellatrix(payload) => ExecutionPayload::Bellatrix(payload.into()), - JsonExecutionPayload::Capella(payload) => ExecutionPayload::Capella(payload.into()), - JsonExecutionPayload::Deneb(payload) => ExecutionPayload::Deneb(payload.into()), - JsonExecutionPayload::Electra(payload) => ExecutionPayload::Electra(payload.into()), - JsonExecutionPayload::Fulu(payload) => ExecutionPayload::Fulu(payload.into()), - JsonExecutionPayload::Gloas(payload) => ExecutionPayload::Gloas(payload.into()), + JsonExecutionPayload::Bellatrix(payload) => { + Ok(ExecutionPayload::Bellatrix(payload.into())) + } + JsonExecutionPayload::Capella(payload) => { + Ok(ExecutionPayload::Capella(payload.try_into()?)) + } + JsonExecutionPayload::Deneb(payload) => { + Ok(ExecutionPayload::Deneb(payload.try_into()?)) + } + JsonExecutionPayload::Electra(payload) => { + Ok(ExecutionPayload::Electra(payload.try_into()?)) + } + JsonExecutionPayload::Fulu(payload) => Ok(ExecutionPayload::Fulu(payload.try_into()?)), + JsonExecutionPayload::Gloas(payload) => { + Ok(ExecutionPayload::Gloas(payload.try_into()?)) + } } } } @@ -590,13 +585,17 @@ impl TryFrom> for GetPayloadResponse { } JsonGetPayloadResponse::Capella(response) => { Ok(GetPayloadResponse::Capella(GetPayloadResponseCapella { - execution_payload: response.execution_payload.into(), + 
execution_payload: response.execution_payload.try_into().map_err(|e| { + format!("Failed to convert json to execution payload: {:?}", e) + })?, block_value: response.block_value, })) } JsonGetPayloadResponse::Deneb(response) => { Ok(GetPayloadResponse::Deneb(GetPayloadResponseDeneb { - execution_payload: response.execution_payload.into(), + execution_payload: response.execution_payload.try_into().map_err(|e| { + format!("Failed to convert json to execution payload: {:?}", e) + })?, block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, @@ -604,34 +603,40 @@ impl TryFrom> for GetPayloadResponse { } JsonGetPayloadResponse::Electra(response) => { Ok(GetPayloadResponse::Electra(GetPayloadResponseElectra { - execution_payload: response.execution_payload.into(), + execution_payload: response.execution_payload.try_into().map_err(|e| { + format!("Failed to convert json to execution payload: {:?}", e) + })?, block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, requests: response.execution_requests.try_into().map_err(|e| { - format!("Failed to convert json to execution requests : {:?}", e) + format!("Failed to convert json to execution requests: {:?}", e) })?, })) } JsonGetPayloadResponse::Fulu(response) => { Ok(GetPayloadResponse::Fulu(GetPayloadResponseFulu { - execution_payload: response.execution_payload.into(), + execution_payload: response.execution_payload.try_into().map_err(|e| { + format!("Failed to convert json to execution payload: {:?}", e) + })?, block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, requests: response.execution_requests.try_into().map_err(|e| { - format!("Failed to convert json to execution requests {:?}", e) + format!("Failed to convert json to execution requests: {:?}", e) })?, })) } 
JsonGetPayloadResponse::Gloas(response) => { Ok(GetPayloadResponse::Gloas(GetPayloadResponseGloas { - execution_payload: response.execution_payload.into(), + execution_payload: response.execution_payload.try_into().map_err(|e| { + format!("Failed to convert json to execution payload: {:?}", e) + })?, block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, requests: response.execution_requests.try_into().map_err(|e| { - format!("Failed to convert json to execution requests {:?}", e) + format!("Failed to convert json to execution requests: {:?}", e) })?, })) } @@ -673,6 +678,26 @@ impl From for Withdrawal { } } } + +// Helper functions to convert between `VariableList` and `VariableList`. +fn withdrawals_to_json( + list: VariableList, +) -> Result, ssz_types::Error> +where + N: Unsigned, +{ + VariableList::try_from_iter(list.into_iter().map(Into::into)) +} + +fn withdrawals_from_json( + list: VariableList, +) -> Result, ssz_types::Error> +where + N: Unsigned, +{ + VariableList::try_from_iter(list.into_iter().map(Into::into)) +} + #[derive(Debug, PartialEq, Clone, RlpEncodable)] pub struct EncodableJsonWithdrawal<'a> { pub index: u64, @@ -976,30 +1001,25 @@ pub struct JsonExecutionPayloadBodyV1 { pub withdrawals: Option>, } -impl From> for ExecutionPayloadBodyV1 { - fn from(value: JsonExecutionPayloadBodyV1) -> Self { - Self { +impl TryFrom> for ExecutionPayloadBodyV1 { + type Error = ssz_types::Error; + + fn try_from(value: JsonExecutionPayloadBodyV1) -> Result { + Ok(Self { transactions: value.transactions, - withdrawals: value.withdrawals.map(|json_withdrawals| { - Withdrawals::::from( - json_withdrawals - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - } + withdrawals: value.withdrawals.map(withdrawals_from_json).transpose()?, + }) } } -impl From> for JsonExecutionPayloadBodyV1 { - fn from(value: ExecutionPayloadBodyV1) -> Self { - Self { +impl TryFrom> for 
JsonExecutionPayloadBodyV1 { + type Error = ssz_types::Error; + + fn try_from(value: ExecutionPayloadBodyV1) -> Result { + Ok(Self { transactions: value.transactions, - withdrawals: value.withdrawals.map(|withdrawals| { - VariableList::from(withdrawals.into_iter().map(Into::into).collect::>()) - }), - } + withdrawals: value.withdrawals.map(withdrawals_to_json).transpose()?, + }) } } diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index aa5261c80b..617d2e0112 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -337,7 +337,7 @@ mod test { *beacon_block .body_mut() .blob_kzg_commitments_mut() - .expect("should get commitments") = commitments.into(); + .expect("should get commitments") = commitments.try_into().unwrap(); let new_payload_request = NewPayloadRequest::try_from(beacon_block.to_ref()) .expect("should create new payload request"); diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index a5fa0f3415..4175abf724 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -171,11 +171,18 @@ pub enum Error { InvalidPayloadBody(String), InvalidPayloadConversion, InvalidBlobConversion(String), + SszTypesError(ssz_types::Error), BeaconStateError(BeaconStateError), PayloadTypeMismatch, VerifyingVersionedHashes(versioned_hashes::Error), } +impl From for Error { + fn from(e: ssz_types::Error) -> Self { + Error::SszTypesError(e) + } +} + impl From for Error { fn from(e: BeaconStateError) -> Self { Error::BeaconStateError(e) @@ -2102,6 +2109,7 @@ enum InvalidBuilderPayload { payload: u64, expected: u64, }, + SszTypesError(ssz_types::Error), } impl fmt::Display for InvalidBuilderPayload { @@ -2143,6 +2151,7 @@ impl fmt::Display for InvalidBuilderPayload { InvalidBuilderPayload::GasLimitMismatch 
{ payload, expected } => { write!(f, "payload gas limit was {} not {}", payload, expected) } + Self::SszTypesError(e) => write!(f, "{:?}", e), } } } @@ -2198,7 +2207,13 @@ fn verify_builder_bid( .withdrawals() .ok() .cloned() - .map(|withdrawals| Withdrawals::::from(withdrawals).tree_hash_root()); + .map(|withdrawals| { + Withdrawals::::try_from(withdrawals) + .map_err(InvalidBuilderPayload::SszTypesError) + .map(|w| w.tree_hash_root()) + }) + .transpose()?; + let payload_withdrawals_root = header.withdrawals_root().ok(); let expected_gas_limit = proposer_gas_limit .and_then(|target_gas_limit| expected_gas_limit(parent_gas_limit, target_gas_limit, spec)); diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 4836f9307c..29c764ee30 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -40,7 +40,7 @@ pub enum Block { } pub fn mock_el_extra_data() -> types::VariableList { - "block gen was here".as_bytes().to_vec().into() + "block gen was here".as_bytes().to_vec().try_into().unwrap() } impl Block { @@ -602,7 +602,7 @@ impl ExecutionBlockGenerator { fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, gas_limit: DEFAULT_GAS_LIMIT, @@ -611,7 +611,7 @@ impl ExecutionBlockGenerator { extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), + transactions: vec![].try_into().unwrap(), }), PayloadAttributes::V2(pa) => match self.get_fork_at_timestamp(pa.timestamp) { ForkName::Bellatrix => ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { @@ 
-619,7 +619,7 @@ impl ExecutionBlockGenerator { fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, gas_limit: DEFAULT_GAS_LIMIT, @@ -628,14 +628,14 @@ impl ExecutionBlockGenerator { extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), + transactions: vec![].try_into().unwrap(), }), ForkName::Capella => ExecutionPayload::Capella(ExecutionPayloadCapella { parent_hash: head_block_hash, fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, gas_limit: DEFAULT_GAS_LIMIT, @@ -644,8 +644,8 @@ impl ExecutionBlockGenerator { extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - withdrawals: pa.withdrawals.clone().into(), + transactions: vec![].try_into().unwrap(), + withdrawals: pa.withdrawals.clone().try_into().unwrap(), }), _ => unreachable!(), }, @@ -655,7 +655,7 @@ impl ExecutionBlockGenerator { fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, gas_limit: DEFAULT_GAS_LIMIT, @@ -664,8 +664,8 @@ impl ExecutionBlockGenerator { extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - withdrawals: pa.withdrawals.clone().into(), + transactions: 
vec![].try_into().unwrap(), + withdrawals: pa.withdrawals.clone().try_into().unwrap(), blob_gas_used: 0, excess_blob_gas: 0, }), @@ -674,7 +674,7 @@ impl ExecutionBlockGenerator { fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, gas_limit: DEFAULT_GAS_LIMIT, @@ -683,8 +683,8 @@ impl ExecutionBlockGenerator { extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - withdrawals: pa.withdrawals.clone().into(), + transactions: vec![].try_into().unwrap(), + withdrawals: pa.withdrawals.clone().try_into().unwrap(), blob_gas_used: 0, excess_blob_gas: 0, }), @@ -693,17 +693,17 @@ impl ExecutionBlockGenerator { fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: "block gen was here".as_bytes().to_vec().try_into().unwrap(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - withdrawals: pa.withdrawals.clone().into(), + transactions: vec![].try_into().unwrap(), + withdrawals: pa.withdrawals.clone().try_into().unwrap(), blob_gas_used: 0, excess_blob_gas: 0, }), @@ -712,17 +712,17 @@ impl ExecutionBlockGenerator { fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: 
pa.prev_randao, block_number: parent.block_number() + 1, gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: "block gen was here".as_bytes().to_vec().try_into().unwrap(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - withdrawals: pa.withdrawals.clone().into(), + transactions: vec![].try_into().unwrap(), + withdrawals: pa.withdrawals.clone().try_into().unwrap(), blob_gas_used: 0, excess_blob_gas: 0, }), @@ -813,24 +813,25 @@ pub fn generate_blobs( let bundle = if fork_name.fulu_enabled() { let (kzg_commitment, kzg_proofs, blob) = load_test_blobs_bundle_v2::()?; BlobsBundle { - commitments: vec![kzg_commitment; n_blobs].into(), + commitments: vec![kzg_commitment; n_blobs].try_into().unwrap(), proofs: vec![kzg_proofs.to_vec(); n_blobs] .into_iter() .flatten() .collect::>() - .into(), - blobs: vec![blob; n_blobs].into(), + .try_into() + .unwrap(), + blobs: vec![blob; n_blobs].try_into().unwrap(), } } else { let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle_v1::()?; BlobsBundle { - commitments: vec![kzg_commitment; n_blobs].into(), - proofs: vec![kzg_proof; n_blobs].into(), - blobs: vec![blob; n_blobs].into(), + commitments: vec![kzg_commitment; n_blobs].try_into().unwrap(), + proofs: vec![kzg_proof; n_blobs].try_into().unwrap(), + blobs: vec![blob; n_blobs].try_into().unwrap(), } }; - Ok((bundle, transactions.into())) + Ok((bundle, transactions.try_into().unwrap())) } pub fn static_valid_tx() -> Result, String> { diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 7a451beddb..2168ed8961 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -252,7 +252,7 @@ pub async fn handle_rpc( Some( ctx.execution_block_generator .write() - 
.new_payload(request.into()), + .new_payload(request.try_into().unwrap()), ) } else { None @@ -361,98 +361,107 @@ pub async fn handle_rpc( } match method { - ENGINE_GET_PAYLOAD_V1 => { - Ok(serde_json::to_value(JsonExecutionPayload::from(response)).unwrap()) + ENGINE_GET_PAYLOAD_V1 => Ok(serde_json::to_value( + JsonExecutionPayload::try_from(response).unwrap(), + ) + .unwrap()), + ENGINE_GET_PAYLOAD_V2 => { + Ok(match JsonExecutionPayload::try_from(response).unwrap() { + JsonExecutionPayload::Bellatrix(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseBellatrix { + execution_payload, + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), + }) + .unwrap() + } + JsonExecutionPayload::Capella(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseCapella { + execution_payload, + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), + }) + .unwrap() + } + _ => unreachable!(), + }) } - ENGINE_GET_PAYLOAD_V2 => Ok(match JsonExecutionPayload::from(response) { - JsonExecutionPayload::Bellatrix(execution_payload) => { - serde_json::to_value(JsonGetPayloadResponseBellatrix { - execution_payload, - block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), - }) - .unwrap() - } - JsonExecutionPayload::Capella(execution_payload) => { - serde_json::to_value(JsonGetPayloadResponseCapella { - execution_payload, - block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), - }) - .unwrap() - } - _ => unreachable!(), - }), // From v3 onwards, we use the getPayload version only for the corresponding // ExecutionPayload version. So we return an error if the ExecutionPayload version // we get does not correspond to the getPayload version. 
- ENGINE_GET_PAYLOAD_V3 => Ok(match JsonExecutionPayload::from(response) { - JsonExecutionPayload::Deneb(execution_payload) => { - serde_json::to_value(JsonGetPayloadResponseDeneb { - execution_payload, - block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), - blobs_bundle: maybe_blobs - .ok_or(( - "No blobs returned despite V3 Payload".to_string(), - GENERIC_ERROR_CODE, - ))? - .into(), - should_override_builder: false, - }) - .unwrap() - } - _ => unreachable!(), - }), - ENGINE_GET_PAYLOAD_V4 => Ok(match JsonExecutionPayload::from(response) { - JsonExecutionPayload::Electra(execution_payload) => { - serde_json::to_value(JsonGetPayloadResponseElectra { - execution_payload, - block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), - blobs_bundle: maybe_blobs - .ok_or(( - "No blobs returned despite V4 Payload".to_string(), - GENERIC_ERROR_CODE, - ))? - .into(), - should_override_builder: false, - // TODO(electra): add EL requests in mock el - execution_requests: Default::default(), - }) - .unwrap() - } - _ => unreachable!(), - }), - ENGINE_GET_PAYLOAD_V5 => Ok(match JsonExecutionPayload::from(response) { - JsonExecutionPayload::Fulu(execution_payload) => { - serde_json::to_value(JsonGetPayloadResponseFulu { - execution_payload, - block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), - blobs_bundle: maybe_blobs - .ok_or(( - "No blobs returned despite V5 Payload".to_string(), - GENERIC_ERROR_CODE, - ))? - .into(), - should_override_builder: false, - execution_requests: Default::default(), - }) - .unwrap() - } - JsonExecutionPayload::Gloas(execution_payload) => { - serde_json::to_value(JsonGetPayloadResponseGloas { - execution_payload, - block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), - blobs_bundle: maybe_blobs - .ok_or(( - "No blobs returned despite V5 Payload".to_string(), - GENERIC_ERROR_CODE, - ))? 
- .into(), - should_override_builder: false, - execution_requests: Default::default(), - }) - .unwrap() - } - _ => unreachable!(), - }), + ENGINE_GET_PAYLOAD_V3 => { + Ok(match JsonExecutionPayload::try_from(response).unwrap() { + JsonExecutionPayload::Deneb(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseDeneb { + execution_payload, + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), + blobs_bundle: maybe_blobs + .ok_or(( + "No blobs returned despite V3 Payload".to_string(), + GENERIC_ERROR_CODE, + ))? + .into(), + should_override_builder: false, + }) + .unwrap() + } + _ => unreachable!(), + }) + } + ENGINE_GET_PAYLOAD_V4 => { + Ok(match JsonExecutionPayload::try_from(response).unwrap() { + JsonExecutionPayload::Electra(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseElectra { + execution_payload, + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), + blobs_bundle: maybe_blobs + .ok_or(( + "No blobs returned despite V4 Payload".to_string(), + GENERIC_ERROR_CODE, + ))? + .into(), + should_override_builder: false, + // TODO(electra): add EL requests in mock el + execution_requests: Default::default(), + }) + .unwrap() + } + _ => unreachable!(), + }) + } + ENGINE_GET_PAYLOAD_V5 => { + Ok(match JsonExecutionPayload::try_from(response).unwrap() { + JsonExecutionPayload::Fulu(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseFulu { + execution_payload, + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), + blobs_bundle: maybe_blobs + .ok_or(( + "No blobs returned despite V5 Payload".to_string(), + GENERIC_ERROR_CODE, + ))? 
+ .into(), + should_override_builder: false, + execution_requests: Default::default(), + }) + .unwrap() + } + JsonExecutionPayload::Gloas(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseGloas { + execution_payload, + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), + blobs_bundle: maybe_blobs + .ok_or(( + "No blobs returned despite V5 Payload".to_string(), + GENERIC_ERROR_CODE, + ))? + .into(), + should_override_builder: false, + execution_requests: Default::default(), + }) + .unwrap() + } + _ => unreachable!(), + }) + } _ => unreachable!(), } } @@ -644,7 +653,8 @@ pub async fn handle_rpc( transactions: payload.transactions().clone(), withdrawals: payload.withdrawals().ok().cloned(), }; - let json_payload_body = JsonExecutionPayloadBodyV1::from(payload_body); + let json_payload_body: JsonExecutionPayloadBodyV1 = + payload_body.try_into().unwrap(); response.push(Some(json_payload_body)); } None => response.push(None), diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 6b63881d85..f0991f1733 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -72,7 +72,7 @@ impl Operation { } pub fn mock_builder_extra_data() -> types::VariableList { - "mock_builder".as_bytes().to_vec().into() + "mock_builder".as_bytes().to_vec().try_into().unwrap() } #[derive(Debug)] diff --git a/beacon_node/genesis/src/common.rs b/beacon_node/genesis/src/common.rs index e48fa36204..88a88810d8 100644 --- a/beacon_node/genesis/src/common.rs +++ b/beacon_node/genesis/src/common.rs @@ -37,10 +37,17 @@ pub fn genesis_deposits( proofs.push(proof); } - Ok(deposit_data + deposit_data .into_iter() .zip(proofs) - .map(|(data, proof)| (data, proof.into())) - .map(|(data, proof)| Deposit { proof, data }) - .collect()) + .map(|(data, proof)| { + let converted_proof = proof + .try_into() + .map_err(|e| 
format!("Error converting proof: {:?}", e))?; + Ok(Deposit { + proof: converted_proof, + data, + }) + }) + .collect() } diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index acb0188456..77d2a34e16 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -1002,8 +1002,9 @@ mod tests { let mut block: BeaconBlockBellatrix<_, FullPayload> = BeaconBlockBellatrix::empty(spec); - let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat_n(tx, 5000).collect::>()); + let tx = VariableList::try_from(vec![0; 1024]).unwrap(); + let txs = + VariableList::try_from(std::iter::repeat_n(tx, 5000).collect::>()).unwrap(); block.body.execution_payload.execution_payload.transactions = txs; @@ -1021,8 +1022,9 @@ mod tests { let mut block: BeaconBlockBellatrix<_, FullPayload> = BeaconBlockBellatrix::empty(spec); - let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat_n(tx, 100000).collect::>()); + let tx = VariableList::try_from(vec![0; 1024]).unwrap(); + let txs = + VariableList::try_from(std::iter::repeat_n(tx, 100000).collect::>()).unwrap(); block.body.execution_payload.execution_payload.transactions = txs; @@ -1080,7 +1082,7 @@ mod tests { data_column_ids: RuntimeVariableList::new( vec![DataColumnsByRootIdentifier { block_root: Hash256::zero(), - columns: VariableList::from(vec![0, 1, 2]), + columns: VariableList::try_from(vec![0, 1, 2]).unwrap(), }], spec.max_request_blocks(fork_name), ) diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 9319973e59..9aab079952 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -29,15 +29,21 @@ pub const MAX_ERROR_LEN: u64 = 256; #[derive(Debug, Clone)] pub struct ErrorType(pub VariableList); -impl From for ErrorType 
{ - fn from(s: String) -> Self { - Self(VariableList::from(s.as_bytes().to_vec())) +impl From<&str> for ErrorType { + // This will truncate the error if `string.as_bytes()` exceeds `MaxErrorLen`. + fn from(s: &str) -> Self { + let mut bytes = s.as_bytes().to_vec(); + bytes.truncate(MAX_ERROR_LEN as usize); + Self( + VariableList::try_from(bytes) + .expect("length should not exceed MaxErrorLen after truncation"), + ) } } -impl From<&str> for ErrorType { - fn from(s: &str) -> Self { - Self(VariableList::from(s.as_bytes().to_vec())) +impl From for ErrorType { + fn from(s: String) -> Self { + Self::from(s.as_str()) } } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 228a74f08c..08085f3c27 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -70,13 +70,15 @@ pub static BLOB_SIDECAR_SIZE_MINIMAL: LazyLock = LazyLock::new(BlobSidecar::::max_size); pub static ERROR_TYPE_MIN: LazyLock = LazyLock::new(|| { - VariableList::::from(Vec::::new()) + VariableList::::try_from(Vec::::new()) + .expect("MaxErrorLen should not exceed MAX_ERROR_LEN") .as_ssz_bytes() .len() }); pub static ERROR_TYPE_MAX: LazyLock = LazyLock::new(|| { - VariableList::::from(vec![0u8; MAX_ERROR_LEN as usize]) + VariableList::::try_from(vec![0u8; MAX_ERROR_LEN as usize]) + .expect("MaxErrorLen should not exceed MAX_ERROR_LEN") .as_ssz_bytes() .len() }); diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index e37f4131a7..81d08764a5 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -26,8 +26,8 @@ type E = MinimalEthSpec; /// Bellatrix block with length < max_rpc_size. 
fn bellatrix_block_small(spec: &ChainSpec) -> BeaconBlock { let mut block = BeaconBlockBellatrix::::empty(spec); - let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat_n(tx, 5000).collect::>()); + let tx = VariableList::try_from(vec![0; 1024]).unwrap(); + let txs = VariableList::try_from(std::iter::repeat_n(tx, 5000).collect::>()).unwrap(); block.body.execution_payload.execution_payload.transactions = txs; @@ -41,8 +41,8 @@ fn bellatrix_block_small(spec: &ChainSpec) -> BeaconBlock { /// Hence, we generate a bellatrix block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. fn bellatrix_block_large(spec: &ChainSpec) -> BeaconBlock { let mut block = BeaconBlockBellatrix::::empty(spec); - let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat_n(tx, 100000).collect::>()); + let tx = VariableList::try_from(vec![0; 1024]).unwrap(); + let txs = VariableList::try_from(std::iter::repeat_n(tx, 100000).collect::>()).unwrap(); block.body.execution_payload.execution_payload.transactions = txs; @@ -1018,14 +1018,17 @@ fn test_tcp_columns_by_root_chunked_rpc() { }, signature: Signature::empty(), }, - column: vec![vec![0; E::bytes_per_blob()].into()].into(), - kzg_commitments: vec![KzgCommitment::empty_for_testing()].into(), - kzg_proofs: vec![KzgProof::empty()].into(), + column: vec![vec![0; E::bytes_per_cell()].try_into().unwrap()] + .try_into() + .unwrap(), + kzg_commitments: vec![KzgCommitment::empty_for_testing()].try_into().unwrap(), + kzg_proofs: vec![KzgProof::empty()].try_into().unwrap(), kzg_commitments_inclusion_proof: vec![ Hash256::zero(); E::kzg_commitments_inclusion_proof_depth() ] - .into(), + .try_into() + .unwrap(), }); let rpc_response = Response::DataColumnsByRoot(Some(data_column.clone())); @@ -1160,14 +1163,17 @@ fn test_tcp_columns_by_range_chunked_rpc() { }, signature: Signature::empty(), }, - column: vec![vec![0; E::bytes_per_blob()].into()].into(), - 
kzg_commitments: vec![KzgCommitment::empty_for_testing()].into(), - kzg_proofs: vec![KzgProof::empty()].into(), + column: vec![vec![0; E::bytes_per_cell()].try_into().unwrap()] + .try_into() + .unwrap(), + kzg_commitments: vec![KzgCommitment::empty_for_testing()].try_into().unwrap(), + kzg_proofs: vec![KzgProof::empty()].try_into().unwrap(), kzg_commitments_inclusion_proof: vec![ Hash256::zero(); E::kzg_commitments_inclusion_proof_depth() ] - .into(), + .try_into() + .unwrap(), }); let rpc_response = Response::DataColumnsByRange(Some(data_column.clone())); diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index a492ece508..1ffe921e58 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -2742,6 +2742,20 @@ impl NetworkBeaconProcessor { } } } + AttnError::SszTypesError(_) => { + error!( + %peer_id, + block = ?beacon_block_root, + ?attestation_type, + "Rejecting attestation due to a critical SSZ types error" + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "attn_ssz_types_error", + ); + } } debug!( diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 8f553b57d9..60bc0804e4 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1690,8 +1690,8 @@ mod tests { BeaconBlock::::Deneb(BeaconBlockDeneb::empty(&spec)), Signature::empty(), ); - let blobs = BlobsList::::from(vec![Blob::::default()]); - let kzg_proofs = KzgProofs::::from(vec![KzgProof::empty()]); + let blobs = BlobsList::::try_from(vec![Blob::::default()]).unwrap(); + let kzg_proofs = KzgProofs::::try_from(vec![KzgProof::empty()]).unwrap(); let signed_block_contents = PublishBlockRequest::new(Arc::new(block), Some((kzg_proofs, blobs))); diff --git 
a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 25c3f03d3b..67b792ef0d 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -753,10 +753,10 @@ async fn invalid_attestation_empty_bitfield() { MutationDelay::NoDelay, |attestation, _| match attestation { IndexedAttestation::Base(att) => { - att.attesting_indices = vec![].into(); + att.attesting_indices = vec![].try_into().unwrap(); } IndexedAttestation::Electra(att) => { - att.attesting_indices = vec![].into(); + att.attesting_indices = vec![].try_into().unwrap(); } }, |result| { diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 99abbef9c1..9e7a20040e 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -628,7 +628,12 @@ pub fn get_expected_withdrawals( .safe_rem(state.validators().len() as u64)?; } - Ok((withdrawals.into(), processed_partial_withdrawals_count)) + Ok(( + withdrawals + .try_into() + .map_err(BlockProcessingError::SszTypesError)?, + processed_partial_withdrawals_count, + )) } /// Apply withdrawals to the state. 
diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 183063ac76..c32797f77f 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -213,7 +213,7 @@ async fn valid_4_deposits() { let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 4, None, None); - let deposits = VariableList::from(deposits); + let deposits = VariableList::try_from(deposits).unwrap(); let mut head_block = harness .chain @@ -237,7 +237,7 @@ async fn invalid_deposit_deposit_count_too_big() { let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); - let deposits = VariableList::from(deposits); + let deposits = VariableList::try_from(deposits).unwrap(); let mut head_block = harness .chain @@ -269,7 +269,7 @@ async fn invalid_deposit_count_too_small() { let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); - let deposits = VariableList::from(deposits); + let deposits = VariableList::try_from(deposits).unwrap(); let mut head_block = harness .chain @@ -301,7 +301,7 @@ async fn invalid_deposit_bad_merkle_proof() { let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); - let deposits = VariableList::from(deposits); + let deposits = VariableList::try_from(deposits).unwrap(); let mut head_block = harness .chain @@ -336,7 +336,7 @@ async fn invalid_deposit_wrong_sig() { let (deposits, state) = harness.make_deposits(&mut state, 1, None, Some(SignatureBytes::empty())); - let deposits = VariableList::from(deposits); + let deposits = VariableList::try_from(deposits).unwrap(); let mut head_block = harness .chain @@ -360,7 +360,7 @@ async fn invalid_deposit_invalid_pub_key() { let (deposits, state) = 
harness.make_deposits(&mut state, 1, Some(PublicKeyBytes::empty()), None); - let deposits = VariableList::from(deposits); + let deposits = VariableList::try_from(deposits).unwrap(); let mut head_block = harness .chain @@ -753,10 +753,12 @@ async fn invalid_attester_slashing_1_invalid() { let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); match &mut attester_slashing { AttesterSlashing::Base(attester_slashing) => { - attester_slashing.attestation_1.attesting_indices = VariableList::from(vec![2, 1]); + attester_slashing.attestation_1.attesting_indices = + VariableList::try_from(vec![2, 1]).unwrap(); } AttesterSlashing::Electra(attester_slashing) => { - attester_slashing.attestation_1.attesting_indices = VariableList::from(vec![2, 1]); + attester_slashing.attestation_1.attesting_indices = + VariableList::try_from(vec![2, 1]).unwrap(); } } @@ -791,10 +793,12 @@ async fn invalid_attester_slashing_2_invalid() { let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); match &mut attester_slashing { AttesterSlashing::Base(attester_slashing) => { - attester_slashing.attestation_2.attesting_indices = VariableList::from(vec![2, 1]); + attester_slashing.attestation_2.attesting_indices = + VariableList::try_from(vec![2, 1]).unwrap(); } AttesterSlashing::Electra(attester_slashing) => { - attester_slashing.attestation_2.attesting_indices = VariableList::from(vec![2, 1]); + attester_slashing.attestation_2.attesting_indices = + VariableList::try_from(vec![2, 1]).unwrap(); } } diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 860f0d0a2d..5264686792 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -622,19 +622,22 @@ pub struct SingleAttestation { } impl SingleAttestation { - pub fn to_indexed(&self, fork_name: ForkName) -> IndexedAttestation { + pub fn to_indexed( + &self, + fork_name: ForkName, + ) -> Result, ssz_types::Error> { if fork_name.electra_enabled() { - 
IndexedAttestation::Electra(IndexedAttestationElectra { - attesting_indices: vec![self.attester_index].into(), + Ok(IndexedAttestation::Electra(IndexedAttestationElectra { + attesting_indices: vec![self.attester_index].try_into()?, data: self.data.clone(), signature: self.signature.clone(), - }) + })) } else { - IndexedAttestation::Base(IndexedAttestationBase { - attesting_indices: vec![self.attester_index].into(), + Ok(IndexedAttestation::Base(IndexedAttestationBase { + attesting_indices: vec![self.attester_index].try_into()?, data: self.data.clone(), signature: self.signature.clone(), - }) + })) } } } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 93f5140383..fd87c8c222 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -2067,7 +2067,7 @@ fn max_data_columns_by_root_request_common(max_request_blocks: u64) let empty_data_columns_by_root_id = DataColumnsByRootIdentifier { block_root: Hash256::zero(), - columns: VariableList::from(vec![0; E::number_of_columns()]), + columns: VariableList::repeat_full(0), }; RuntimeVariableList::>::new( diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index e001cf0e4e..47d32ad9e4 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -310,6 +310,11 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + Self::BytesPerBlob::to_usize() } + /// Returns the `BYTES_PER_CELL` constant for this specification. + fn bytes_per_cell() -> usize { + Self::BytesPerCell::to_usize() + } + /// Returns the `KZG_COMMITMENT_INCLUSION_PROOF_DEPTH` preset for this specification. 
fn kzg_proof_inclusion_proof_depth() -> usize { Self::KzgCommitmentInclusionProofDepth::to_usize() diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 5850db876c..530ccd88de 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -151,32 +151,44 @@ impl LightClientBootstrap { ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, }), ForkName::Fulu => Self::Fulu(LightClientBootstrapFulu { header: LightClientHeaderFulu::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: 
current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, }), ForkName::Gloas => Self::Gloas(LightClientBootstrapGloas { header: LightClientHeaderGloas::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, }), }; @@ -201,32 +213,44 @@ impl LightClientBootstrap { ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, }), ForkName::Fulu => 
Self::Fulu(LightClientBootstrapFulu { header: LightClientHeaderFulu::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, }), ForkName::Gloas => Self::Gloas(LightClientBootstrapGloas { header: LightClientHeaderGloas::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, }), }; diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index 4fa98de40b..644824f12c 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -116,7 +116,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderAltair::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.into(), + finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, sync_aggregate, signature_slot, }) @@ -128,7 +128,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderCapella::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.into(), + finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, sync_aggregate, signature_slot, }), @@ -139,7 +139,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderDeneb::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.into(), + finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, sync_aggregate, signature_slot, }), @@ -150,7 +150,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderElectra::block_to_light_client_header( finalized_block, )?, - 
finality_branch: finality_branch.into(), + finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, sync_aggregate, signature_slot, }), @@ -161,7 +161,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderFulu::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.into(), + finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, sync_aggregate, signature_slot, }), @@ -172,7 +172,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderGloas::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.into(), + finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, sync_aggregate, signature_slot, }), diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index bf1a8c614a..afb7ebc96d 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -261,9 +261,11 @@ impl LightClientUpdate { Self::Altair(LightClientUpdateAltair { attested_header, next_sync_committee, - next_sync_committee_branch: next_sync_committee_branch.into(), + next_sync_committee_branch: next_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, finalized_header, - finality_branch: finality_branch.into(), + finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -285,9 +287,11 @@ impl LightClientUpdate { Self::Capella(LightClientUpdateCapella { attested_header, next_sync_committee, - next_sync_committee_branch: next_sync_committee_branch.into(), + next_sync_committee_branch: next_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, finalized_header, - finality_branch: finality_branch.into(), + finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: 
block_slot, }) @@ -309,9 +313,11 @@ impl LightClientUpdate { Self::Deneb(LightClientUpdateDeneb { attested_header, next_sync_committee, - next_sync_committee_branch: next_sync_committee_branch.into(), + next_sync_committee_branch: next_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, finalized_header, - finality_branch: finality_branch.into(), + finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -333,9 +339,11 @@ impl LightClientUpdate { Self::Electra(LightClientUpdateElectra { attested_header, next_sync_committee, - next_sync_committee_branch: next_sync_committee_branch.into(), + next_sync_committee_branch: next_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, finalized_header, - finality_branch: finality_branch.into(), + finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -357,9 +365,11 @@ impl LightClientUpdate { Self::Fulu(LightClientUpdateFulu { attested_header, next_sync_committee, - next_sync_committee_branch: next_sync_committee_branch.into(), + next_sync_committee_branch: next_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, finalized_header, - finality_branch: finality_branch.into(), + finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -381,9 +391,11 @@ impl LightClientUpdate { Self::Gloas(LightClientUpdateGloas { attested_header, next_sync_committee, - next_sync_committee_branch: next_sync_committee_branch.into(), + next_sync_committee_branch: next_sync_committee_branch + .try_into() + .map_err(Error::SszTypesError)?, finalized_header, - finality_branch: finality_branch.into(), + finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), 
signature_slot: block_slot, }) diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index 98bb8565dd..7c8f86e14d 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -115,7 +115,7 @@ where } } - output.into() + output.try_into().unwrap() } } diff --git a/lcli/src/http_sync.rs b/lcli/src/http_sync.rs index 6f7dcdb595..dd941cda74 100644 --- a/lcli/src/http_sync.rs +++ b/lcli/src/http_sync.rs @@ -139,8 +139,8 @@ async fn get_block_from_source( let block_root = block_from_source.canonical_root(); let block_contents = SignedBlockContents { signed_block: Arc::new(block_from_source), - kzg_proofs: kzg_proofs.into(), - blobs: blobs.into(), + kzg_proofs: kzg_proofs.try_into().unwrap(), + blobs: blobs.try_into().unwrap(), }; let publish_block_req = PublishBlockRequest::BlockContents(block_contents); diff --git a/slasher/src/test_utils.rs b/slasher/src/test_utils.rs index 26338a019a..bbbadac761 100644 --- a/slasher/src/test_utils.rs +++ b/slasher/src/test_utils.rs @@ -17,7 +17,7 @@ pub fn indexed_att_electra( target_root: u64, ) -> IndexedAttestation { IndexedAttestation::Electra(IndexedAttestationElectra { - attesting_indices: attesting_indices.as_ref().to_vec().into(), + attesting_indices: attesting_indices.as_ref().to_vec().try_into().unwrap(), data: AttestationData { slot: Slot::new(0), index: 0, @@ -42,7 +42,7 @@ pub fn indexed_att( target_root: u64, ) -> IndexedAttestation { IndexedAttestation::Base(IndexedAttestationBase { - attesting_indices: attesting_indices.as_ref().to_vec().into(), + attesting_indices: attesting_indices.as_ref().to_vec().try_into().unwrap(), data: AttestationData { slot: Slot::new(0), index: 0, diff --git a/testing/ef_tests/src/cases/ssz_generic.rs b/testing/ef_tests/src/cases/ssz_generic.rs index 4152711aee..8742f8a140 100644 --- a/testing/ef_tests/src/cases/ssz_generic.rs +++ b/testing/ef_tests/src/cases/ssz_generic.rs @@ -318,14 
+318,13 @@ where { let s: String = serde::de::Deserialize::deserialize(deserializer)?; let decoded: Vec = hex::decode(&s.as_str()[2..]).map_err(D::Error::custom)?; + let decoded_len = decoded.len(); - if decoded.len() > N::to_usize() { - Err(D::Error::custom(format!( + decoded.try_into().map_err(|_| { + D::Error::custom(format!( "Too many values for list, got: {}, limit: {}", - decoded.len(), + decoded_len, N::to_usize() - ))) - } else { - Ok(decoded.into()) - } + )) + }) } From f4b1bb46b52998342923629142fab93d9eaada7b Mon Sep 17 00:00:00 2001 From: Mac L Date: Tue, 28 Oct 2025 09:49:47 +0400 Subject: [PATCH 15/44] Remove `compare_fields` and import from crates.io (#8189) Use the recently published `compare_fields` and remove it from Lighthouse https://crates.io/crates/compare_fields Co-Authored-By: Mac L --- Cargo.lock | 12 +- Cargo.toml | 5 +- common/compare_fields/Cargo.toml | 14 -- common/compare_fields/src/lib.rs | 197 ---------------------- common/compare_fields_derive/Cargo.toml | 12 -- common/compare_fields_derive/src/lib.rs | 70 -------- consensus/types/Cargo.toml | 1 - consensus/types/src/beacon_state.rs | 1 - consensus/types/src/historical_summary.rs | 2 +- testing/ef_tests/Cargo.toml | 1 - testing/ef_tests/src/cases/rewards.rs | 2 +- 11 files changed, 10 insertions(+), 307 deletions(-) delete mode 100644 common/compare_fields/Cargo.toml delete mode 100644 common/compare_fields/src/lib.rs delete mode 100644 common/compare_fields_derive/Cargo.toml delete mode 100644 common/compare_fields_derive/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 8cc058b615..8a282a60b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1658,15 +1658,19 @@ dependencies = [ [[package]] name = "compare_fields" -version = "0.2.0" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05162add7c8618791829528194a271dca93f69194d35b19db1ca7fbfb8275278" dependencies = [ "compare_fields_derive", - "itertools 0.10.5", + "itertools 0.14.0", ] 
[[package]] name = "compare_fields_derive" -version = "0.2.0" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f5ee468b2e568b668e2a686112935e7bbe9a81bf4fa6b9f6fc3410ea45fb7ce" dependencies = [ "quote", "syn 1.0.109", @@ -2539,7 +2543,6 @@ dependencies = [ "beacon_chain", "bls", "compare_fields", - "compare_fields_derive", "context_deserialize", "derivative", "eth2_network_config", @@ -9903,7 +9906,6 @@ dependencies = [ "beacon_chain", "bls", "compare_fields", - "compare_fields_derive", "context_deserialize", "criterion", "derivative", diff --git a/Cargo.toml b/Cargo.toml index 721102bd06..a979907769 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,8 +19,6 @@ members = [ "boot_node", "common/account_utils", "common/clap_utils", - "common/compare_fields", - "common/compare_fields_derive", "common/deposit_contract", "common/directory", "common/eip_3076", @@ -121,8 +119,7 @@ c-kzg = { version = "2.1", default-features = false } cargo_metadata = "0.19" clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } clap_utils = { path = "common/clap_utils" } -compare_fields = { path = "common/compare_fields" } -compare_fields_derive = { path = "common/compare_fields_derive" } +compare_fields = "0.1" console-subscriber = "0.4" context_deserialize = { path = "consensus/context_deserialize/context_deserialize", features = [ "all", diff --git a/common/compare_fields/Cargo.toml b/common/compare_fields/Cargo.toml deleted file mode 100644 index 50e7e5f21d..0000000000 --- a/common/compare_fields/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -name = "compare_fields" -version = "0.2.0" -authors = ["Paul Hauner "] -edition = { workspace = true } - -[package.metadata.cargo-udeps.ignore] -development = ["compare_fields_derive"] # used in doc-tests - -[dependencies] -itertools = { workspace = true } - -[dev-dependencies] -compare_fields_derive = { workspace = true } diff --git a/common/compare_fields/src/lib.rs 
b/common/compare_fields/src/lib.rs deleted file mode 100644 index 27baf14806..0000000000 --- a/common/compare_fields/src/lib.rs +++ /dev/null @@ -1,197 +0,0 @@ -//! Provides field-by-field comparisons for structs and vecs. -//! -//! Returns comparisons as data, without making assumptions about the desired equality (e.g., -//! does not `panic!` on inequality). -//! -//! Note: `compare_fields_derive` requires `PartialEq` and `Debug` implementations. -//! -//! ## Example -//! -//! ```rust -//! use compare_fields::{CompareFields, Comparison, FieldComparison}; -//! use compare_fields_derive::CompareFields; -//! -//! #[derive(PartialEq, Debug, CompareFields)] -//! pub struct Bar { -//! a: u64, -//! b: u16, -//! #[compare_fields(as_slice)] -//! c: Vec -//! } -//! -//! #[derive(Clone, PartialEq, Debug, CompareFields)] -//! pub struct Foo { -//! d: String -//! } -//! -//! let cat = Foo {d: "cat".to_string()}; -//! let dog = Foo {d: "dog".to_string()}; -//! let chicken = Foo {d: "chicken".to_string()}; -//! -//! let mut bar_a = Bar { -//! a: 42, -//! b: 12, -//! c: vec![ cat.clone(), dog.clone() ], -//! }; -//! -//! let mut bar_b = Bar { -//! a: 42, -//! b: 99, -//! c: vec![ chicken.clone(), dog.clone()] -//! }; -//! -//! let cat_dog = Comparison::Child(FieldComparison { -//! field_name: "d".to_string(), -//! equal: false, -//! a: "\"cat\"".to_string(), -//! b: "\"dog\"".to_string(), -//! }); -//! assert_eq!(cat.compare_fields(&dog), vec![cat_dog]); -//! -//! let bar_a_b = vec![ -//! Comparison::Child(FieldComparison { -//! field_name: "a".to_string(), -//! equal: true, -//! a: "42".to_string(), -//! b: "42".to_string(), -//! }), -//! Comparison::Child(FieldComparison { -//! field_name: "b".to_string(), -//! equal: false, -//! a: "12".to_string(), -//! b: "99".to_string(), -//! }), -//! Comparison::Parent{ -//! field_name: "c".to_string(), -//! equal: false, -//! children: vec![ -//! FieldComparison { -//! field_name: "0".to_string(), -//! equal: false, -//! 
a: "Some(Foo { d: \"cat\" })".to_string(), -//! b: "Some(Foo { d: \"chicken\" })".to_string(), -//! }, -//! FieldComparison { -//! field_name: "1".to_string(), -//! equal: true, -//! a: "Some(Foo { d: \"dog\" })".to_string(), -//! b: "Some(Foo { d: \"dog\" })".to_string(), -//! } -//! ] -//! } -//! ]; -//! assert_eq!(bar_a.compare_fields(&bar_b), bar_a_b); -//! ``` -use itertools::{EitherOrBoth, Itertools}; -use std::fmt::Debug; - -#[derive(Debug, PartialEq, Clone)] -pub enum Comparison { - Child(FieldComparison), - Parent { - field_name: String, - equal: bool, - children: Vec, - }, -} - -impl Comparison { - pub fn child>(field_name: String, a: &T, b: &T) -> Self { - Comparison::Child(FieldComparison::new(field_name, a, b)) - } - - pub fn parent(field_name: String, equal: bool, children: Vec) -> Self { - Comparison::Parent { - field_name, - equal, - children, - } - } - - pub fn from_slice>(field_name: String, a: &[T], b: &[T]) -> Self { - Self::from_iter(field_name, a.iter(), b.iter()) - } - - pub fn from_into_iter<'a, T: Debug + PartialEq + 'a>( - field_name: String, - a: impl IntoIterator, - b: impl IntoIterator, - ) -> Self { - Self::from_iter(field_name, a.into_iter(), b.into_iter()) - } - - pub fn from_iter<'a, T: Debug + PartialEq + 'a>( - field_name: String, - a: impl Iterator, - b: impl Iterator, - ) -> Self { - let mut children = vec![]; - let mut all_equal = true; - - for (i, entry) in a.zip_longest(b).enumerate() { - let comparison = match entry { - EitherOrBoth::Both(x, y) => { - FieldComparison::new(format!("{i}"), &Some(x), &Some(y)) - } - EitherOrBoth::Left(x) => FieldComparison::new(format!("{i}"), &Some(x), &None), - EitherOrBoth::Right(y) => FieldComparison::new(format!("{i}"), &None, &Some(y)), - }; - all_equal = all_equal && comparison.equal(); - children.push(comparison); - } - - Self::parent(field_name, all_equal, children) - } - - pub fn retain_children(&mut self, f: F) - where - F: FnMut(&FieldComparison) -> bool, - { - match self { - 
Comparison::Child(_) => (), - Comparison::Parent { children, .. } => children.retain(f), - } - } - - pub fn equal(&self) -> bool { - match self { - Comparison::Child(fc) => fc.equal, - Comparison::Parent { equal, .. } => *equal, - } - } - - pub fn not_equal(&self) -> bool { - !self.equal() - } -} - -#[derive(Debug, PartialEq, Clone)] -pub struct FieldComparison { - pub field_name: String, - pub equal: bool, - pub a: String, - pub b: String, -} - -pub trait CompareFields { - fn compare_fields(&self, b: &Self) -> Vec; -} - -impl FieldComparison { - pub fn new>(field_name: String, a: &T, b: &T) -> Self { - Self { - field_name, - equal: a == b, - a: format!("{a:?}"), - b: format!("{b:?}"), - } - } - - pub fn equal(&self) -> bool { - self.equal - } - - pub fn not_equal(&self) -> bool { - !self.equal() - } -} diff --git a/common/compare_fields_derive/Cargo.toml b/common/compare_fields_derive/Cargo.toml deleted file mode 100644 index 19682bf367..0000000000 --- a/common/compare_fields_derive/Cargo.toml +++ /dev/null @@ -1,12 +0,0 @@ -[package] -name = "compare_fields_derive" -version = "0.2.0" -authors = ["Paul Hauner "] -edition = { workspace = true } - -[lib] -proc-macro = true - -[dependencies] -quote = { workspace = true } -syn = { workspace = true } diff --git a/common/compare_fields_derive/src/lib.rs b/common/compare_fields_derive/src/lib.rs deleted file mode 100644 index 3529970721..0000000000 --- a/common/compare_fields_derive/src/lib.rs +++ /dev/null @@ -1,70 +0,0 @@ -use proc_macro::TokenStream; -use quote::quote; -use syn::{DeriveInput, parse_macro_input}; - -fn is_iter(field: &syn::Field) -> bool { - field.attrs.iter().any(|attr| { - attr.path.is_ident("compare_fields") - && (attr.tokens.to_string().replace(' ', "") == "(as_slice)" - || attr.tokens.to_string().replace(' ', "") == "(as_iter)") - }) -} - -#[proc_macro_derive(CompareFields, attributes(compare_fields))] -pub fn compare_fields_derive(input: TokenStream) -> TokenStream { - let item = 
parse_macro_input!(input as DeriveInput); - - let name = &item.ident; - let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); - - let syn::Data::Struct(struct_data) = &item.data else { - panic!("compare_fields_derive only supports structs."); - }; - - let mut quotes = vec![]; - - for field in struct_data.fields.iter() { - let Some(ident_a) = &field.ident else { - panic!("compare_fields_derive only supports named struct fields."); - }; - let field_name = ident_a.to_string(); - let ident_b = ident_a.clone(); - - let quote = if is_iter(field) { - quote! { - comparisons.push(compare_fields::Comparison::from_into_iter( - #field_name.to_string(), - &self.#ident_a, - &b.#ident_b - )); - } - } else { - quote! { - comparisons.push( - compare_fields::Comparison::child( - #field_name.to_string(), - &self.#ident_a, - &b.#ident_b - ) - ); - } - }; - - quotes.push(quote); - } - - let output = quote! { - impl #impl_generics compare_fields::CompareFields for #name #ty_generics #where_clause { - fn compare_fields(&self, b: &Self) -> Vec { - let mut comparisons = vec![]; - - #( - #quotes - )* - - comparisons - } - } - }; - output.into() -} diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index bfce4b72d2..d9b2f10198 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -29,7 +29,6 @@ alloy-rlp = { version = "0.3.4", features = ["derive"] } arbitrary = { workspace = true, features = ["derive"], optional = true } bls = { workspace = true } compare_fields = { workspace = true } -compare_fields_derive = { workspace = true } context_deserialize = { workspace = true } derivative = { workspace = true } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 1bd4927fe8..2b03fda303 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -5,7 +5,6 @@ use 
crate::historical_summary::HistoricalSummary; use crate::test_utils::TestRandom; use crate::*; use compare_fields::CompareFields; -use compare_fields_derive::CompareFields; use derivative::Derivative; use ethereum_hashing::hash; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/historical_summary.rs index 0aad2d903d..dc147ad042 100644 --- a/consensus/types/src/historical_summary.rs +++ b/consensus/types/src/historical_summary.rs @@ -1,7 +1,7 @@ use crate::context_deserialize; use crate::test_utils::TestRandom; use crate::{BeaconState, EthSpec, ForkName, Hash256}; -use compare_fields_derive::CompareFields; +use compare_fields::CompareFields; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 50007f9160..d9afce0efe 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -16,7 +16,6 @@ alloy-primitives = { workspace = true } beacon_chain = { workspace = true } bls = { workspace = true } compare_fields = { workspace = true } -compare_fields_derive = { workspace = true } context_deserialize = { workspace = true } derivative = { workspace = true } eth2_network_config = { workspace = true } diff --git a/testing/ef_tests/src/cases/rewards.rs b/testing/ef_tests/src/cases/rewards.rs index d6ce8be742..798014a6b0 100644 --- a/testing/ef_tests/src/cases/rewards.rs +++ b/testing/ef_tests/src/cases/rewards.rs @@ -1,7 +1,7 @@ use super::*; use crate::case_result::compare_result_detailed; use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; -use compare_fields_derive::CompareFields; +use compare_fields::CompareFields; use serde::Deserialize; use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; From 341eeeabe36f9501cfe85ee4a2eedd44ac46612a Mon Sep 17 00:00:00 2001 From: hopinheimer 
<48147533+hopinheimer@users.noreply.github.com> Date: Tue, 28 Oct 2025 12:32:02 +0530 Subject: [PATCH 16/44] Extracting the Error impl from the monolith `eth2` (#7878) Currently the `eth2` crate lib file is a large monolith of almost 3000 lines of code. As part of the bosun migration we are trying to increase code readability and modularity in the lighthouse crates initially, which then can be transferred to bosun Co-Authored-By: hopinheimer Co-Authored-By: hopinheimer <48147533+hopinheimer@users.noreply.github.com> --- common/eth2/src/error.rs | 165 +++++++++++++++++++ common/eth2/src/lib.rs | 117 +------------ common/eth2/src/lighthouse_vc/http_client.rs | 29 +--- common/eth2/src/types.rs | 42 +---- 4 files changed, 176 insertions(+), 177 deletions(-) create mode 100644 common/eth2/src/error.rs diff --git a/common/eth2/src/error.rs b/common/eth2/src/error.rs new file mode 100644 index 0000000000..c1bacb4510 --- /dev/null +++ b/common/eth2/src/error.rs @@ -0,0 +1,165 @@ +//! Centralized error handling for eth2 API clients +//! +//! This module consolidates all error types, response processing, +//! and recovery logic for both beacon node and validator client APIs. + +use pretty_reqwest_error::PrettyReqwestError; +use reqwest::{Response, StatusCode}; +use sensitive_url::SensitiveUrl; +use serde::{Deserialize, Serialize}; +use std::{fmt, path::PathBuf}; + +/// Main error type for eth2 API clients +#[derive(Debug)] +pub enum Error { + /// The `reqwest` client raised an error. + HttpClient(PrettyReqwestError), + /// The `reqwest_eventsource` client raised an error. + SseClient(Box), + /// The server returned an error message where the body was able to be parsed. + ServerMessage(ErrorMessage), + /// The server returned an error message with an array of errors. + ServerIndexedMessage(IndexedErrorMessage), + /// The server returned an error message where the body was unable to be parsed. + StatusCode(StatusCode), + /// The supplied URL is badly formatted. 
It should look something like `http://127.0.0.1:5052`. + InvalidUrl(SensitiveUrl), + /// The supplied validator client secret is invalid. + InvalidSecret(String), + /// The server returned a response with an invalid signature. It may be an impostor. + InvalidSignatureHeader, + /// The server returned a response without a signature header. It may be an impostor. + MissingSignatureHeader, + /// The server returned an invalid JSON response. + InvalidJson(serde_json::Error), + /// The server returned an invalid server-sent event. + InvalidServerSentEvent(String), + /// The server sent invalid response headers. + InvalidHeaders(String), + /// The server returned an invalid SSZ response. + InvalidSsz(ssz::DecodeError), + /// An I/O error occurred while loading an API token from disk. + TokenReadError(PathBuf, std::io::Error), + /// The client has been configured without a server pubkey, but requires one for this request. + NoServerPubkey, + /// The client has been configured without an API token, but requires one for this request. + NoToken, +} + +/// An API error serializable to JSON. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ErrorMessage { + pub code: u16, + pub message: String, + #[serde(default)] + pub stacktraces: Vec, +} + +/// An indexed API error serializable to JSON. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct IndexedErrorMessage { + pub code: u16, + pub message: String, + pub failures: Vec, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Failure { + pub index: u64, + pub message: String, +} + +impl Failure { + pub fn new(index: usize, message: String) -> Self { + Self { + index: index as u64, + message, + } + } +} + +/// Server error response variants +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ResponseError { + Indexed(IndexedErrorMessage), + Message(ErrorMessage), +} + +impl Error { + /// If the error has a HTTP status code, return it. + pub fn status(&self) -> Option { + match self { + Error::HttpClient(error) => error.inner().status(), + Error::SseClient(error) => { + if let reqwest_eventsource::Error::InvalidStatusCode(status, _) = error.as_ref() { + Some(*status) + } else { + None + } + } + Error::ServerMessage(msg) => StatusCode::try_from(msg.code).ok(), + Error::ServerIndexedMessage(msg) => StatusCode::try_from(msg.code).ok(), + Error::StatusCode(status) => Some(*status), + Error::InvalidUrl(_) => None, + Error::InvalidSecret(_) => None, + Error::InvalidSignatureHeader => None, + Error::MissingSignatureHeader => None, + Error::InvalidJson(_) => None, + Error::InvalidSsz(_) => None, + Error::InvalidServerSentEvent(_) => None, + Error::InvalidHeaders(_) => None, + Error::TokenReadError(..) => None, + Error::NoServerPubkey | Error::NoToken => None, + } + } +} + +impl From for Error { + fn from(error: reqwest::Error) -> Self { + Error::HttpClient(error.into()) + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// Returns `Ok(response)` if the response is a `200 OK`, `202 ACCEPTED`, or `204 NO_CONTENT` +/// Otherwise, creates an appropriate error message. 
+pub async fn ok_or_error(response: Response) -> Result { + let status = response.status(); + + if status == StatusCode::OK + || status == StatusCode::ACCEPTED + || status == StatusCode::NO_CONTENT + { + Ok(response) + } else if let Ok(message) = response.json::().await { + match message { + ResponseError::Message(message) => Err(Error::ServerMessage(message)), + ResponseError::Indexed(indexed) => Err(Error::ServerIndexedMessage(indexed)), + } + } else { + Err(Error::StatusCode(status)) + } +} + +/// Returns `Ok(response)` if the response is a success (2xx) response. Otherwise, creates an +/// appropriate error message. +pub async fn success_or_error(response: Response) -> Result { + let status = response.status(); + + if status.is_success() { + Ok(response) + } else if let Ok(message) = response.json().await { + match message { + ResponseError::Message(message) => Err(Error::ServerMessage(message)), + ResponseError::Indexed(indexed) => Err(Error::ServerIndexedMessage(indexed)), + } + } else { + Err(Error::StatusCode(status)) + } +} diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 995e6966ea..a9dd752df0 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -7,6 +7,7 @@ //! Eventually it would be ideal to publish this crate on crates.io, however we have some local //! dependencies preventing this presently. 
+pub mod error; #[cfg(feature = "lighthouse")] pub mod lighthouse; #[cfg(feature = "lighthouse")] @@ -14,14 +15,14 @@ pub mod lighthouse_vc; pub mod mixin; pub mod types; +pub use self::error::{Error, ok_or_error, success_or_error}; use self::mixin::{RequestAccept, ResponseOptional}; -use self::types::{Error as ResponseError, *}; +use self::types::*; use ::types::beacon_response::ExecutionOptimisticFinalizedBeaconResponse; use derivative::Derivative; use futures::Stream; use futures_util::StreamExt; use libp2p_identity::PeerId; -use pretty_reqwest_error::PrettyReqwestError; pub use reqwest; use reqwest::{ Body, IntoUrl, RequestBuilder, Response, @@ -34,7 +35,6 @@ use serde::{Serialize, de::DeserializeOwned}; use ssz::Encode; use std::fmt; use std::future::Future; -use std::path::PathBuf; use std::time::Duration; pub const V1: EndpointVersion = EndpointVersion(1); @@ -68,83 +68,6 @@ const HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT: u32 = 4; const HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_DEFAULT_TIMEOUT_QUOTIENT: u32 = 4; -#[derive(Debug)] -pub enum Error { - /// The `reqwest` client raised an error. - HttpClient(PrettyReqwestError), - /// The `reqwest_eventsource` client raised an error. - SseClient(Box), - /// The server returned an error message where the body was able to be parsed. - ServerMessage(ErrorMessage), - /// The server returned an error message with an array of errors. - ServerIndexedMessage(IndexedErrorMessage), - /// The server returned an error message where the body was unable to be parsed. - StatusCode(StatusCode), - /// The supplied URL is badly formatted. It should look something like `http://127.0.0.1:5052`. - InvalidUrl(SensitiveUrl), - /// The supplied validator client secret is invalid. - InvalidSecret(String), - /// The server returned a response with an invalid signature. It may be an impostor. - InvalidSignatureHeader, - /// The server returned a response without a signature header. It may be an impostor. 
- MissingSignatureHeader, - /// The server returned an invalid JSON response. - InvalidJson(serde_json::Error), - /// The server returned an invalid server-sent event. - InvalidServerSentEvent(String), - /// The server sent invalid response headers. - InvalidHeaders(String), - /// The server returned an invalid SSZ response. - InvalidSsz(ssz::DecodeError), - /// An I/O error occurred while loading an API token from disk. - TokenReadError(PathBuf, std::io::Error), - /// The client has been configured without a server pubkey, but requires one for this request. - NoServerPubkey, - /// The client has been configured without an API token, but requires one for this request. - NoToken, -} - -impl From for Error { - fn from(error: reqwest::Error) -> Self { - Error::HttpClient(error.into()) - } -} - -impl Error { - /// If the error has a HTTP status code, return it. - pub fn status(&self) -> Option { - match self { - Error::HttpClient(error) => error.inner().status(), - Error::SseClient(error) => { - if let reqwest_eventsource::Error::InvalidStatusCode(status, _) = error.as_ref() { - Some(*status) - } else { - None - } - } - Error::ServerMessage(msg) => StatusCode::try_from(msg.code).ok(), - Error::ServerIndexedMessage(msg) => StatusCode::try_from(msg.code).ok(), - Error::StatusCode(status) => Some(*status), - Error::InvalidUrl(_) => None, - Error::InvalidSecret(_) => None, - Error::InvalidSignatureHeader => None, - Error::MissingSignatureHeader => None, - Error::InvalidJson(_) => None, - Error::InvalidSsz(_) => None, - Error::InvalidServerSentEvent(_) => None, - Error::InvalidHeaders(_) => None, - Error::TokenReadError(..) => None, - Error::NoServerPubkey | Error::NoToken => None, - } - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - /// A struct to define a variety of different timeouts for different validator tasks to ensure /// proper fallback behaviour. 
#[derive(Clone, Debug, PartialEq, Eq)] @@ -2928,37 +2851,3 @@ impl BeaconNodeHttpClient { .await } } - -/// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an -/// appropriate error message. -pub async fn ok_or_error(response: Response) -> Result { - let status = response.status(); - - if status == StatusCode::OK { - Ok(response) - } else if let Ok(message) = response.json().await { - match message { - ResponseError::Message(message) => Err(Error::ServerMessage(message)), - ResponseError::Indexed(indexed) => Err(Error::ServerIndexedMessage(indexed)), - } - } else { - Err(Error::StatusCode(status)) - } -} - -/// Returns `Ok(response)` if the response is a success (2xx) response. Otherwise, creates an -/// appropriate error message. -pub async fn success_or_error(response: Response) -> Result { - let status = response.status(); - - if status.is_success() { - Ok(response) - } else if let Ok(message) = response.json().await { - match message { - ResponseError::Message(message) => Err(Error::ServerMessage(message)), - ResponseError::Indexed(indexed) => Err(Error::ServerIndexedMessage(indexed)), - } - } else { - Err(Error::StatusCode(status)) - } -} diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 6028960553..c4fddb97d7 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -1,5 +1,5 @@ use super::types::*; -use crate::Error; +use crate::{Error, success_or_error}; use reqwest::{ IntoUrl, header::{HeaderMap, HeaderValue}, @@ -145,7 +145,7 @@ impl ValidatorClientHttpClient { .send() .await .map_err(Error::from)?; - ok_or_error(response).await + success_or_error(response).await } /// Perform a HTTP DELETE request, returning the `Response` for further processing. 
@@ -157,7 +157,7 @@ impl ValidatorClientHttpClient { .send() .await .map_err(Error::from)?; - ok_or_error(response).await + success_or_error(response).await } async fn get(&self, url: U) -> Result { @@ -218,7 +218,7 @@ impl ValidatorClientHttpClient { .send() .await .map_err(Error::from)?; - ok_or_error(response).await + success_or_error(response).await } async fn post( @@ -250,7 +250,7 @@ impl ValidatorClientHttpClient { .send() .await .map_err(Error::from)?; - ok_or_error(response).await?; + success_or_error(response).await?; Ok(()) } @@ -268,7 +268,7 @@ impl ValidatorClientHttpClient { .send() .await .map_err(Error::from)?; - ok_or_error(response).await + success_or_error(response).await } /// Perform a HTTP DELETE request. @@ -681,20 +681,3 @@ impl ValidatorClientHttpClient { self.delete(url).await } } - -/// Returns `Ok(response)` if the response is a `200 OK` response or a -/// `202 Accepted` response. Otherwise, creates an appropriate error message. -async fn ok_or_error(response: Response) -> Result { - let status = response.status(); - - if status == StatusCode::OK - || status == StatusCode::ACCEPTED - || status == StatusCode::NO_CONTENT - { - Ok(response) - } else if let Ok(message) = response.json().await { - Err(Error::ServerMessage(message)) - } else { - Err(Error::StatusCode(status)) - } -} diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 60bc0804e4..a90fe6d058 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -26,46 +26,8 @@ pub use types::*; #[cfg(feature = "lighthouse")] use crate::lighthouse::BlockReward; -/// An API error serializable to JSON. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(untagged)] -pub enum Error { - Indexed(IndexedErrorMessage), - Message(ErrorMessage), -} - -/// An API error serializable to JSON. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct ErrorMessage { - pub code: u16, - pub message: String, - #[serde(default)] - pub stacktraces: Vec, -} - -/// An indexed API error serializable to JSON. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct IndexedErrorMessage { - pub code: u16, - pub message: String, - pub failures: Vec, -} - -/// A single failure in an index of API errors, serializable to JSON. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct Failure { - pub index: u64, - pub message: String, -} - -impl Failure { - pub fn new(index: usize, message: String) -> Self { - Self { - index: index as u64, - message, - } - } -} +// Re-export error types from the unified error module +pub use crate::error::{ErrorMessage, Failure, IndexedErrorMessage, ResponseError as Error}; /// The version of a single API endpoint, e.g. the `v1` in `/eth/v1/beacon/blocks`. #[derive(Debug, Clone, Copy, PartialEq)] From 6f0d0dec75503883542a4af136cd22ee461903e3 Mon Sep 17 00:00:00 2001 From: hopinheimer <48147533+hopinheimer@users.noreply.github.com> Date: Wed, 29 Oct 2025 10:42:57 +0530 Subject: [PATCH 17/44] Fix failing CI for `compile-with-beta-compiler` (#8317) Co-Authored-By: hopinheimer --- consensus/types/src/light_client_bootstrap.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 5850db876c..21e805f2c7 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -188,8 +188,6 @@ impl LightClientBootstrap { block: &SignedBlindedBeaconBlock, chain_spec: &ChainSpec, ) -> Result { - let mut header = beacon_state.latest_block_header().clone(); - header.state_root = beacon_state.update_tree_hash_cache()?; let current_sync_committee_branch = beacon_state.compute_current_sync_committee_proof()?; let current_sync_committee = 
beacon_state.current_sync_committee()?.clone(); From b69c2f5ba1c54760e24d3d18cc8406c9d531ba81 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 29 Oct 2025 00:00:25 -0700 Subject: [PATCH 18/44] Run CI tests only recent forks (#8271) Partially addresses #8248 Run the beacon chain, http and network tests only for recent forks instead of everything from phase 0. Also added gloas also to the recent forks list. I thought that would be a good way to know if changes in the current fork affect future forks. Not completely sure if we should run for future forks, but added it so that we can discuss here. Co-Authored-By: Pawan Dhananjay Co-Authored-By: Jimmy Chen --- Makefile | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index b9f93942f6..2edc9f8632 100644 --- a/Makefile +++ b/Makefile @@ -30,12 +30,8 @@ TEST_FEATURES ?= # Cargo profile for regular builds. PROFILE ?= release -# List of all hard forks. This list is used to set env variables for several tests so that -# they run for different forks. -FORKS=phase0 altair bellatrix capella deneb electra fulu gloas - # List of all recent hard forks. This list is used to set env variables for http_api tests -RECENT_FORKS=electra fulu +RECENT_FORKS=electra fulu gloas # Extra flags for Cargo CARGO_INSTALL_EXTRA_FLAGS?= @@ -170,8 +166,8 @@ run-ef-tests: cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),fake_crypto" ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests -# Run the tests in the `beacon_chain` crate for all known forks. -test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS)) +# Run the tests in the `beacon_chain` crate for recent forks. 
+test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(RECENT_FORKS)) test-beacon-chain-%: env FORK_NAME=$* cargo nextest run --release --features "fork_from_env,slasher/lmdb,$(TEST_FEATURES)" -p beacon_chain @@ -184,15 +180,15 @@ test-http-api-%: # Run the tests in the `operation_pool` crate for all known forks. -test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS)) +test-op-pool: $(patsubst %,test-op-pool-%,$(RECENT_FORKS)) test-op-pool-%: env FORK_NAME=$* cargo nextest run --release \ --features "beacon_chain/fork_from_env,$(TEST_FEATURES)"\ -p operation_pool -# Run the tests in the `network` crate for all known forks. -test-network: $(patsubst %,test-network-%,$(FORKS)) +# Run the tests in the `network` crate for recent forks. +test-network: $(patsubst %,test-network-%,$(RECENT_FORKS)) test-network-%: env FORK_NAME=$* cargo nextest run --release \ From f70c650d814dab9df44da41cc2c150d9f039e2e8 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 29 Oct 2025 19:21:23 +1100 Subject: [PATCH 19/44] Update spec tests to v1.6.0-beta.1 (#8263) Update the EF spec tests to v1.6.0-beta.1 There are a few new light client tests (which we pass), and some for progressive containers, which we haven't implemented (we ignore them). 
Co-Authored-By: Michael Sproul --- testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 2 ++ testing/ef_tests/download_test_vectors.sh | 2 +- testing/ef_tests/src/handler.rs | 3 +-- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index da8640d681..0ead9d0047 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,6 +1,6 @@ # To download/extract nightly tests, run: # CONSENSUS_SPECS_TEST_VERSION=nightly make -CONSENSUS_SPECS_TEST_VERSION ?= v1.6.0-alpha.6 +CONSENSUS_SPECS_TEST_VERSION ?= v1.6.0-beta.1 REPO_NAME := consensus-spec-tests OUTPUT_DIR := ./$(REPO_NAME) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 41e3c4bff7..1f70881a88 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -54,6 +54,8 @@ excluded_paths = [ "tests/general/phase0/ssz_generic/basic_progressive_list", "tests/general/phase0/ssz_generic/containers/.*/ProgressiveBitsStruct.*", "tests/general/phase0/ssz_generic/containers/.*/ProgressiveTestStruct.*", + "tests/general/phase0/ssz_generic/progressive_containers/.*", + "tests/general/phase0/ssz_generic/compatible_unions/.*", # Ignore full epoch tests for now (just test the sub-transitions). "tests/.*/.*/epoch_processing/.*/pre_epoch.ssz_snappy", "tests/.*/.*/epoch_processing/.*/post_epoch.ssz_snappy", diff --git a/testing/ef_tests/download_test_vectors.sh b/testing/ef_tests/download_test_vectors.sh index 7297f7eeb8..21f74e817f 100755 --- a/testing/ef_tests/download_test_vectors.sh +++ b/testing/ef_tests/download_test_vectors.sh @@ -57,7 +57,7 @@ else if [[ ! 
-e "${test}.tar.gz" ]]; then echo "Downloading: ${version}/${test}.tar.gz" curl --progress-bar --location --remote-name --show-error --retry 3 --retry-all-errors --fail \ - "https://github.com/ethereum/consensus-spec-tests/releases/download/${version}/${test}.tar.gz" \ + "https://github.com/ethereum/consensus-specs/releases/download/${version}/${test}.tar.gz" \ || { echo "Curl failed. Aborting" rm -f "${test}.tar.gz" diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index c31a75c335..b49ab2d90d 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -1083,8 +1083,7 @@ impl Handler for LightClientUpdateHandler { fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { // Enabled in Altair - // No test in Fulu yet. - fork_name.altair_enabled() && fork_name != ForkName::Fulu + fork_name.altair_enabled() } } From 1cee814a95a48e51119d62f3de2f37b566ff9a51 Mon Sep 17 00:00:00 2001 From: Odinson Date: Thu, 30 Oct 2025 08:46:07 +0530 Subject: [PATCH 20/44] Fix: custody backfill sync display incorrect time estimation (#8291) Fixes #8268 Switch `est_time` from time until DA boundary slot, to time to finish total custody work from the original earliest data-column slot down to the DA boundary Co-Authored-By: PoulavBhowmick03 --- beacon_node/client/src/notifier.rs | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 10d9587ccc..b1cf1bd7f5 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -298,28 +298,28 @@ pub fn spawn_notifier( let speed = speedo.slots_per_second(); let display_speed = speed.is_some_and(|speed| speed != 0.0); - + let est_time_in_secs = if let (Some(da_boundary_epoch), Some(original_slot)) = ( + beacon_chain.get_column_da_boundary(), + original_earliest_data_column_slot, + ) { + let target = original_slot.saturating_sub( + 
da_boundary_epoch.start_slot(T::EthSpec::slots_per_epoch()), + ); + speedo.estimated_time_till_slot(target) + } else { + None + }; if display_speed { info!( distance, speed = sync_speed_pretty(speed), - est_time = - estimated_time_pretty(beacon_chain.get_column_da_boundary().and_then( - |da_boundary| speedo.estimated_time_till_slot( - da_boundary.start_slot(T::EthSpec::slots_per_epoch()) - ) - )), + est_time = estimated_time_pretty(est_time_in_secs), "Downloading historical data columns" ); } else { info!( distance, - est_time = - estimated_time_pretty(beacon_chain.get_column_da_boundary().and_then( - |da_boundary| speedo.estimated_time_till_slot( - da_boundary.start_slot(T::EthSpec::slots_per_epoch()) - ) - )), + est_time = estimated_time_pretty(est_time_in_secs), "Downloading historical data columns" ); } From 30094f0c08c451087935ab932d1ac64b635085e7 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 30 Oct 2025 14:42:36 +1100 Subject: [PATCH 21/44] Remove redundant `subscribe_all_data_column_subnets` field from network (#8259) Addresses this comment: https://github.com/sigp/lighthouse/pull/8254#discussion_r2447998786 We're currently using `subscribe_all_data_column_subnets` here to subscribe to all subnets https://github.com/sigp/lighthouse/blob/522bd9e9c6ac167f2231525e937c9ebbcb86cf6e/beacon_node/lighthouse_network/src/types/topics.rs#L82-L92 But its unnecessary because the else path also works for supernode (uses `sampling_subnets` instead) The big diffs will disappear once #8254 is merged. 
Co-Authored-By: Jimmy Chen --- beacon_node/lighthouse_network/src/config.rs | 4 -- .../lighthouse_network/src/discovery/enr.rs | 58 +++---------------- .../lighthouse_network/src/discovery/mod.rs | 5 +- .../lighthouse_network/src/service/mod.rs | 2 +- .../lighthouse_network/src/types/globals.rs | 1 - .../lighthouse_network/src/types/topics.rs | 18 ++---- beacon_node/src/config.rs | 1 - lcli/src/generate_bootnode_enr.rs | 2 +- 8 files changed, 17 insertions(+), 74 deletions(-) diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 89c6c58d4f..416ca73e08 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -94,9 +94,6 @@ pub struct Config { /// Attempt to construct external port mappings with UPnP. pub upnp_enabled: bool, - /// Subscribe to all data column subnets for the duration of the runtime. - pub subscribe_all_data_column_subnets: bool, - /// Subscribe to all subnets for the duration of the runtime. 
pub subscribe_all_subnets: bool, @@ -355,7 +352,6 @@ impl Default for Config { upnp_enabled: true, network_load: 3, private: false, - subscribe_all_data_column_subnets: false, subscribe_all_subnets: false, import_all_attestations: false, shutdown_after_sync: false, diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index bb9ff299c5..4c285ea86c 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -159,7 +159,7 @@ pub fn build_or_load_enr( local_key: Keypair, config: &NetworkConfig, enr_fork_id: &EnrForkId, - custody_group_count: Option, + custody_group_count: u64, next_fork_digest: [u8; 4], spec: &ChainSpec, ) -> Result { @@ -185,7 +185,7 @@ pub fn build_enr( enr_key: &CombinedKey, config: &NetworkConfig, enr_fork_id: &EnrForkId, - custody_group_count: Option, + custody_group_count: u64, next_fork_digest: [u8; 4], spec: &ChainSpec, ) -> Result { @@ -280,15 +280,6 @@ pub fn build_enr( // only set `cgc` and `nfd` if PeerDAS fork (Fulu) epoch has been scheduled if spec.is_peer_das_scheduled() { - let custody_group_count = if let Some(cgc) = custody_group_count { - cgc - } else if let Some(false_cgc) = config.advertise_false_custody_group_count { - false_cgc - } else if config.subscribe_all_data_column_subnets { - spec.number_of_custody_groups - } else { - spec.custody_requirement - }; builder.add_value(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY, &custody_group_count); builder.add_value(NEXT_FORK_DIGEST_ENR_KEY, &next_fork_digest); } @@ -373,7 +364,7 @@ mod test { fn build_enr_with_config( config: NetworkConfig, - cgc: Option, + cgc: u64, spec: &ChainSpec, ) -> (Enr, CombinedKey) { let keypair = libp2p::identity::secp256k1::Keypair::generate(); @@ -386,56 +377,23 @@ mod test { #[test] fn test_nfd_enr_encoding() { let spec = make_fulu_spec(); - let enr = build_enr_with_config(NetworkConfig::default(), None, &spec).0; + let enr = + 
build_enr_with_config(NetworkConfig::default(), spec.custody_requirement, &spec).0; assert_eq!(enr.next_fork_digest().unwrap(), TEST_NFD); } - #[test] - fn custody_group_count_default() { - let config = NetworkConfig { - subscribe_all_data_column_subnets: false, - ..NetworkConfig::default() - }; - let spec = make_fulu_spec(); - - let enr = build_enr_with_config(config, None, &spec).0; - - assert_eq!( - enr.custody_group_count::(&spec).unwrap(), - spec.custody_requirement, - ); - } - - #[test] - fn custody_group_count_all() { - let config = NetworkConfig { - subscribe_all_data_column_subnets: true, - ..NetworkConfig::default() - }; - let spec = make_fulu_spec(); - let enr = build_enr_with_config(config, None, &spec).0; - - assert_eq!( - enr.custody_group_count::(&spec).unwrap(), - spec.number_of_custody_groups, - ); - } - #[test] fn custody_group_value() { - let config = NetworkConfig { - subscribe_all_data_column_subnets: true, - ..NetworkConfig::default() - }; + let config = NetworkConfig::default(); let spec = make_fulu_spec(); - let enr = build_enr_with_config(config, Some(42), &spec).0; + let enr = build_enr_with_config(config, 42, &spec).0; assert_eq!(enr.custody_group_count::(&spec).unwrap(), 42); } #[test] fn test_encode_decode_eth2_enr() { - let (enr, _key) = build_enr_with_config(NetworkConfig::default(), None, &E::default_spec()); + let (enr, _key) = build_enr_with_config(NetworkConfig::default(), 4, &E::default_spec()); // Check all Eth2 Mappings are decodeable enr.eth2().unwrap(); enr.attestation_bitfield::().unwrap(); diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 49de62546d..3589882ae9 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1243,11 +1243,12 @@ mod tests { let config = Arc::new(config); let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); let next_fork_digest = [0; 4]; + let 
custody_group_count = spec.custody_requirement; let enr: Enr = build_enr::( &enr_key, &config, &EnrForkId::default(), - None, + custody_group_count, next_fork_digest, &spec, ) @@ -1258,7 +1259,7 @@ mod tests { seq_number: 0, attnets: Default::default(), syncnets: Default::default(), - custody_group_count: spec.custody_requirement, + custody_group_count, }), vec![], false, diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index ea2c53a07f..1df17dffba 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -199,7 +199,7 @@ impl Network { local_keypair.clone(), &config, &ctx.enr_fork_id, - Some(advertised_cgc), + advertised_cgc, next_fork_digest, &ctx.chain_spec, )?; diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index 2a3571c3b7..f46eb05ceb 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -227,7 +227,6 @@ impl NetworkGlobals { TopicConfig { enable_light_client_server: self.config.enable_light_client_server, subscribe_all_subnets: self.config.subscribe_all_subnets, - subscribe_all_data_column_subnets: self.config.subscribe_all_data_column_subnets, sampling_subnets: self.sampling_subnets.read().clone(), } } diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index b22adfbc48..cfdee907b9 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -29,7 +29,6 @@ pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update pub struct TopicConfig { pub enable_light_client_server: bool, pub subscribe_all_subnets: bool, - pub subscribe_all_data_column_subnets: bool, pub sampling_subnets: HashSet, } @@ -80,14 +79,8 @@ pub fn core_topics_to_subscribe( } if 
fork_name.fulu_enabled() { - if opts.subscribe_all_data_column_subnets { - for i in 0..spec.data_column_sidecar_subnet_count { - topics.push(GossipKind::DataColumnSidecar(i.into())); - } - } else { - for subnet in &opts.sampling_subnets { - topics.push(GossipKind::DataColumnSidecar(*subnet)); - } + for subnet in &opts.sampling_subnets { + topics.push(GossipKind::DataColumnSidecar(*subnet)); } } @@ -125,7 +118,6 @@ pub fn all_topics_at_fork(fork: ForkName, spec: &ChainSpec) -> Vec(fork, &opts, spec) @@ -520,7 +512,6 @@ mod tests { TopicConfig { enable_light_client_server: false, subscribe_all_subnets: false, - subscribe_all_data_column_subnets: false, sampling_subnets: sampling_subnets.clone(), } } @@ -552,9 +543,8 @@ mod tests { #[test] fn columns_are_subscribed_in_peerdas() { let spec = get_spec(); - let s = get_sampling_subnets(); - let mut topic_config = get_topic_config(&s); - topic_config.subscribe_all_data_column_subnets = true; + let s = HashSet::from_iter([0.into()]); + let topic_config = get_topic_config(&s); assert!( core_topics_to_subscribe::(ForkName::Fulu, &topic_config, &spec) .contains(&GossipKind::DataColumnSidecar(0.into())) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 3b0e80e0b7..0f169ffaad 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -114,7 +114,6 @@ pub fn get_config( let is_semi_supernode = parse_flag(cli_args, "semi-supernode"); client_config.chain.node_custody_type = if is_supernode { - client_config.network.subscribe_all_data_column_subnets = true; NodeCustodyType::Supernode } else if is_semi_supernode { NodeCustodyType::SemiSupernode diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index ddd36e7e7a..71186904d0 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -43,7 +43,7 @@ pub fn run(matches: &ArgMatches, spec: &ChainSpec) -> Result<(), Str &enr_key, &config, &enr_fork_id, - None, + spec.custody_requirement, 
genesis_fork_digest, spec, ) From 5978b4a677e320935e7dc5597bf608895a8494f2 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 30 Oct 2025 15:48:30 +1100 Subject: [PATCH 22/44] Bump gas limit to 60M (#8331) Bump gas limit to 60M as part of Fusaka mainnet release. Co-Authored-By: Jimmy Chen Co-Authored-By: Jimmy Chen --- .../src/test_utils/execution_block_generator.rs | 2 +- beacon_node/execution_layer/src/test_utils/mock_builder.rs | 2 +- book/src/advanced_builders.md | 4 ++-- book/src/help_vc.md | 2 +- lighthouse/tests/validator_client.rs | 2 +- validator_client/lighthouse_validator_store/src/lib.rs | 2 +- validator_client/src/cli.rs | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 4836f9307c..f1d07ae258 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -29,7 +29,7 @@ use super::DEFAULT_TERMINAL_BLOCK; const TEST_BLOB_BUNDLE: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle.ssz"); const TEST_BLOB_BUNDLE_V2: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle_v2.ssz"); -pub const DEFAULT_GAS_LIMIT: u64 = 45_000_000; +pub const DEFAULT_GAS_LIMIT: u64 = 60_000_000; const GAS_USED: u64 = DEFAULT_GAS_LIMIT - 1; #[derive(Clone, Debug, PartialEq)] diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 6b63881d85..df1e371719 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -40,7 +40,7 @@ use warp::reply::{self, Reply}; use warp::{Filter, Rejection}; pub const DEFAULT_FEE_RECIPIENT: Address = Address::repeat_byte(42); -pub const DEFAULT_GAS_LIMIT: u64 = 45_000_000; +pub const DEFAULT_GAS_LIMIT: u64 = 
60_000_000; pub const DEFAULT_BUILDER_PRIVATE_KEY: &str = "607a11b45a7219cc61a3d9c5fd08c7eebd602a6a19a977f8d3771d5711a550f2"; diff --git a/book/src/advanced_builders.md b/book/src/advanced_builders.md index 3beb7c71c4..7202bd7bb9 100644 --- a/book/src/advanced_builders.md +++ b/book/src/advanced_builders.md @@ -60,7 +60,7 @@ relays, run one of the following services and configure lighthouse to use it wit ## Validator Client Configuration In the validator client you can configure gas limit and fee recipient on a per-validator basis. If no gas limit is -configured, Lighthouse will use a default gas limit of 45,000,000, which is the current default value used in execution +configured, Lighthouse will use a default gas limit of 60,000,000, which is the current default value used in execution engines. You can also enable or disable use of external builders on a per-validator basis rather than using `--builder-proposals`, `--builder-boost-factor` or `--prefer-builder-proposals`, which apply builder related preferences for all validators. In order to manage these configurations per-validator, you can either make updates to the `validator_definitions.yml` file @@ -75,7 +75,7 @@ transaction within the block to the fee recipient, so a discrepancy in fee recip is something afoot. > Note: The gas limit configured here is effectively a vote on block size, so the configuration should not be taken lightly. -> 45,000,000 is currently seen as a value balancing block size with how expensive it is for +> 60,000,000 is currently seen as a value balancing block size with how expensive it is for > the network to validate blocks. So if you don't feel comfortable making an informed "vote", using the default value is > encouraged. We will update the default value if the community reaches a rough consensus on a new value. 
diff --git a/book/src/help_vc.md b/book/src/help_vc.md index bd6a606ff3..b19ff0ba38 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -40,7 +40,7 @@ Options: The gas limit to be used in all builder proposals for all validators managed by this validator client. Note this will not necessarily be used if the gas limit set here moves too far from the previous block's - gas limit. [default: 45000000] + gas limit. [default: 60000000] --genesis-state-url A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server URLs can generally be used with diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 913011ea3a..398c6fbd6b 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -505,7 +505,7 @@ fn no_doppelganger_protection_flag() { fn no_gas_limit_flag() { CommandLineTest::new() .run() - .with_config(|config| assert!(config.validator_store.gas_limit == Some(45_000_000))); + .with_config(|config| assert!(config.validator_store.gas_limit == Some(60_000_000))); } #[test] fn gas_limit_flag() { diff --git a/validator_client/lighthouse_validator_store/src/lib.rs b/validator_client/lighthouse_validator_store/src/lib.rs index ed1ffa6bf6..d10fecb32e 100644 --- a/validator_client/lighthouse_validator_store/src/lib.rs +++ b/validator_client/lighthouse_validator_store/src/lib.rs @@ -56,7 +56,7 @@ const SLASHING_PROTECTION_HISTORY_EPOCHS: u64 = 512; /// Currently used as the default gas limit in execution clients. /// /// https://ethpandaops.io/posts/gaslimit-scaling/. 
-pub const DEFAULT_GAS_LIMIT: u64 = 45_000_000; +pub const DEFAULT_GAS_LIMIT: u64 = 60_000_000; pub struct LighthouseValidatorStore { validators: Arc>, diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 85e40ae6d3..477781d3e8 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -388,7 +388,7 @@ pub struct ValidatorClient { #[clap( long, value_name = "INTEGER", - default_value_t = 45_000_000, + default_value_t = 60_000_000, requires = "builder_proposals", help = "The gas limit to be used in all builder proposals for all validators managed \ by this validator client. Note this will not necessarily be used if the gas limit \ From af9cae4d3e32fadbc022c494a8cad7bbc6012398 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Fri, 31 Oct 2025 00:47:27 +0800 Subject: [PATCH 23/44] Add `version` to the response of beacon API client side (#8326) Co-Authored-By: Tan Chee Keong --- beacon_node/http_api/tests/tests.rs | 30 +++++++++++++++++++++-------- common/eth2/src/lib.rs | 17 +++++++++++----- 2 files changed, 34 insertions(+), 13 deletions(-) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index dc2fd4ae44..3b69430efc 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1316,12 +1316,14 @@ impl ApiTester { .ok() .map(|(state, _execution_optimistic, _finalized)| state); - let result = self + let result = match self .client .get_beacon_states_pending_deposits(state_id.0) .await - .unwrap() - .map(|res| res.data); + { + Ok(response) => response, + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; if result.is_none() && state_opt.is_none() { continue; @@ -1330,7 +1332,12 @@ impl ApiTester { let state = state_opt.as_mut().expect("result should be none"); let expected = state.pending_deposits().unwrap(); - assert_eq!(result.unwrap(), expected.to_vec()); + let response = result.unwrap(); + 
assert_eq!(response.data(), &expected.to_vec()); + + // Check that the version header is returned in the response + let fork_name = state.fork_name(&self.chain.spec).unwrap(); + assert_eq!(response.version(), Some(fork_name),); } self @@ -1343,12 +1350,14 @@ impl ApiTester { .ok() .map(|(state, _execution_optimistic, _finalized)| state); - let result = self + let result = match self .client .get_beacon_states_pending_partial_withdrawals(state_id.0) .await - .unwrap() - .map(|res| res.data); + { + Ok(response) => response, + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; if result.is_none() && state_opt.is_none() { continue; @@ -1357,7 +1366,12 @@ impl ApiTester { let state = state_opt.as_mut().expect("result should be none"); let expected = state.pending_partial_withdrawals().unwrap(); - assert_eq!(result.unwrap(), expected.to_vec()); + let response = result.unwrap(); + assert_eq!(response.data(), &expected.to_vec()); + + // Check that the version header is returned in the response + let fork_name = state.fork_name(&self.chain.spec).unwrap(); + assert_eq!(response.version(), Some(fork_name),); } self diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index a9dd752df0..2641a4c02e 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -827,7 +827,8 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_pending_deposits( &self, state_id: StateId, - ) -> Result>>, Error> { + ) -> Result>>, Error> + { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -837,7 +838,9 @@ impl BeaconNodeHttpClient { .push(&state_id.to_string()) .push("pending_deposits"); - self.get_opt(path).await + self.get_fork_contextual(path, |fork| fork) + .await + .map(|opt| opt.map(BeaconResponse::ForkVersioned)) } /// `GET beacon/states/{state_id}/pending_partial_withdrawals` @@ -846,8 +849,10 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_pending_partial_withdrawals( &self, state_id: StateId, - ) -> Result>>, Error> - { + ) -> 
Result< + Option>>, + Error, + > { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -857,7 +862,9 @@ impl BeaconNodeHttpClient { .push(&state_id.to_string()) .push("pending_partial_withdrawals"); - self.get_opt(path).await + self.get_fork_contextual(path, |fork| fork) + .await + .map(|opt| opt.map(BeaconResponse::ForkVersioned)) } /// `GET beacon/states/{state_id}/pending_consolidations` From 55588f778937485bc21723e5dd808c67fb055a80 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Fri, 31 Oct 2025 01:08:37 -0700 Subject: [PATCH 24/44] Rust 1.91 lints (#8340) Co-Authored-By: Eitan Seri- Levi --- beacon_node/beacon_chain/src/attestation_verification.rs | 2 +- .../beacon_chain/src/sync_committee_verification.rs | 2 +- common/eth2/src/types.rs | 9 ++------- validator_client/http_api/src/tests/keystores.rs | 4 ++-- validator_client/initialized_validators/src/key_cache.rs | 2 +- 5 files changed, 7 insertions(+), 12 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 470664d442..f740d221c0 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -1344,7 +1344,7 @@ pub fn verify_signed_aggregate_signatures( .spec .fork_at_epoch(indexed_attestation.data().target.epoch); - let signature_sets = vec![ + let signature_sets = [ signed_aggregate_selection_proof_signature_set( |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed), signed_aggregate, diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index f804176921..41d29d5526 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -628,7 +628,7 @@ pub fn verify_signed_aggregate_signatures( (signed_aggregate.message.contribution.slot + 
1).epoch(T::EthSpec::slots_per_epoch()); let fork = chain.spec.fork_at_epoch(next_slot_epoch); - let signature_sets = vec![ + let signature_sets = [ signed_sync_aggregate_selection_proof_signature_set( |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed), signed_aggregate, diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 8f553b57d9..f35518ee6b 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1560,20 +1560,15 @@ pub struct ForkChoiceNode { pub execution_block_hash: Option, } -#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +#[derive(Copy, Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] #[serde(rename_all = "snake_case")] pub enum BroadcastValidation { + #[default] Gossip, Consensus, ConsensusAndEquivocation, } -impl Default for BroadcastValidation { - fn default() -> Self { - Self::Gossip - } -} - impl Display for BroadcastValidation { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { diff --git a/validator_client/http_api/src/tests/keystores.rs b/validator_client/http_api/src/tests/keystores.rs index a3c6cb4be3..dd2266e3f6 100644 --- a/validator_client/http_api/src/tests/keystores.rs +++ b/validator_client/http_api/src/tests/keystores.rs @@ -2091,7 +2091,7 @@ async fn import_remotekey_web3signer_disabled() { // Import web3signers. tester .client - .post_lighthouse_validators_web3signer(&vec![web3signer_req]) + .post_lighthouse_validators_web3signer(&[web3signer_req]) .await .unwrap(); @@ -2146,7 +2146,7 @@ async fn import_remotekey_web3signer_enabled() { // Import web3signers. 
tester .client - .post_lighthouse_validators_web3signer(&vec![web3signer_req.clone()]) + .post_lighthouse_validators_web3signer(&[web3signer_req.clone()]) .await .unwrap(); diff --git a/validator_client/initialized_validators/src/key_cache.rs b/validator_client/initialized_validators/src/key_cache.rs index a5a481923d..b600013c8b 100644 --- a/validator_client/initialized_validators/src/key_cache.rs +++ b/validator_client/initialized_validators/src/key_cache.rs @@ -291,7 +291,7 @@ mod tests { #[tokio::test] async fn test_encryption() { let mut key_cache = KeyCache::new(); - let keypairs = vec![Keypair::random(), Keypair::random()]; + let keypairs = [Keypair::random(), Keypair::random()]; let uuids = vec![Uuid::from_u128(1), Uuid::from_u128(2)]; let passwords = vec![ PlainText::from(vec![1, 2, 3, 4, 5, 6]), From b57d046c4ad9cf60a8053c4c43ea99e4b326bc01 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Sun, 2 Nov 2025 16:51:42 -0800 Subject: [PATCH 25/44] Fix CGC backfill race condition (#8267) During custody backfill sync there could be an edge case where we update CGC at the same time as we are importing a batch of columns, which may cause us to incorrectly overwrite values when calling `backfill_validator_custody_requirements`. To prevent this race condition, the expected cgc is now passed into this function and is used to check if the expected cgc == the current validator cgc. If the values aren't equal, this probably indicates that a very recent CGC change occurred, so we do not prune/update values in the `epoch_validator_custody_requirements` map. 
Co-Authored-By: Eitan Seri-Levi --- .../beacon_chain/src/custody_context.rs | 79 +++++++++++++-- .../src/historical_data_columns.rs | 3 +- beacon_node/beacon_chain/tests/store_tests.rs | 97 ++++--------------- .../src/network_beacon_processor/mod.rs | 4 +- .../network_beacon_processor/sync_methods.rs | 10 +- .../src/sync/custody_backfill_sync/mod.rs | 1 + 6 files changed, 103 insertions(+), 91 deletions(-) diff --git a/beacon_node/beacon_chain/src/custody_context.rs b/beacon_node/beacon_chain/src/custody_context.rs index 0da0e7573e..9a6f51174a 100644 --- a/beacon_node/beacon_chain/src/custody_context.rs +++ b/beacon_node/beacon_chain/src/custody_context.rs @@ -134,8 +134,17 @@ impl ValidatorRegistrations { /// /// This is done by pruning all values on/after `effective_epoch` and updating the map to store /// the latest validator custody requirements for the `effective_epoch`. - pub fn backfill_validator_custody_requirements(&mut self, effective_epoch: Epoch) { + pub fn backfill_validator_custody_requirements( + &mut self, + effective_epoch: Epoch, + expected_cgc: u64, + ) { if let Some(latest_validator_custody) = self.latest_validator_custody_requirement() { + // If the expected cgc isn't equal to the latest validator custody a very recent cgc change may have occurred. + // We should not update the mapping. + if expected_cgc != latest_validator_custody { + return; + } // Delete records if // 1. The epoch is greater than or equal than `effective_epoch` // 2. the cgc requirements match the latest validator custody requirements @@ -517,10 +526,14 @@ impl CustodyContext { /// The node has completed backfill for this epoch. Update the internal records so the function /// [`Self::custody_columns_for_epoch()`] returns up-to-date results. 
- pub fn update_and_backfill_custody_count_at_epoch(&self, effective_epoch: Epoch) { + pub fn update_and_backfill_custody_count_at_epoch( + &self, + effective_epoch: Epoch, + expected_cgc: u64, + ) { self.validator_registrations .write() - .backfill_validator_custody_requirements(effective_epoch); + .backfill_validator_custody_requirements(effective_epoch, expected_cgc); } } @@ -604,11 +617,13 @@ mod tests { custody_context: &CustodyContext, start_epoch: Epoch, end_epoch: Epoch, + expected_cgc: u64, ) { assert!(start_epoch >= end_epoch); // Call from end_epoch down to start_epoch (inclusive), simulating backfill for epoch in (end_epoch.as_u64()..=start_epoch.as_u64()).rev() { - custody_context.update_and_backfill_custody_count_at_epoch(Epoch::new(epoch)); + custody_context + .update_and_backfill_custody_count_at_epoch(Epoch::new(epoch), expected_cgc); } } @@ -1368,7 +1383,7 @@ mod tests { ); // Backfill from epoch 20 down to 15 (simulating backfill) - complete_backfill_for_epochs(&custody_context, head_epoch, Epoch::new(15)); + complete_backfill_for_epochs(&custody_context, head_epoch, Epoch::new(15), final_cgc); // After backfilling to epoch 15, it should use latest CGC (32) assert_eq!( @@ -1406,7 +1421,7 @@ mod tests { let custody_context = setup_custody_context(&spec, head_epoch, epoch_and_cgc_tuples); // Backfill to epoch 15 (between the two CGC increases) - complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(15)); + complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(15), final_cgc); // Verify epochs 15 - 20 return latest CGC (32) for epoch in 15..=20 { @@ -1424,4 +1439,56 @@ mod tests { ); } } + + #[test] + fn attempt_backfill_with_invalid_cgc() { + let spec = E::default_spec(); + let initial_cgc = 8u64; + let mid_cgc = 16u64; + let final_cgc = 32u64; + + // Setup: Node restart after multiple validator registrations causing CGC increases + let head_epoch = Epoch::new(20); + let epoch_and_cgc_tuples = vec![ + 
(Epoch::new(0), initial_cgc), + (Epoch::new(10), mid_cgc), + (head_epoch, final_cgc), + ]; + let custody_context = setup_custody_context(&spec, head_epoch, epoch_and_cgc_tuples); + + // Backfill to epoch 15 (between the two CGC increases) + complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(15), final_cgc); + + // Verify epochs 15 - 20 return latest CGC (32) + for epoch in 15..=20 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + final_cgc, + ); + } + + // Attempt backfill with an incorrect cgc value + complete_backfill_for_epochs( + &custody_context, + Epoch::new(20), + Epoch::new(15), + initial_cgc, + ); + + // Verify epochs 15 - 20 still return latest CGC (32) + for epoch in 15..=20 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + final_cgc, + ); + } + + // Verify epochs 10-14 still return mid_cgc (16) + for epoch in 10..14 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + mid_cgc, + ); + } + } } diff --git a/beacon_node/beacon_chain/src/historical_data_columns.rs b/beacon_node/beacon_chain/src/historical_data_columns.rs index 7e196eb75e..9304f06570 100644 --- a/beacon_node/beacon_chain/src/historical_data_columns.rs +++ b/beacon_node/beacon_chain/src/historical_data_columns.rs @@ -54,6 +54,7 @@ impl BeaconChain { &self, epoch: Epoch, historical_data_column_sidecar_list: DataColumnSidecarList, + expected_cgc: u64, ) -> Result { let mut total_imported = 0; let mut ops = vec![]; @@ -136,7 +137,7 @@ impl BeaconChain { self.data_availability_checker .custody_context() - .update_and_backfill_custody_count_at_epoch(epoch); + .update_and_backfill_custody_count_at_epoch(epoch, expected_cgc); self.safely_backfill_data_column_custody_info(epoch) .map_err(|e| HistoricalDataColumnError::BeaconChainError(Box::new(e)))?; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs 
index 53e841692e..7891b22432 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -3182,6 +3182,8 @@ async fn weak_subjectivity_sync_test( assert_eq!(store.get_anchor_info().state_upper_limit, Slot::new(0)); } +// This test prunes data columns from epoch 0 and then tries to re-import them via +// the same code paths that custody backfill sync imports data columns #[tokio::test] async fn test_import_historical_data_columns_batch() { let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); @@ -3189,6 +3191,7 @@ async fn test_import_historical_data_columns_batch() { let store = get_store_generic(&db_path, StoreConfig::default(), spec); let start_slot = Epoch::new(0).start_slot(E::slots_per_epoch()) + 1; let end_slot = Epoch::new(0).end_slot(E::slots_per_epoch()); + let cgc = 128; let harness = get_harness_import_all_data_columns(store.clone(), LOW_VALIDATOR_COUNT); @@ -3208,6 +3211,7 @@ async fn test_import_historical_data_columns_batch() { let mut data_columns_list = vec![]; + // Get all data columns for epoch 0 for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); @@ -3227,6 +3231,7 @@ async fn test_import_historical_data_columns_batch() { harness.advance_slot(); + // Prune data columns harness .chain .store @@ -3238,21 +3243,25 @@ async fn test_import_historical_data_columns_batch() { .forwards_iter_block_roots_until(start_slot, end_slot) .unwrap(); + // Assert that data columns no longer exist for epoch 0 for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); assert!(data_columns.is_none()) } + // Re-import deleted data columns harness .chain - .import_historical_data_column_batch(Epoch::new(0), data_columns_list) + .import_historical_data_column_batch(Epoch::new(0), data_columns_list, cgc) .unwrap(); + let 
block_root_iter = harness .chain .forwards_iter_block_roots_until(start_slot, end_slot) .unwrap(); + // Assert that data columns now exist for epoch 0 for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); @@ -3261,6 +3270,7 @@ async fn test_import_historical_data_columns_batch() { } // This should verify that a data column sidecar containing mismatched block roots should fail to be imported. +// This also covers any test cases related to data columns with incorrect/invalid/mismatched block roots. #[tokio::test] async fn test_import_historical_data_columns_batch_mismatched_block_root() { let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); @@ -3268,6 +3278,7 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { let store = get_store_generic(&db_path, StoreConfig::default(), spec); let start_slot = Slot::new(1); let end_slot = Slot::new(E::slots_per_epoch() * 2 - 1); + let cgc = 128; let harness = get_harness_import_all_data_columns(store.clone(), LOW_VALIDATOR_COUNT); @@ -3287,6 +3298,8 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { let mut data_columns_list = vec![]; + // Get all data columns from start_slot to end_slot + // and mutate the data columns with an invalid block root for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); @@ -3312,6 +3325,7 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { harness.advance_slot(); + // Prune blobs harness .chain .store @@ -3323,17 +3337,20 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { .forwards_iter_block_roots_until(start_slot, end_slot) .unwrap(); + // Assert there are no columns between start_slot and end_slot for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = 
harness.chain.store.get_data_columns(&block_root).unwrap(); assert!(data_columns.is_none()) } + // Attempt to import data columns with invalid block roots and expect a failure let error = harness .chain .import_historical_data_column_batch( start_slot.epoch(E::slots_per_epoch()), data_columns_list, + cgc, ) .unwrap_err(); @@ -3343,84 +3360,6 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { )); } -// This should verify that a data column sidecar associated to a block root that doesn't exist in the store cannot -// be imported. -#[tokio::test] -async fn test_import_historical_data_columns_batch_no_block_found() { - let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); - let db_path = tempdir().unwrap(); - let store = get_store_generic(&db_path, StoreConfig::default(), spec); - let start_slot = Slot::new(1); - let end_slot = Slot::new(E::slots_per_epoch() * 2 - 1); - - let harness = get_harness_import_all_data_columns(store.clone(), LOW_VALIDATOR_COUNT); - - harness - .extend_chain( - (E::slots_per_epoch() * 2) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ) - .await; - harness.advance_slot(); - - let block_root_iter = harness - .chain - .forwards_iter_block_roots_until(start_slot, end_slot) - .unwrap(); - - let mut data_columns_list = vec![]; - - for block in block_root_iter { - let (block_root, _) = block.unwrap(); - let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); - assert!(data_columns.is_some()); - - for data_column in data_columns.unwrap() { - let mut data_column = (*data_column).clone(); - data_column.signed_block_header.message.body_root = Hash256::ZERO; - data_columns_list.push(Arc::new(data_column)); - } - } - - harness - .extend_chain( - (E::slots_per_epoch() * 4) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ) - .await; - - harness.advance_slot(); - - harness - .chain - .store - .try_prune_blobs(true, 
Epoch::new(2)) - .unwrap(); - - let block_root_iter = harness - .chain - .forwards_iter_block_roots_until(start_slot, end_slot) - .unwrap(); - - for block in block_root_iter { - let (block_root, _) = block.unwrap(); - let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); - assert!(data_columns.is_none()) - } - - let error = harness - .chain - .import_historical_data_column_batch(Epoch::new(0), data_columns_list) - .unwrap_err(); - - assert!(matches!( - error, - HistoricalDataColumnError::NoBlockFound { .. } - )); -} - /// Test that blocks and attestations that refer to states around an unaligned split state are /// processed correctly. #[tokio::test] diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 7441e92871..5fa2361f28 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -497,9 +497,11 @@ impl NetworkBeaconProcessor { self: &Arc, batch_id: CustodyBackfillBatchId, data_columns: DataColumnSidecarList, + expected_cgc: u64, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = move || processor.process_historic_data_columns(batch_id, data_columns); + let process_fn = + move || processor.process_historic_data_columns(batch_id, data_columns, expected_cgc); let work = Work::ChainSegmentBackfill(Box::new(process_fn)); diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 41b12fa01b..41160fcfe4 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -426,6 +426,7 @@ impl NetworkBeaconProcessor { &self, batch_id: CustodyBackfillBatchId, downloaded_columns: DataColumnSidecarList, + expected_cgc: u64, ) { let _guard = debug_span!( SPAN_CUSTODY_BACKFILL_SYNC_IMPORT_COLUMNS, @@ 
-435,10 +436,11 @@ impl NetworkBeaconProcessor { .entered(); let sent_columns = downloaded_columns.len(); - let result = match self - .chain - .import_historical_data_column_batch(batch_id.epoch, downloaded_columns) - { + let result = match self.chain.import_historical_data_column_batch( + batch_id.epoch, + downloaded_columns, + expected_cgc, + ) { Ok(imported_columns) => { metrics::inc_counter_by( &metrics::BEACON_PROCESSOR_CUSTODY_BACKFILL_COLUMN_IMPORT_SUCCESS_TOTAL, diff --git a/beacon_node/network/src/sync/custody_backfill_sync/mod.rs b/beacon_node/network/src/sync/custody_backfill_sync/mod.rs index 69df3422e6..5c5505083f 100644 --- a/beacon_node/network/src/sync/custody_backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/custody_backfill_sync/mod.rs @@ -504,6 +504,7 @@ impl CustodyBackFillSync { run_id: self.run_id, }, data_columns, + self.cgc, ) { crit!( msg = "process_batch", From 2c9b670f5d313450252c6cb40a5ee34802d54fef Mon Sep 17 00:00:00 2001 From: Mac L Date: Mon, 3 Nov 2025 06:46:31 +0400 Subject: [PATCH 26/44] Rework `lighthouse_version` to reduce spurious recompilation (#8336) #8311 Removes the `git_version` crate from `lighthouse_version` and implements git `HEAD` tracking manually. This removes the (mostly) broken dirty tracking but prevents spurious recompilation of the `lighthouse_version` crate. This also reworks the way crate versions are handled by utilizing workspace version inheritance and Cargo environment variables. This means the _only_ place where Lighthouse's version is defined is in the top level `Cargo.toml` for the workspace. All relevant binaries then inherit this version. This largely makes the `change_version.sh` script useless so I've removed it, although we could keep a version which just alters the workspace version (if we need to maintain compatibility with certain build/release tooling. ### When is a Rebuild Triggered? 1. When the build.rs file is changed. 2. When the HEAD commit changes (added, removed, rebased, etc) 3. 
When the branch changes (this includes changing to the current branch, and creating a detached HEAD) Note that working/staged changes will not trigger a recompile of `lighthouse_version`. Co-Authored-By: Mac L Co-Authored-By: Michael Sproul --- Cargo.lock | 27 ++-------- Cargo.toml | 1 + account_manager/Cargo.toml | 2 +- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/Cargo.toml | 6 +-- common/lighthouse_version/build.rs | 81 ++++++++++++++++++++++++++++ common/lighthouse_version/src/lib.rs | 50 +++++++---------- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- scripts/change_version.sh | 34 ------------ validator_client/Cargo.toml | 2 +- 12 files changed, 110 insertions(+), 101 deletions(-) create mode 100644 common/lighthouse_version/build.rs delete mode 100755 scripts/change_version.sh diff --git a/Cargo.lock b/Cargo.lock index 8a282a60b7..bf2d6dd2d1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,7 +4,7 @@ version = 4 [[package]] name = "account_manager" -version = "0.3.5" +version = "8.0.0-rc.2" dependencies = [ "account_utils", "bls", @@ -3860,26 +3860,6 @@ version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" -[[package]] -name = "git-version" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad568aa3db0fcbc81f2f116137f263d7304f512a1209b35b85150d3ef88ad19" -dependencies = [ - "git-version-macro", -] - -[[package]] -name = "git-version-macro" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.100", -] - [[package]] name = "glob" version = "0.3.2" @@ -5711,9 +5691,8 @@ dependencies = [ [[package]] name = "lighthouse_version" -version = "0.1.0" +version = "8.0.0-rc.2" dependencies = [ - "git-version", 
"regex", ] @@ -10112,7 +10091,7 @@ dependencies = [ [[package]] name = "validator_client" -version = "0.3.5" +version = "8.0.0-rc.2" dependencies = [ "account_utils", "beacon_node_fallback", diff --git a/Cargo.toml b/Cargo.toml index a979907769..0ca8cbf83c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -94,6 +94,7 @@ resolver = "2" [workspace.package] edition = "2024" +version = "8.0.0-rc.2" [workspace.dependencies] account_utils = { path = "common/account_utils" } diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 071e2681dd..8dd50cbc6e 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "account_manager" -version = "0.3.5" +version = { workspace = true } authors = [ "Paul Hauner ", "Luke Anderson ", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 56c2fb410c..fd01355978 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "8.0.0-rc.2" +version = { workspace = true } authors = [ "Paul Hauner ", "Age Manning "] edition = { workspace = true } diff --git a/common/lighthouse_version/Cargo.toml b/common/lighthouse_version/Cargo.toml index b7e669ed94..ab9509cb1e 100644 --- a/common/lighthouse_version/Cargo.toml +++ b/common/lighthouse_version/Cargo.toml @@ -1,12 +1,8 @@ [package] name = "lighthouse_version" -version = "0.1.0" +version = { workspace = true } authors = ["Sigma Prime "] edition = { workspace = true } -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -git-version = "0.3.4" [dev-dependencies] regex = { workspace = true } diff --git a/common/lighthouse_version/build.rs b/common/lighthouse_version/build.rs new file mode 100644 index 0000000000..1af99996df --- /dev/null +++ b/common/lighthouse_version/build.rs @@ -0,0 +1,81 @@ +use std::env; +use std::fs; +use std::path::Path; +use std::process::Command; + +const CLIENT_NAME: &str = 
"Lighthouse"; + +fn main() { + println!("cargo:rerun-if-changed=build.rs"); + + let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); + let manifest_path = Path::new(&manifest_dir); + + // The crate version is inherited from the workspace. + let semantic_version = env::var("CARGO_PKG_VERSION").unwrap(); + + // Hardcode the .git/ path. + // This assumes the `lighthouse_version` crate will never move. + let git_dir = manifest_path.join("../../.git"); + + if git_dir.exists() { + // HEAD either contains a commit hash directly (detached HEAD), or a reference to a branch. + let head_path = git_dir.join("HEAD"); + if head_path.exists() { + println!("cargo:rerun-if-changed={}", head_path.display()); + + if let Ok(head_content) = fs::read_to_string(&head_path) { + let head_content = head_content.trim(); + + // If HEAD is a reference, also check that file. + if let Some(ref_path) = head_content.strip_prefix("ref: ") { + let full_ref_path = git_dir.join(ref_path); + if full_ref_path.exists() { + println!("cargo:rerun-if-changed={}", full_ref_path.display()); + } + } + } + } + } + + // Construct Lighthouse version string without commit hash. + let base_version = format!("{}/v{}", CLIENT_NAME, semantic_version); + + let commit_hash = get_git_hash(7); + let commit_prefix = get_git_hash(8); + + // If commit hash is valid, construct the full version string. 
+ let version = if !commit_hash.is_empty() && commit_hash.len() >= 7 { + format!("{}-{}", base_version, commit_hash) + } else { + base_version + }; + + println!("cargo:rustc-env=GIT_VERSION={}", version); + println!("cargo:rustc-env=GIT_COMMIT_PREFIX={}", commit_prefix); + println!("cargo:rustc-env=CLIENT_NAME={}", CLIENT_NAME); + println!("cargo:rustc-env=SEMANTIC_VERSION={}", semantic_version); +} + +fn get_git_hash(len: usize) -> String { + Command::new("git") + .args(["rev-parse", &format!("--short={}", len), "HEAD"]) + .output() + .ok() + .and_then(|output| { + if output.status.success() { + String::from_utf8(output.stdout).ok() + } else { + None + } + }) + .map(|s| s.trim().to_string()) + .unwrap_or_else(|| { + // Fallback commit prefix for execution engine reporting. + if len == 8 { + "00000000".to_string() + } else { + String::new() + } + }) +} diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index bd7b37926f..1466487520 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -1,49 +1,25 @@ -use git_version::git_version; use std::env::consts; /// Returns the current version of this build of Lighthouse. /// -/// A plus-sign (`+`) is appended to the git commit if the tree is dirty. /// Commit hash is omitted if the sources don't include git information. /// /// ## Example /// -/// `Lighthouse/v1.5.1-67da032+` -pub const VERSION: &str = git_version!( - args = [ - "--always", - "--dirty=+", - "--abbrev=7", - // NOTE: using --match instead of --exclude for compatibility with old Git - "--match=thiswillnevermatchlol" - ], - prefix = "Lighthouse/v8.0.0-rc.2-", - fallback = "Lighthouse/v8.0.0-rc.2" -); +/// `Lighthouse/v8.0.0-67da032` +pub const VERSION: &str = env!("GIT_VERSION"); /// Returns the first eight characters of the latest commit hash for this build. /// /// No indication is given if the tree is dirty. 
This is part of the standard /// for reporting the client version to the execution engine. -pub const COMMIT_PREFIX: &str = git_version!( - args = [ - "--always", - "--abbrev=8", - // NOTE: using --match instead of --exclude for compatibility with old Git - "--match=thiswillnevermatchlol" - ], - prefix = "", - suffix = "", - cargo_prefix = "", - cargo_suffix = "", - fallback = "00000000" -); +pub const COMMIT_PREFIX: &str = env!("GIT_COMMIT_PREFIX"); /// Returns `VERSION`, but with platform information appended to the end. /// /// ## Example /// -/// `Lighthouse/v1.5.1-67da032+/x86_64-linux` +/// `Lighthouse/v8.0.0-67da032/x86_64-linux` pub fn version_with_platform() -> String { format!("{}/{}-{}", VERSION, consts::ARCH, consts::OS) } @@ -52,16 +28,16 @@ pub fn version_with_platform() -> String { /// /// ## Example /// -/// `1.5.1` +/// `8.0.0` pub fn version() -> &'static str { - "8.0.0-rc.2" + env!("SEMANTIC_VERSION") } /// Returns the name of the current client running. /// /// This will usually be "Lighthouse" pub fn client_name() -> &'static str { - "Lighthouse" + env!("CLIENT_NAME") } #[cfg(test)] @@ -72,7 +48,7 @@ mod test { #[test] fn version_formatting() { let re = Regex::new( - r"^Lighthouse/v[0-9]+\.[0-9]+\.[0-9]+(-(rc|beta).[0-9])?(-[[:xdigit:]]{7})?\+?$", + r"^Lighthouse/v[0-9]+\.[0-9]+\.[0-9]+(-(rc|beta)\.[0-9])?(-[[:xdigit:]]{7})?$", ) .unwrap(); assert!( @@ -91,4 +67,14 @@ mod test { version() ); } + + #[test] + fn client_name_is_lighthouse() { + assert_eq!(client_name(), "Lighthouse"); + } + + #[test] + fn version_contains_semantic_version() { + assert!(VERSION.contains(version())); + } } diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 2698073b5f..04eb41960b 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "8.0.0-rc.2" +version = { workspace = true } authors = ["Paul Hauner "] edition = { workspace = true } diff --git 
a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index a3240c6d7c..ebe00c9be5 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "8.0.0-rc.2" +version = { workspace = true } authors = ["Sigma Prime "] edition = { workspace = true } autotests = false diff --git a/scripts/change_version.sh b/scripts/change_version.sh deleted file mode 100755 index bda87fd863..0000000000 --- a/scripts/change_version.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env bash - -# Change the version across multiple files, prior to a release. Use `sed` to -# find/replace the exiting version with the new one. -# -# Takes two arguments: -# -# 1. Current version (e.g., `0.2.6`) -# 2. New version (e.g., `0.2.7`) -# -# ## Example: -# -# `./change_version.sh 0.2.6 0.2.7` - -FROM=$1 -TO=$2 -VERSION_CRATE="../common/lighthouse_version/src/lib.rs" - -update_cargo_toml () { - echo $1 - sed -i -e "s/version = \"$FROM\"/version = \"$TO\"/g" $1 -} - -echo "Changing version from $FROM to $TO" - -update_cargo_toml ../account_manager/Cargo.toml -update_cargo_toml ../beacon_node/Cargo.toml -update_cargo_toml ../boot_node/Cargo.toml -update_cargo_toml ../lcli/Cargo.toml -update_cargo_toml ../lighthouse/Cargo.toml -update_cargo_toml ../validator_client/Cargo.toml - -echo $VERSION_CRATE -sed -i -e "s/$FROM/$TO/g" $VERSION_CRATE diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index a8c8fd59f1..6990a2f61a 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "validator_client" -version = "0.3.5" +version = { workspace = true } authors = ["Sigma Prime "] edition = { workspace = true } From 25832e586246823007209e6841776d772dd8eda4 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Sun, 2 Nov 2025 22:53:13 -0800 Subject: [PATCH 27/44] Add mainnet configs (#8344) #8135 mainnet config PR: https://github.com/eth-clients/mainnet/pull/11 Co-Authored-By: Eitan Seri-Levi 
Co-Authored-By: Michael Sproul Co-Authored-By: Tan Chee Keong --- .../src/data_column_verification.rs | 16 ++++ .../test_utils/execution_block_generator.rs | 40 +++++++- .../mainnet/config.yaml | 95 +++++++++++++------ consensus/types/presets/gnosis/electra.yaml | 3 +- consensus/types/src/chain_spec.rs | 34 ++++++- 5 files changed, 150 insertions(+), 38 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 07f85b045a..61fc0677b1 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -850,6 +850,22 @@ mod test { .build(); harness.advance_slot(); + // Check block generator timestamp conversion sanity. + { + let exec_block_generator = harness.execution_block_generator(); + assert_eq!( + exec_block_generator + .timestamp_to_slot_post_capella(exec_block_generator.osaka_time.unwrap()), + 0 + ); + assert_eq!( + exec_block_generator.timestamp_to_slot_post_capella( + exec_block_generator.osaka_time.unwrap() + harness.spec.seconds_per_slot + ), + 1 + ); + } + let verify_fn = |column_sidecar: DataColumnSidecar| { GossipVerifiedDataColumn::<_>::new_for_block_publishing( column_sidecar.into(), diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index f1d07ae258..44e72cba6b 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -21,7 +21,7 @@ use types::{ Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadGloas, ExecutionPayloadHeader, FixedBytesExtended, ForkName, Hash256, - KzgProofs, Transaction, Transactions, Uint256, + KzgProofs, Slot, 
Transaction, Transactions, Uint256, }; use super::DEFAULT_TERMINAL_BLOCK; @@ -265,6 +265,37 @@ impl ExecutionBlockGenerator { ForkName::Bellatrix } + /// Get the timestamp at which `fork` activates. + /// + /// This function will panic if the `fork` is not enabled or is `<= ForkName::Bellatrix`. + pub fn get_fork_timestamp_post_capella(&self, fork: ForkName) -> u64 { + match fork { + ForkName::Gloas => self.amsterdam_time, + ForkName::Fulu => self.osaka_time, + ForkName::Electra => self.prague_time, + ForkName::Deneb => self.cancun_time, + ForkName::Capella => self.shanghai_time, + _ => panic!("only the Capella fork or later is supported"), + } + .unwrap_or_else(|| panic!("fork is {fork} but no corresponding timestamp is set")) + } + + /// This is a slightly nasty method for converting timestamps to slots, but it will suffice + /// until we can plumb through a slot clock. + pub fn timestamp_to_slot_post_capella(&self, timestamp: u64) -> Slot { + let fork = self.get_fork_at_timestamp(timestamp); + let fork_epoch = self.spec.fork_epoch(fork).unwrap(); + let fork_timestamp = self.get_fork_timestamp_post_capella(fork); + + // Number of slots since fork. + let slot_offset = timestamp + .checked_sub(fork_timestamp) + .expect("timestamp should be >= fork timestamp") + / self.spec.seconds_per_slot; + + fork_epoch.start_slot(E::slots_per_epoch()) + Slot::new(slot_offset) + } + pub fn execution_block_by_number(&self, number: u64) -> Option { self.block_by_number(number) .map(|block| block.as_execution_block(self.terminal_total_difficulty)) @@ -734,9 +765,10 @@ impl ExecutionBlockGenerator { if fork_name.deneb_enabled() { // get random number between 0 and Max Blobs let mut rng = self.rng.lock(); - // TODO(EIP-7892): see FIXME below - // FIXME: this will break with BPO forks. This function needs to calculate the epoch based on block timestamp.. 
- let max_blobs = self.spec.max_blobs_per_block_within_fork(fork_name) as usize; + let epoch = self + .timestamp_to_slot_post_capella(execution_payload.timestamp()) + .epoch(E::slots_per_epoch()); + let max_blobs = self.spec.max_blobs_per_block(epoch) as usize; let num_blobs = rng.random_range(self.min_blobs_count..=max_blobs); let (bundle, transactions) = generate_blobs(num_blobs, fork_name)?; for tx in Vec::from(transactions) { diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index ca7f85b512..49168018cb 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -6,7 +6,9 @@ PRESET_BASE: 'mainnet' # Free-form short name of the network that this configuration applies to - known # canonical network names include: # * 'mainnet' - there can be only one +# * 'sepolia' - testnet # * 'holesky' - testnet +# * 'hoodi' - testnet # Must match the regex: [a-z0-9\-] CONFIG_NAME: 'mainnet' @@ -52,23 +54,37 @@ ELECTRA_FORK_VERSION: 0x05000000 ELECTRA_FORK_EPOCH: 364032 # May 7, 2025, 10:05:11am UTC # Fulu FULU_FORK_VERSION: 0x06000000 -FULU_FORK_EPOCH: 18446744073709551615 +FULU_FORK_EPOCH: 411392 # December 3, 2025, 09:49:11pm UTC # Gloas GLOAS_FORK_VERSION: 0x07000000 GLOAS_FORK_EPOCH: 18446744073709551615 # Time parameters # --------------------------------------------------------------- -# 12 seconds +# 12 seconds (*deprecated*) SECONDS_PER_SLOT: 12 +# 12000 milliseconds +SLOT_DURATION_MS: 12000 # 14 (estimate from Eth1 mainnet) SECONDS_PER_ETH1_BLOCK: 14 -# 2**8 (= 256) epochs ~27 hours +# 2**8 (= 256) epochs MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -# 2**8 (= 256) epochs ~27 hours +# 2**8 (= 256) epochs SHARD_COMMITTEE_PERIOD: 256 -# 2**11 (= 2,048) Eth1 blocks ~8 hours +# 2**11 (= 2,048) Eth1 blocks ETH1_FOLLOW_DISTANCE: 2048 +# 1667 basis points, ~17% of 
SLOT_DURATION_MS +PROPOSER_REORG_CUTOFF_BPS: 1667 +# 3333 basis points, ~33% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS: 6667 + +# Altair +# 3333 basis points, ~33% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS: 6667 # Validator cycle # --------------------------------------------------------------- @@ -78,13 +94,21 @@ INACTIVITY_SCORE_BIAS: 4 INACTIVITY_SCORE_RECOVERY_RATE: 16 # 2**4 * 10**9 (= 16,000,000,000) Gwei EJECTION_BALANCE: 16000000000 -# 2**2 (= 4) +# 2**2 (= 4) validators MIN_PER_EPOCH_CHURN_LIMIT: 4 # 2**16 (= 65,536) CHURN_LIMIT_QUOTIENT: 65536 -# [New in Deneb:EIP7514] 2**3 (= 8) + +# Deneb +# 2**3 (= 8) (*deprecated*) MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 +# Electra +# 2**7 * 10**9 (= 128,000,000,000) Gwei +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) Gwei +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 + # Fork choice # --------------------------------------------------------------- # 40% @@ -93,7 +117,7 @@ PROPOSER_SCORE_BOOST: 40 REORG_HEAD_WEIGHT_THRESHOLD: 20 # 160% REORG_PARENT_WEIGHT_THRESHOLD: 160 -# `2` epochs +# 2 epochs REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 # Deposit contract @@ -105,18 +129,19 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa # Networking # --------------------------------------------------------------- -# `10 * 2**20` (= 10485760, 10 MiB) +# 10 * 2**20 (= 10,485,760) bytes, 10 MiB MAX_PAYLOAD_SIZE: 10485760 -# `2**10` (= 1024) +# 2**10 (= 1,024) blocks MAX_REQUEST_BLOCKS: 1024 -# `2**8` (= 256) +# 2**8 (= 256) epochs EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 -# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) +# MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33,024) epochs MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 # 5s TTFB_TIMEOUT: 5 # 10s RESP_TIMEOUT: 10 +# 
2**5 (= 32) slots ATTESTATION_PROPAGATION_SLOT_RANGE: 32 # 500ms MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 @@ -124,45 +149,59 @@ MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 # 2 subnets per node SUBNETS_PER_NODE: 2 -# 2**8 (= 64) +# 2**6 (= 64) subnets ATTESTATION_SUBNET_COUNT: 64 +# 0 bits ATTESTATION_SUBNET_EXTRA_BITS: 0 -# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS +# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS (= 6 + 0) bits ATTESTATION_SUBNET_PREFIX_BITS: 6 ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3 # Deneb -# `2**7` (=128) +# 2**7 (= 128) blocks MAX_REQUEST_BLOCKS_DENEB: 128 -# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK -MAX_REQUEST_BLOB_SIDECARS: 768 -# `2**12` (= 4096 epochs, ~18 days) +# 2**12 (= 4,096) epochs MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 -# `6` +# 6 subnets BLOB_SIDECAR_SUBNET_COUNT: 6 -# `uint64(6)` +# 6 blobs MAX_BLOBS_PER_BLOCK: 6 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK (= 128 * 6) sidecars +MAX_REQUEST_BLOB_SIDECARS: 768 # Electra -# 2**7 * 10**9 (= 128,000,000,000) -MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 -# 2**8 * 10**9 (= 256,000,000,000) -MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 -# `9` +# 9 subnets BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 -# `uint64(9)` +# 9 blobs MAX_BLOBS_PER_BLOCK_ELECTRA: 9 -# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA (= 128 * 9) sidecars MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 # Fulu +# 2**7 (= 128) groups NUMBER_OF_CUSTODY_GROUPS: 128 +# 2**7 (= 128) subnets DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +# MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS (= 128 * 128) sidecars MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 +# 2**3 (= 8) samples SAMPLES_PER_SLOT: 8 +# 2**2 (= 4) sidecars CUSTODY_REQUIREMENT: 4 +# 2**3 (= 8) sidecars VALIDATOR_CUSTODY_REQUIREMENT: 8 +# 2**5 * 10**9 (= 32,000,000,000) Gwei BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 
32000000000 +# 2**12 (= 4,096) epochs MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 -# Gloas \ No newline at end of file +# Blob Scheduling +# --------------------------------------------------------------- + +BLOB_SCHEDULE: + - EPOCH: 412672 # December 9, 2025, 02:21:11pm UTC + MAX_BLOBS_PER_BLOCK: 15 + - EPOCH: 419072 # January 7, 2026, 01:01:11am UTC + MAX_BLOBS_PER_BLOCK: 21 + +# Gloas diff --git a/consensus/types/presets/gnosis/electra.yaml b/consensus/types/presets/gnosis/electra.yaml index 42afbb233e..6885667c6e 100644 --- a/consensus/types/presets/gnosis/electra.yaml +++ b/consensus/types/presets/gnosis/electra.yaml @@ -41,8 +41,7 @@ MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 # Withdrawals processing # --------------------------------------------------------------- -# 2**3 ( = 8) pending withdrawals -MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 8 +MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 6 # Pending deposits processing # --------------------------------------------------------------- diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 93f5140383..3565c714e0 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -87,12 +87,18 @@ pub struct ChainSpec { */ pub genesis_delay: u64, pub seconds_per_slot: u64, + pub slot_duration_ms: u64, pub min_attestation_inclusion_delay: u64, pub min_seed_lookahead: Epoch, pub max_seed_lookahead: Epoch, pub min_epochs_to_inactivity_penalty: u64, pub min_validator_withdrawability_delay: Epoch, pub shard_committee_period: u64, + pub proposer_reorg_cutoff_bps: u64, + pub attestation_due_bps: u64, + pub aggregate_due_bps: u64, + pub sync_message_due_bps: u64, + pub contribution_due_bps: u64, /* * Reward and penalty quotients @@ -964,12 +970,18 @@ impl ChainSpec { */ genesis_delay: 604800, // 7 days seconds_per_slot: 12, + slot_duration_ms: 12000, min_attestation_inclusion_delay: 1, min_seed_lookahead: Epoch::new(1), max_seed_lookahead: Epoch::new(4), 
min_epochs_to_inactivity_penalty: 4, min_validator_withdrawability_delay: Epoch::new(256), shard_committee_period: 256, + proposer_reorg_cutoff_bps: 1667, + attestation_due_bps: 3333, + aggregate_due_bps: 6667, + sync_message_due_bps: 3333, + contribution_due_bps: 6667, /* * Reward and penalty quotients @@ -1098,7 +1110,7 @@ impl ChainSpec { * Fulu hard fork params */ fulu_fork_version: [0x06, 0x00, 0x00, 0x00], - fulu_fork_epoch: None, + fulu_fork_epoch: Some(Epoch::new(411392)), custody_requirement: 4, number_of_custody_groups: 128, data_column_sidecar_subnet_count: 128, @@ -1158,7 +1170,16 @@ impl ChainSpec { /* * Networking Fulu specific */ - blob_schedule: BlobSchedule::default(), + blob_schedule: BlobSchedule::new(vec![ + BlobParameters { + epoch: Epoch::new(412672), + max_blobs_per_block: 15, + }, + BlobParameters { + epoch: Epoch::new(419072), + max_blobs_per_block: 21, + }, + ]), min_epochs_for_data_column_sidecars_requests: default_min_epochs_for_data_column_sidecars_requests(), max_data_columns_by_root_request: default_data_columns_by_root_request(), @@ -1310,12 +1331,18 @@ impl ChainSpec { */ genesis_delay: 6000, // 100 minutes seconds_per_slot: 5, + slot_duration_ms: 5000, min_attestation_inclusion_delay: 1, min_seed_lookahead: Epoch::new(1), max_seed_lookahead: Epoch::new(4), min_epochs_to_inactivity_penalty: 4, min_validator_withdrawability_delay: Epoch::new(256), shard_committee_period: 256, + proposer_reorg_cutoff_bps: 1667, + attestation_due_bps: 3333, + aggregate_due_bps: 6667, + sync_message_due_bps: 3333, + contribution_due_bps: 6667, /* * Reward and penalty quotients @@ -1429,8 +1456,7 @@ impl ChainSpec { .expect("pow does not overflow"), whistleblower_reward_quotient_electra: u64::checked_pow(2, 12) .expect("pow does not overflow"), - max_pending_partials_per_withdrawals_sweep: u64::checked_pow(2, 3) - .expect("pow does not overflow"), + max_pending_partials_per_withdrawals_sweep: 6, min_per_epoch_churn_limit_electra: option_wrapper(|| { 
u64::checked_pow(2, 7)?.checked_mul(u64::checked_pow(10, 9)?) }) From 4908687e7d0b8b00dd6ecd3f7e646d952155a02f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 3 Nov 2025 19:06:03 +1100 Subject: [PATCH 28/44] Proposer duties backwards compat (#8335) The beacon API spec wasn't updated to use the Fulu definition of `dependent_root` for the proposer duties endpoint. No other client updated their logic, so to retain backwards compatibility the decision has been made to continue using the block root at the end of epoch `N - 1`, and introduce a new v2 endpoint down the track to use the correct dependent root. Eth R&D discussion: https://discord.com/channels/595666850260713488/598292067260825641/1433036715848765562 Change the behaviour of the v1 endpoint back to using the last slot of `N - 1` rather than the last slot of `N - 2`. This introduces the possibility of dependent root false positives (the root can change without changing the shuffling), but causes the least compatibility issues with other clients. Co-Authored-By: Michael Sproul --- .../beacon_chain/src/beacon_proposer_cache.rs | 23 ++++++++++++++-- beacon_node/beacon_chain/tests/store_tests.rs | 9 ++++--- beacon_node/http_api/src/proposer_duties.rs | 27 +++++++++++++------ .../http_api/tests/interactive_tests.rs | 3 +-- consensus/types/src/beacon_state.rs | 16 +++++++++++ testing/ef_tests/src/cases/fork_choice.rs | 2 +- 6 files changed, 64 insertions(+), 16 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index 6effce49f8..bd6460eba7 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -166,10 +166,17 @@ impl BeaconProposerCache { } /// Compute the proposer duties using the head state without cache. +/// +/// Return: +/// - Proposer indices. +/// - True dependent root. +/// - Legacy dependent root (last block of epoch `N - 1`). 
+/// - Head execution status. +/// - Fork at `request_epoch`. pub fn compute_proposer_duties_from_head( request_epoch: Epoch, chain: &BeaconChain, -) -> Result<(Vec, Hash256, ExecutionStatus, Fork), BeaconChainError> { +) -> Result<(Vec, Hash256, Hash256, ExecutionStatus, Fork), BeaconChainError> { // Atomically collect information about the head whilst holding the canonical head `Arc` as // short as possible. let (mut state, head_state_root, head_block_root) = { @@ -203,11 +210,23 @@ pub fn compute_proposer_duties_from_head( .proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root, &chain.spec) .map_err(BeaconChainError::from)?; + // This is only required because the V1 proposer duties endpoint spec wasn't updated for Fulu. We + // can delete this once the V1 endpoint is deprecated at the Glamsterdam fork. + let legacy_dependent_root = state + .legacy_proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root) + .map_err(BeaconChainError::from)?; + // Use fork_at_epoch rather than the state's fork, because post-Fulu we may not have advanced // the state completely into the new epoch. let fork = chain.spec.fork_at_epoch(request_epoch); - Ok((indices, dependent_root, execution_status, fork)) + Ok(( + indices, + dependent_root, + legacy_dependent_root, + execution_status, + fork, + )) } /// If required, advance `state` to the epoch required to determine proposer indices in `target_epoch`. 
diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 7891b22432..0c83244f44 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1561,7 +1561,7 @@ async fn proposer_duties_from_head_fulu() { // Compute the proposer duties at the next epoch from the head let next_epoch = head_state.next_epoch().unwrap(); - let (_indices, dependent_root, _, fork) = + let (_indices, dependent_root, legacy_dependent_root, _, fork) = compute_proposer_duties_from_head(next_epoch, &harness.chain).unwrap(); assert_eq!( @@ -1570,6 +1570,8 @@ async fn proposer_duties_from_head_fulu() { .proposer_shuffling_decision_root_at_epoch(next_epoch, head_block_root.into(), spec) .unwrap() ); + assert_ne!(dependent_root, legacy_dependent_root); + assert_eq!(legacy_dependent_root, Hash256::from(head_block_root)); assert_eq!(fork, head_state.fork()); } @@ -1617,7 +1619,7 @@ async fn proposer_lookahead_gloas_fork_epoch() { assert_eq!(head_state.current_epoch(), gloas_fork_epoch - 1); // Compute the proposer duties at the fork epoch from the head. 
- let (indices, dependent_root, _, fork) = + let (indices, dependent_root, legacy_dependent_root, _, fork) = compute_proposer_duties_from_head(gloas_fork_epoch, &harness.chain).unwrap(); assert_eq!( @@ -1630,6 +1632,7 @@ async fn proposer_lookahead_gloas_fork_epoch() { ) .unwrap() ); + assert_ne!(dependent_root, legacy_dependent_root); assert_ne!(fork, head_state.fork()); assert_eq!(fork, spec.fork_at_epoch(gloas_fork_epoch)); @@ -1639,7 +1642,7 @@ async fn proposer_lookahead_gloas_fork_epoch() { .add_attested_blocks_at_slots(head_state, head_state_root, &gloas_slots, &all_validators) .await; - let (no_lookahead_indices, no_lookahead_dependent_root, _, no_lookahead_fork) = + let (no_lookahead_indices, no_lookahead_dependent_root, _, _, no_lookahead_fork) = compute_proposer_duties_from_head(gloas_fork_epoch, &harness.chain).unwrap(); assert_eq!(no_lookahead_indices, indices); diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 78f99c475c..1ebb174785 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -60,13 +60,13 @@ pub fn proposer_duties( .safe_add(1) .map_err(warp_utils::reject::arith_error)? 
{ - let (proposers, dependent_root, execution_status, _fork) = + let (proposers, _dependent_root, legacy_dependent_root, execution_status, _fork) = compute_proposer_duties_from_head(request_epoch, chain) .map_err(warp_utils::reject::unhandled_error)?; convert_to_api_response( chain, request_epoch, - dependent_root, + legacy_dependent_root, execution_status.is_optimistic_or_invalid(), proposers, ) @@ -116,6 +116,11 @@ fn try_proposer_duties_from_cache( .beacon_state .proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root, &chain.spec) .map_err(warp_utils::reject::beacon_state_error)?; + let legacy_dependent_root = head + .snapshot + .beacon_state + .legacy_proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root) + .map_err(warp_utils::reject::beacon_state_error)?; let execution_optimistic = chain .is_optimistic_or_invalid_head_block(head_block) .map_err(warp_utils::reject::unhandled_error)?; @@ -129,7 +134,7 @@ fn try_proposer_duties_from_cache( convert_to_api_response( chain, request_epoch, - head_decision_root, + legacy_dependent_root, execution_optimistic, indices.to_vec(), ) @@ -151,7 +156,7 @@ fn compute_and_cache_proposer_duties( current_epoch: Epoch, chain: &BeaconChain, ) -> Result { - let (indices, dependent_root, execution_status, fork) = + let (indices, dependent_root, legacy_dependent_root, execution_status, fork) = compute_proposer_duties_from_head(current_epoch, chain) .map_err(warp_utils::reject::unhandled_error)?; @@ -166,7 +171,7 @@ fn compute_and_cache_proposer_duties( convert_to_api_response( chain, current_epoch, - dependent_root, + legacy_dependent_root, execution_status.is_optimistic_or_invalid(), indices, ) @@ -229,12 +234,18 @@ fn compute_historic_proposer_duties( // We can supply the genesis block root as the block root since we know that the only block that // decides its own root is the genesis block. 
- let dependent_root = state - .proposer_shuffling_decision_root(chain.genesis_block_root, &chain.spec) + let legacy_dependent_root = state + .legacy_proposer_shuffling_decision_root_at_epoch(epoch, chain.genesis_block_root) .map_err(BeaconChainError::from) .map_err(warp_utils::reject::unhandled_error)?; - convert_to_api_response(chain, epoch, dependent_root, execution_optimistic, indices) + convert_to_api_response( + chain, + epoch, + legacy_dependent_root, + execution_optimistic, + indices, + ) } /// Converts the internal representation of proposer duties into one that is compatible with the diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 5b016a7de4..a9de737d65 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1017,10 +1017,9 @@ async fn proposer_duties_with_gossip_tolerance() { assert_eq!( proposer_duties_tolerant_current_epoch.dependent_root, head_state - .proposer_shuffling_decision_root_at_epoch( + .legacy_proposer_shuffling_decision_root_at_epoch( tolerant_current_epoch, head_block_root, - spec ) .unwrap() ); diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 1bd4927fe8..9c4e50dc61 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -911,6 +911,22 @@ impl BeaconState { } } + /// Returns the block root at the last slot of `epoch - 1`. + /// + /// This can be deleted after Glamsterdam and the removal of the v1 proposer duties endpoint. 
+ pub fn legacy_proposer_shuffling_decision_root_at_epoch( + &self, + epoch: Epoch, + head_block_root: Hash256, + ) -> Result { + let decision_slot = epoch.saturating_sub(1u64).end_slot(E::slots_per_epoch()); + if self.slot() <= decision_slot { + Ok(head_block_root) + } else { + self.get_block_root(decision_slot).copied() + } + } + /// Returns the block root which decided the proposer shuffling for the current epoch. This root /// can be used to key this proposer shuffling. /// diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 47b9902345..8e9d438a24 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -920,7 +920,7 @@ impl Tester { let cached_head = self.harness.chain.canonical_head.cached_head(); let next_slot = cached_head.snapshot.beacon_block.slot() + 1; let next_slot_epoch = next_slot.epoch(E::slots_per_epoch()); - let (proposer_indices, decision_root, _, fork) = + let (proposer_indices, decision_root, _, _, fork) = compute_proposer_duties_from_head(next_slot_epoch, &self.harness.chain).unwrap(); let proposer_index = proposer_indices[next_slot.as_usize() % E::slots_per_epoch() as usize]; From 5d0f8a083ae3c23134e61686a7f12441c3256240 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 3 Nov 2025 00:06:06 -0800 Subject: [PATCH 29/44] Ensure custody backfill sync couples all responses before importing (#8339) Custody backfill sync has a bug when we request columns from more than one peer per batch. The fix here ensures we wait for all requests to be completed before performing verification and importing the responses. I've also added an endpoint `lighthouse/custody/backfill` that resets a nodes earliest available data column to the current epoch so that custody backfill can be triggered. This endpoint is needed to rescue any nodes that may have missing columns due to the custody backfill sync bug without requiring a full re-sync. 
Co-Authored-By: Eitan Seri- Levi Co-Authored-By: Eitan Seri-Levi Co-Authored-By: Jimmy Chen Co-Authored-By: Michael Sproul --- .../beacon_chain/src/custody_context.rs | 81 +++++++++++++++++-- .../src/historical_data_columns.rs | 5 -- beacon_node/http_api/src/lib.rs | 32 ++++++++ beacon_node/http_api/src/test_utils.rs | 18 +++++ .../tests/broadcast_validation_tests.rs | 2 + beacon_node/http_api/tests/fork_tests.rs | 2 + .../http_api/tests/interactive_tests.rs | 65 +++++++++++++++ .../src/sync/custody_backfill_sync/mod.rs | 5 +- .../sync/range_data_column_batch_request.rs | 21 ++--- book/src/api_lighthouse.md | 10 +++ common/eth2/src/lighthouse.rs | 13 +++ 11 files changed, 230 insertions(+), 24 deletions(-) diff --git a/beacon_node/beacon_chain/src/custody_context.rs b/beacon_node/beacon_chain/src/custody_context.rs index 9a6f51174a..a5ef3ed2f6 100644 --- a/beacon_node/beacon_chain/src/custody_context.rs +++ b/beacon_node/beacon_chain/src/custody_context.rs @@ -120,9 +120,7 @@ impl ValidatorRegistrations { let effective_epoch = (current_slot + effective_delay_slots).epoch(E::slots_per_epoch()) + 1; self.epoch_validator_custody_requirements - .entry(effective_epoch) - .and_modify(|old_custody| *old_custody = validator_custody_requirement) - .or_insert(validator_custody_requirement); + .insert(effective_epoch, validator_custody_requirement); Some((effective_epoch, validator_custody_requirement)) } else { None @@ -154,11 +152,25 @@ impl ValidatorRegistrations { }); self.epoch_validator_custody_requirements - .entry(effective_epoch) - .and_modify(|old_custody| *old_custody = latest_validator_custody) - .or_insert(latest_validator_custody); + .insert(effective_epoch, latest_validator_custody); } } + + /// Updates the `epoch -> cgc` map by pruning records before `effective_epoch` + /// while setting the `cgc` at `effective_epoch` to the latest validator custody requirement. 
+ /// + /// This is used to restart custody backfill sync at `effective_epoch` + pub fn reset_validator_custody_requirements(&mut self, effective_epoch: Epoch) { + if let Some(latest_validator_custody_requirements) = + self.latest_validator_custody_requirement() + { + self.epoch_validator_custody_requirements + .retain(|&epoch, _| epoch >= effective_epoch); + + self.epoch_validator_custody_requirements + .insert(effective_epoch, latest_validator_custody_requirements); + }; + } } /// Given the `validator_custody_units`, return the custody requirement based on @@ -535,6 +547,14 @@ impl CustodyContext { .write() .backfill_validator_custody_requirements(effective_epoch, expected_cgc); } + + /// The node is attempting to restart custody backfill. Update the internal records so that + /// custody backfill can start backfilling at `effective_epoch`. + pub fn reset_validator_custody_requirements(&self, effective_epoch: Epoch) { + self.validator_registrations + .write() + .reset_validator_custody_requirements(effective_epoch); + } } /// Indicates that the custody group count (CGC) has increased. 
@@ -1491,4 +1511,53 @@ mod tests { ); } } + + #[test] + fn reset_validator_custody_requirements() { + let spec = E::default_spec(); + let minimum_cgc = 4u64; + let initial_cgc = 8u64; + let mid_cgc = 16u64; + let final_cgc = 32u64; + + // Setup: Node restart after multiple validator registrations causing CGC increases + let head_epoch = Epoch::new(20); + let epoch_and_cgc_tuples = vec![ + (Epoch::new(0), initial_cgc), + (Epoch::new(10), mid_cgc), + (head_epoch, final_cgc), + ]; + let custody_context = setup_custody_context(&spec, head_epoch, epoch_and_cgc_tuples); + + // Backfill from epoch 20 to 9 + complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(9), final_cgc); + + // Reset validator custody requirements to the latest cgc requirements at `head_epoch` up to the boundary epoch + custody_context.reset_validator_custody_requirements(head_epoch); + + // Verify epochs 0 - 19 return the minimum cgc requirement because of the validator custody requirement reset + for epoch in 0..=19 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + minimum_cgc, + ); + } + + // Verify epoch 20 returns a CGC of 32 + assert_eq!( + custody_context.custody_group_count_at_epoch(head_epoch, &spec), + final_cgc + ); + + // Rerun Backfill to epoch 20 + complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(0), final_cgc); + + // Verify epochs 0 - 20 return the final cgc requirements + for epoch in 0..=20 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + final_cgc, + ); + } + } } diff --git a/beacon_node/beacon_chain/src/historical_data_columns.rs b/beacon_node/beacon_chain/src/historical_data_columns.rs index 9304f06570..6cf947adcb 100644 --- a/beacon_node/beacon_chain/src/historical_data_columns.rs +++ b/beacon_node/beacon_chain/src/historical_data_columns.rs @@ -89,11 +89,6 @@ impl BeaconChain { .get_data_column(&block_root, &data_column.index)? 
.is_some() { - debug!( - block_root = ?block_root, - column_index = data_column.index, - "Skipping data column import as identical data column exists" - ); continue; } if block_root != data_column.block_root() { diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 41cd729a68..9026792b91 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -4604,6 +4604,37 @@ pub fn serve( }, ); + // POST lighthouse/custody/backfill + let post_lighthouse_custody_backfill = warp::path("lighthouse") + .and(warp::path("custody")) + .and(warp::path("backfill")) + .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + // Calling this endpoint will trigger custody backfill once `effective_epoch` + // is finalized. + let effective_epoch = chain + .canonical_head + .cached_head() + .head_slot() + .epoch(T::EthSpec::slots_per_epoch()) + + 1; + let custody_context = chain.data_availability_checker.custody_context(); + // Reset validator custody requirements to `effective_epoch` with the latest + // cgc requirements. + custody_context.reset_validator_custody_requirements(effective_epoch); + // Update `DataColumnCustodyInfo` to reflect the custody change. 
+ chain.update_data_column_custody_info(Some( + effective_epoch.start_slot(T::EthSpec::slots_per_epoch()), + )); + Ok(()) + }) + }, + ); + // GET lighthouse/analysis/block_rewards let get_lighthouse_block_rewards = warp::path("lighthouse") .and(warp::path("analysis")) @@ -4963,6 +4994,7 @@ pub fn serve( .uor(post_lighthouse_compaction) .uor(post_lighthouse_add_peer) .uor(post_lighthouse_remove_peer) + .uor(post_lighthouse_custody_backfill) .recover(warp_utils::reject::handle_rejection), ), ) diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index fe9e0dff70..27e2a27d35 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -1,6 +1,7 @@ use crate::{Config, Context}; use beacon_chain::{ BeaconChain, BeaconChainTypes, + custody_context::NodeCustodyType, test_utils::{BeaconChainHarness, BoxedMutator, Builder, EphemeralHarnessType}, }; use beacon_processor::{ @@ -67,6 +68,20 @@ impl InteractiveTester { None, Config::default(), true, + NodeCustodyType::Fullnode, + ) + .await + } + + pub async fn new_supernode(spec: Option, validator_count: usize) -> Self { + Self::new_with_initializer_and_mutator( + spec, + validator_count, + None, + None, + Config::default(), + true, + NodeCustodyType::Supernode, ) .await } @@ -78,6 +93,7 @@ impl InteractiveTester { mutator: Option>, config: Config, use_mock_builder: bool, + node_custody_type: NodeCustodyType, ) -> Self { let mut harness_builder = BeaconChainHarness::builder(E::default()) .spec_or_default(spec.map(Arc::new)) @@ -93,6 +109,8 @@ impl InteractiveTester { .fresh_ephemeral_store() }; + harness_builder = harness_builder.node_custody_type(node_custody_type); + // Add a mutator for the beacon chain builder which will be called in // `HarnessBuilder::build`. 
if let Some(mutator) = mutator { diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 9427f6fdf3..82723c2b40 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -1,3 +1,4 @@ +use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::test_utils::test_spec; use beacon_chain::{ GossipVerifiedBlock, IntoGossipVerifiedBlock, WhenSlotSkipped, @@ -1956,6 +1957,7 @@ pub async fn duplicate_block_status_code() { ..Config::default() }, true, + NodeCustodyType::Fullnode, ) .await; diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 62a3461276..50cf866b6a 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -1,4 +1,5 @@ //! Tests for API behaviour across fork boundaries. +use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::{ StateSkipConfig, test_utils::{DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME, RelativeSyncCommittee}, @@ -426,6 +427,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { None, Default::default(), true, + NodeCustodyType::Fullnode, ) .await; let harness = &tester.harness; diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index a9de737d65..83cb70a7a3 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1,4 +1,5 @@ //! 
Generic tests that make use of the (newer) `InteractiveApiTester` +use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::{ ChainConfig, chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, @@ -76,6 +77,7 @@ async fn state_by_root_pruned_from_fork_choice() { None, Default::default(), false, + NodeCustodyType::Fullnode, ) .await; @@ -433,6 +435,7 @@ pub async fn proposer_boost_re_org_test( })), Default::default(), false, + NodeCustodyType::Fullnode, ) .await; let harness = &tester.harness; @@ -1049,6 +1052,68 @@ async fn proposer_duties_with_gossip_tolerance() { ); } +// Test that a request to `lighthouse/custody/backfill` succeeds by verifying that `CustodyContext` and `DataColumnCustodyInfo` +// have been updated with the correct values. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn lighthouse_restart_custody_backfill() { + let spec = test_spec::(); + + // Skip pre-Fulu. + if !spec.is_fulu_scheduled() { + return; + } + + let validator_count = 24; + + let tester = InteractiveTester::::new_supernode(Some(spec), validator_count).await; + let harness = &tester.harness; + let spec = &harness.spec; + let client = &tester.client; + let min_cgc = spec.custody_requirement; + let max_cgc = spec.number_of_custody_groups; + + let num_blocks = 2 * E::slots_per_epoch(); + + let custody_context = harness.chain.data_availability_checker.custody_context(); + + harness.advance_slot(); + harness + .extend_chain_with_sync( + num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + SyncCommitteeStrategy::NoValidators, + LightClientStrategy::Disabled, + ) + .await; + + let cgc_at_head = custody_context.custody_group_count_at_head(spec); + let earliest_data_column_epoch = harness.chain.earliest_custodied_data_column_epoch(); + + assert_eq!(cgc_at_head, max_cgc); + assert_eq!(earliest_data_column_epoch, None); + + custody_context + .update_and_backfill_custody_count_at_epoch(harness.chain.epoch().unwrap(), 
cgc_at_head); + client.post_lighthouse_custody_backfill().await.unwrap(); + + let cgc_at_head = custody_context.custody_group_count_at_head(spec); + let cgc_at_previous_epoch = + custody_context.custody_group_count_at_epoch(harness.chain.epoch().unwrap() - 1, spec); + let earliest_data_column_epoch = harness.chain.earliest_custodied_data_column_epoch(); + + // `DataColumnCustodyInfo` should have been updated to the head epoch + assert_eq!( + earliest_data_column_epoch, + Some(harness.chain.epoch().unwrap() + 1) + ); + // Cgc requirements should have stayed the same at head + assert_eq!(cgc_at_head, max_cgc); + // Cgc requirements at the previous epoch should be `min_cgc` + // This allows for custody backfill to re-fetch columns for this epoch. + assert_eq!(cgc_at_previous_epoch, min_cgc); +} + // Test that a request for next epoch proposer duties suceeds when the current slot clock is within // gossip clock disparity (500ms) of the new epoch. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] diff --git a/beacon_node/network/src/sync/custody_backfill_sync/mod.rs b/beacon_node/network/src/sync/custody_backfill_sync/mod.rs index 5c5505083f..bb2c6799f1 100644 --- a/beacon_node/network/src/sync/custody_backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/custody_backfill_sync/mod.rs @@ -382,11 +382,9 @@ impl CustodyBackFillSync { return None; }; - let mut missing_columns = HashSet::new(); - // Skip all batches (Epochs) that don't have missing columns. 
for epoch in Epoch::range_inclusive_rev(self.to_be_downloaded, column_da_boundary) { - missing_columns = self.beacon_chain.get_missing_columns_for_epoch(epoch); + let missing_columns = self.beacon_chain.get_missing_columns_for_epoch(epoch); if !missing_columns.is_empty() { self.to_be_downloaded = epoch; @@ -445,6 +443,7 @@ impl CustodyBackFillSync { self.include_next_batch() } Entry::Vacant(entry) => { + let missing_columns = self.beacon_chain.get_missing_columns_for_epoch(batch_id); entry.insert(BatchInfo::new( &batch_id, CUSTODY_BACKFILL_EPOCHS_PER_BATCH, diff --git a/beacon_node/network/src/sync/range_data_column_batch_request.rs b/beacon_node/network/src/sync/range_data_column_batch_request.rs index 542d99d97c..72e2fb2d5b 100644 --- a/beacon_node/network/src/sync/range_data_column_batch_request.rs +++ b/beacon_node/network/src/sync/range_data_column_batch_request.rs @@ -70,16 +70,17 @@ impl RangeDataColumnBatchRequest { HashMap::new(); let mut column_to_peer_id: HashMap = HashMap::new(); - for column in self - .requests - .values() - .filter_map(|req| req.to_finished()) - .flatten() - { - received_columns_for_slot - .entry(column.slot()) - .or_default() - .push(column.clone()); + for req in self.requests.values() { + let Some(columns) = req.to_finished() else { + return None; + }; + + for column in columns { + received_columns_for_slot + .entry(column.slot()) + .or_default() + .push(column.clone()); + } } // Note: this assumes that only 1 peer is responsible for a column diff --git a/book/src/api_lighthouse.md b/book/src/api_lighthouse.md index 2e694989f9..f804cb9df2 100644 --- a/book/src/api_lighthouse.md +++ b/book/src/api_lighthouse.md @@ -447,6 +447,16 @@ indicating that all states with slots `>= 0` are available, i.e., full state his on the specific meanings of these fields see the docs on [Checkpoint Sync](./advanced_checkpoint_sync.md#how-to-run-an-archived-node). 
+## `/lighthouse/custody/backfill` + +Starts a custody backfill sync from the next epoch with the node's latest custody requirements. The sync won't begin immediately, it waits until the next epoch is finalized before triggering. + +This endpoint should only be used to fix nodes that may have partial custody columns due to a prior backfill bug (present in v8.0.0-rc.2). Use with caution as it re-downloads all historic custody data columns and may consume significant bandwidth. + +```bash +curl -X POST "http://localhost:5052/lighthouse/custody/backfill" +``` + ## `/lighthouse/merge_readiness` Returns the current difficulty and terminal total difficulty of the network. Before [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, you will see that the current difficulty is less than the terminal total difficulty, An example is shown below: diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index f65b5a07b6..4f9a049e44 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -208,6 +208,19 @@ impl BeaconNodeHttpClient { self.get(path).await } + /// `POST lighthouse/custody/backfill` + pub async fn post_lighthouse_custody_backfill(&self) -> Result<(), Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("custody") + .push("backfill"); + + self.post(path, &()).await + } + /* * Note: * From e3ee7febce64c1b5a85c3ab0be0619571ee92d58 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 4 Nov 2025 11:34:47 +1100 Subject: [PATCH 30/44] Release v8.0.0 (#8352) N/A Includes the following unmerged PRs: - #8344 - #8335 - #8339 This PR should be merged after all above PRs are merged. 
Co-Authored-By: Jimmy Chen Co-Authored-By: Jimmy Chen --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 6 +++--- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1efb1fbc70..7c58274598 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -918,7 +918,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "account_utils", "beacon_chain", @@ -1193,7 +1193,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "beacon_node", "bytes", @@ -5064,7 +5064,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "account_utils", "beacon_chain", @@ -5574,7 +5574,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 56c2fb410c..6a54d3342e 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "8.0.0-rc.2" +version = "8.0.0" authors = [ "Paul Hauner ", "Age Manning "] edition = { workspace = true } diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index bd7b37926f..a0965fa548 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v8.0.0-rc.2-", - fallback = "Lighthouse/v8.0.0-rc.2" + prefix = "Lighthouse/v8.0.0-", + fallback = "Lighthouse/v8.0.0" ); /// Returns the first eight characters of the latest 
commit hash for this build. @@ -54,7 +54,7 @@ pub fn version_with_platform() -> String { /// /// `1.5.1` pub fn version() -> &'static str { - "8.0.0-rc.2" + "8.0.0" } /// Returns the name of the current client running. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 2698073b5f..6b7aeb886c 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "8.0.0-rc.2" +version = "8.0.0" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index a3240c6d7c..0d4129817a 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "8.0.0-rc.2" +version = "8.0.0" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false From bc86dc09e5d8ebadec0ac89a8cb51f77dc24caf2 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 4 Nov 2025 13:40:44 +1100 Subject: [PATCH 31/44] Reduce number of blobs used in tests to speed up CI (#8194) `beacon-chain-tests` is now regularly taking 1h+ on CI since Fulu fork was added. This PR attempts to reduce the test time by bringing down the number of blobs generated in tests - instead of generating 0..max_blobs, the generator now generates 0..1 blobs by default, and this can be modified by setting `harness.execution_block_generator.set_min_blob_count(n)`. Note: The blobs are pre-generated and don't require too much CPU to generate; however, processing a larger number of them on the beacon chain does take a lot of time.
This PR also includes a few other small improvements - Our slowest test (`chain_segment_varying_chunk_size`) runs 3x faster in Fulu just by reusing chain segments - Avoid re-running fork specific tests on all forks - Fix a bunch of tests that depend on the harness's existing random blob generation, which is fragile. Beacon chain test time on test machine is **~2x** faster: ### `unstable` ``` Summary [ 751.586s] 291 tests run: 291 passed (13 slow), 0 skipped ``` ### this branch ``` Summary [ 373.792s] 291 tests run: 291 passed (2 slow), 0 skipped ``` The next set of tests to optimise is the ones that use [`get_chain_segment`](https://github.com/sigp/lighthouse/blob/77a9af96de0f693127055e381ece3e98dceea0a8/beacon_node/beacon_chain/tests/block_verification.rs#L45), as it by default builds 320 blocks with supernode - an easy optimisation would be to build these blocks with cgc = 8 for tests that only require fullnodes. Co-Authored-By: Jimmy Chen Co-Authored-By: Jimmy Chen --- .../overflow_lru_cache.rs | 2 +- beacon_node/beacon_chain/src/kzg_utils.rs | 12 +- beacon_node/beacon_chain/src/test_utils.rs | 126 ++++++------------ .../beacon_chain/tests/block_verification.rs | 13 +- beacon_node/beacon_chain/tests/events.rs | 32 ++++- beacon_node/beacon_chain/tests/store_tests.rs | 47 +++++-- .../execution_layer/src/engine_api/http.rs | 3 +- .../test_utils/execution_block_generator.rs | 13 +- .../src/test_utils/mock_execution_layer.rs | 1 - .../execution_layer/src/test_utils/mod.rs | 9 +- .../tests/broadcast_validation_tests.rs | 37 +++-- beacon_node/http_api/tests/tests.rs | 13 +- .../src/sync/block_sidecar_coupling.rs | 15 +-- beacon_node/network/src/sync/tests/lookups.rs | 6 +- .../generate_random_block_and_blobs.rs | 4 +- lcli/src/mock_el.rs | 2 +- testing/node_test_rig/src/lib.rs | 8 +- 17 files changed, 171 insertions(+), 172 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs
b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 402dac1fa8..5e6322ae95 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -1279,7 +1279,7 @@ mod pending_components_tests { let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); let spec = test_spec::(); let (block, blobs_vec) = - generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Random, &mut rng, &spec); + generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Random, &mut rng); let max_len = spec.max_blobs_per_block(block.epoch()) as usize; let mut blobs: RuntimeFixedVector>>> = RuntimeFixedVector::default(max_len); diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 18e14587a5..200774ebe4 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -468,7 +468,7 @@ mod test { #[track_caller] fn test_validate_data_columns(kzg: &Kzg, spec: &ChainSpec) { - let num_of_blobs = 6; + let num_of_blobs = 2; let (signed_block, blobs, proofs) = create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); @@ -494,7 +494,8 @@ mod test { #[track_caller] fn test_build_data_columns(kzg: &Kzg, spec: &ChainSpec) { - let num_of_blobs = 6; + // Using at least 2 blobs to make sure we're arranging the data columns correctly. + let num_of_blobs = 2; let (signed_block, blobs, proofs) = create_test_fulu_block_and_blobs::(num_of_blobs, spec); @@ -534,6 +535,7 @@ mod test { #[track_caller] fn test_reconstruct_data_columns(kzg: &Kzg, spec: &ChainSpec) { + // Using at least 2 blobs to make sure we're arranging the data columns correctly. 
let num_of_blobs = 2; let (signed_block, blobs, proofs) = create_test_fulu_block_and_blobs::(num_of_blobs, spec); @@ -557,6 +559,7 @@ mod test { #[track_caller] fn test_reconstruct_data_columns_unordered(kzg: &Kzg, spec: &ChainSpec) { + // Using at least 2 blobs to make sure we're arranging the data columns correctly. let num_of_blobs = 2; let (signed_block, blobs, proofs) = create_test_fulu_block_and_blobs::(num_of_blobs, spec); @@ -578,7 +581,7 @@ mod test { #[track_caller] fn test_reconstruct_blobs_from_data_columns(kzg: &Kzg, spec: &ChainSpec) { - let num_of_blobs = 6; + let num_of_blobs = 3; let (signed_block, blobs, proofs) = create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); @@ -588,7 +591,8 @@ mod test { // Now reconstruct let signed_blinded_block = signed_block.into(); - let blob_indices = vec![3, 4, 5]; + // Using at least 2 blobs to make sure we're arranging the data columns correctly. + let blob_indices = vec![1, 2]; let reconstructed_blobs = reconstruct_blobs( kzg, &column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2], diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index c1d1d9de67..9601618e92 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -81,6 +81,10 @@ pub const TEST_DATA_COLUMN_SIDECARS_SSZ: &[u8] = // a different value. pub const DEFAULT_TARGET_AGGREGATORS: u64 = u64::MAX; +// Minimum and maximum number of blobs to generate in each slot when using the `NumBlobs::Random` option (default). +const DEFAULT_MIN_BLOBS: usize = 1; +const DEFAULT_MAX_BLOBS: usize = 2; + static KZG: LazyLock> = LazyLock::new(|| { let kzg = Kzg::new_from_trusted_setup(&get_trusted_setup()).expect("should create kzg"); Arc::new(kzg) @@ -172,23 +176,28 @@ fn make_rng() -> Mutex { Mutex::new(StdRng::seed_from_u64(0x0DDB1A5E5BAD5EEDu64)) } -/// Return a `ChainSpec` suitable for test usage. 
-/// -/// If the `fork_from_env` feature is enabled, read the fork to use from the FORK_NAME environment -/// variable. Otherwise use the default spec. -pub fn test_spec() -> ChainSpec { - let mut spec = if cfg!(feature = "fork_from_env") { +pub fn fork_name_from_env() -> Option { + if cfg!(feature = "fork_from_env") { let fork_name = std::env::var(FORK_NAME_ENV_VAR).unwrap_or_else(|e| { panic!( "{} env var must be defined when using fork_from_env: {:?}", FORK_NAME_ENV_VAR, e ) }); - let fork = ForkName::from_str(fork_name.as_str()).unwrap(); - fork.make_genesis_spec(E::default_spec()) + Some(ForkName::from_str(fork_name.as_str()).unwrap()) } else { - E::default_spec() - }; + None + } +} + +/// Return a `ChainSpec` suitable for test usage. +/// +/// If the `fork_from_env` feature is enabled, read the fork to use from the FORK_NAME environment +/// variable. Otherwise use the default spec. +pub fn test_spec() -> ChainSpec { + let mut spec = fork_name_from_env() + .map(|fork| fork.make_genesis_spec(E::default_spec())) + .unwrap_or_else(|| E::default_spec()); // Set target aggregators to a high value by default. spec.target_aggregators_per_committee = DEFAULT_TARGET_AGGREGATORS; @@ -3245,96 +3254,49 @@ pub enum NumBlobs { None, } +macro_rules! 
add_blob_transactions { + ($message:expr, $payload_type:ty, $num_blobs:expr, $rng:expr, $fork_name:expr) => {{ + let num_blobs = match $num_blobs { + NumBlobs::Random => $rng.random_range(DEFAULT_MIN_BLOBS..=DEFAULT_MAX_BLOBS), + NumBlobs::Number(n) => n, + NumBlobs::None => 0, + }; + let (bundle, transactions) = + execution_layer::test_utils::generate_blobs::(num_blobs, $fork_name).unwrap(); + + let payload: &mut $payload_type = &mut $message.body.execution_payload; + payload.execution_payload.transactions = <_>::default(); + for tx in Vec::from(transactions) { + payload.execution_payload.transactions.push(tx).unwrap(); + } + $message.body.blob_kzg_commitments = bundle.commitments.clone(); + bundle + }}; +} + pub fn generate_rand_block_and_blobs( fork_name: ForkName, num_blobs: NumBlobs, rng: &mut impl Rng, - spec: &ChainSpec, ) -> (SignedBeaconBlock>, Vec>) { let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng)); let mut block = SignedBeaconBlock::from_block(inner, types::Signature::random_for_test(rng)); - let max_blobs = spec.max_blobs_per_block(block.epoch()) as usize; let mut blob_sidecars = vec![]; let bundle = match block { SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb { ref mut message, .. - }) => { - // Get either zero blobs or a random number of blobs between 1 and Max Blobs. 
- let payload: &mut FullPayloadDeneb = &mut message.body.execution_payload; - let num_blobs = match num_blobs { - NumBlobs::Random => rng.random_range(1..=max_blobs), - NumBlobs::Number(n) => n, - NumBlobs::None => 0, - }; - let (bundle, transactions) = - execution_layer::test_utils::generate_blobs::(num_blobs, fork_name).unwrap(); - - payload.execution_payload.transactions = <_>::default(); - for tx in Vec::from(transactions) { - payload.execution_payload.transactions.push(tx).unwrap(); - } - message.body.blob_kzg_commitments = bundle.commitments.clone(); - bundle - } + }) => add_blob_transactions!(message, FullPayloadDeneb, num_blobs, rng, fork_name), SignedBeaconBlock::Electra(SignedBeaconBlockElectra { ref mut message, .. - }) => { - // Get either zero blobs or a random number of blobs between 1 and Max Blobs. - let payload: &mut FullPayloadElectra = &mut message.body.execution_payload; - let num_blobs = match num_blobs { - NumBlobs::Random => rng.random_range(1..=max_blobs), - NumBlobs::Number(n) => n, - NumBlobs::None => 0, - }; - let (bundle, transactions) = - execution_layer::test_utils::generate_blobs::(num_blobs, fork_name).unwrap(); - payload.execution_payload.transactions = <_>::default(); - for tx in Vec::from(transactions) { - payload.execution_payload.transactions.push(tx).unwrap(); - } - message.body.blob_kzg_commitments = bundle.commitments.clone(); - bundle - } + }) => add_blob_transactions!(message, FullPayloadElectra, num_blobs, rng, fork_name), SignedBeaconBlock::Fulu(SignedBeaconBlockFulu { ref mut message, .. - }) => { - // Get either zero blobs or a random number of blobs between 1 and Max Blobs. 
- let payload: &mut FullPayloadFulu = &mut message.body.execution_payload; - let num_blobs = match num_blobs { - NumBlobs::Random => rng.random_range(1..=max_blobs), - NumBlobs::Number(n) => n, - NumBlobs::None => 0, - }; - let (bundle, transactions) = - execution_layer::test_utils::generate_blobs::(num_blobs, fork_name).unwrap(); - payload.execution_payload.transactions = <_>::default(); - for tx in Vec::from(transactions) { - payload.execution_payload.transactions.push(tx).unwrap(); - } - message.body.blob_kzg_commitments = bundle.commitments.clone(); - bundle - } + }) => add_blob_transactions!(message, FullPayloadFulu, num_blobs, rng, fork_name), SignedBeaconBlock::Gloas(SignedBeaconBlockGloas { ref mut message, .. - }) => { - // Get either zero blobs or a random number of blobs between 1 and Max Blobs. - let payload: &mut FullPayloadGloas = &mut message.body.execution_payload; - let num_blobs = match num_blobs { - NumBlobs::Random => rng.random_range(1..=max_blobs), - NumBlobs::Number(n) => n, - NumBlobs::None => 0, - }; - let (bundle, transactions) = - execution_layer::test_utils::generate_blobs::(num_blobs, fork_name).unwrap(); - payload.execution_payload.transactions = <_>::default(); - for tx in Vec::from(transactions) { - payload.execution_payload.transactions.push(tx).unwrap(); - } - message.body.blob_kzg_commitments = bundle.commitments.clone(); - bundle - } + }) => add_blob_transactions!(message, FullPayloadGloas, num_blobs, rng, fork_name), _ => return (block, blob_sidecars), }; @@ -3375,7 +3337,7 @@ pub fn generate_rand_block_and_data_columns( SignedBeaconBlock>, DataColumnSidecarList, ) { - let (block, _blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng, spec); + let (block, _blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng); let data_columns = generate_data_column_sidecars_from_block(&block, spec); (block, data_columns) } diff --git a/beacon_node/beacon_chain/tests/block_verification.rs 
b/beacon_node/beacon_chain/tests/block_verification.rs index 3d1fa8f4af..881885cef2 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -297,19 +297,20 @@ async fn chain_segment_full_segment() { #[tokio::test] async fn chain_segment_varying_chunk_size() { - for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] { + let (chain_segment, chain_segment_blobs) = get_chain_segment().await; + let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) + .into_iter() + .collect(); + + for chunk_size in &[1, 2, 31, 32, 33] { let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); - let (chain_segment, chain_segment_blobs) = get_chain_segment().await; - let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) - .into_iter() - .collect(); harness .chain .slot_clock .set_slot(blocks.last().unwrap().slot().as_u64()); - for chunk in blocks.chunks(*chunk_size) { + for chunk in blocks.clone().chunks(*chunk_size) { harness .chain .process_chain_segment(chunk.to_vec(), NotifyExecutionLayer::Yes) diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index 466058eea3..86bdb03daf 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -1,20 +1,26 @@ use beacon_chain::blob_verification::GossipVerifiedBlob; use beacon_chain::data_column_verification::GossipVerifiedDataColumn; -use beacon_chain::test_utils::{BeaconChainHarness, generate_data_column_sidecars_from_block}; +use beacon_chain::test_utils::{ + BeaconChainHarness, fork_name_from_env, generate_data_column_sidecars_from_block, test_spec, +}; use eth2::types::{EventKind, SseBlobSidecar, SseDataColumnSidecar}; use rand::SeedableRng; use rand::rngs::StdRng; use std::sync::Arc; use types::blob_sidecar::FixedBlobSidecarList; use types::test_utils::TestRandom; -use types::{BlobSidecar, DataColumnSidecar, EthSpec, ForkName, 
MinimalEthSpec, Slot}; +use types::{BlobSidecar, DataColumnSidecar, EthSpec, MinimalEthSpec, Slot}; type E = MinimalEthSpec; /// Verifies that a blob event is emitted when a gossip verified blob is received via gossip or the publish block API. #[tokio::test] async fn blob_sidecar_event_on_process_gossip_blob() { - let spec = Arc::new(ForkName::Deneb.make_genesis_spec(E::default_spec())); + if fork_name_from_env().is_some_and(|f| !f.deneb_enabled() || f.fulu_enabled()) { + return; + }; + + let spec = Arc::new(test_spec::()); let harness = BeaconChainHarness::builder(E::default()) .spec(spec) .deterministic_keypairs(8) @@ -48,7 +54,11 @@ async fn blob_sidecar_event_on_process_gossip_blob() { /// Verifies that a data column event is emitted when a gossip verified data column is received via gossip or the publish block API. #[tokio::test] async fn data_column_sidecar_event_on_process_gossip_data_column() { - let spec = Arc::new(ForkName::Fulu.make_genesis_spec(E::default_spec())); + if fork_name_from_env().is_some_and(|f| !f.fulu_enabled()) { + return; + }; + + let spec = Arc::new(test_spec::()); let harness = BeaconChainHarness::builder(E::default()) .spec(spec) .deterministic_keypairs(8) @@ -93,7 +103,11 @@ async fn data_column_sidecar_event_on_process_gossip_data_column() { /// Verifies that a blob event is emitted when blobs are received via RPC. 
#[tokio::test] async fn blob_sidecar_event_on_process_rpc_blobs() { - let spec = Arc::new(ForkName::Deneb.make_genesis_spec(E::default_spec())); + if fork_name_from_env().is_some_and(|f| !f.deneb_enabled() || f.fulu_enabled()) { + return; + }; + + let spec = Arc::new(test_spec::()); let harness = BeaconChainHarness::builder(E::default()) .spec(spec) .deterministic_keypairs(8) @@ -112,7 +126,7 @@ async fn blob_sidecar_event_on_process_rpc_blobs() { let slot = head_state.slot() + 1; let ((signed_block, opt_blobs), _) = harness.make_block(head_state, slot).await; let (kzg_proofs, blobs) = opt_blobs.unwrap(); - assert!(blobs.len() > 2); + assert_eq!(blobs.len(), 2); let blob_1 = Arc::new(BlobSidecar::new(0, blobs[0].clone(), &signed_block, kzg_proofs[0]).unwrap()); @@ -144,7 +158,11 @@ async fn blob_sidecar_event_on_process_rpc_blobs() { #[tokio::test] async fn data_column_sidecar_event_on_process_rpc_columns() { - let spec = Arc::new(ForkName::Fulu.make_genesis_spec(E::default_spec())); + if fork_name_from_env().is_some_and(|f| !f.fulu_enabled()) { + return; + }; + + let spec = Arc::new(test_spec::()); let harness = BeaconChainHarness::builder(E::default()) .spec(spec.clone()) .deterministic_keypairs(8) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 0a261e36ce..41c8f905be 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -7,11 +7,11 @@ use beacon_chain::custody_context::CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS; use beacon_chain::data_availability_checker::AvailableBlock; use beacon_chain::historical_data_columns::HistoricalDataColumnError; use beacon_chain::schema_change::migrate_schema; -use beacon_chain::test_utils::SyncCommitteeStrategy; use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, get_kzg, mock_execution_layer_from_parts, test_spec, }; +use 
beacon_chain::test_utils::{SyncCommitteeStrategy, fork_name_from_env}; use beacon_chain::{ BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig, NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped, @@ -3211,12 +3211,13 @@ async fn test_import_historical_data_columns_batch() { for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); - assert!(data_columns.is_some()); - for data_column in data_columns.unwrap() { + for data_column in data_columns.unwrap_or_default() { data_columns_list.push(data_column); } } + assert!(!data_columns_list.is_empty()); + harness .extend_chain( (E::slots_per_epoch() * 4) as usize, @@ -3255,8 +3256,18 @@ async fn test_import_historical_data_columns_batch() { for block in block_root_iter { let (block_root, _) = block.unwrap(); - let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); - assert!(data_columns.is_some()) + if !harness + .get_block(block_root.into()) + .unwrap() + .message() + .body() + .blob_kzg_commitments() + .unwrap() + .is_empty() + { + let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); + assert!(data_columns.is_some()) + }; } } @@ -3290,9 +3301,8 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); - assert!(data_columns.is_some()); - for data_column in data_columns.unwrap() { + for data_column in data_columns.unwrap_or_default() { let mut data_column = (*data_column).clone(); if data_column.index % 2 == 0 { data_column.signed_block_header.message.body_root = Hash256::ZERO; @@ -3301,6 +3311,7 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { data_columns_list.push(Arc::new(data_column)); } } + assert!(!data_columns_list.is_empty()); 
harness .extend_chain( @@ -3347,7 +3358,11 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { // be imported. #[tokio::test] async fn test_import_historical_data_columns_batch_no_block_found() { - let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); + if fork_name_from_env().is_some_and(|f| !f.fulu_enabled()) { + return; + }; + + let spec = test_spec::(); let db_path = tempdir().unwrap(); let store = get_store_generic(&db_path, StoreConfig::default(), spec); let start_slot = Slot::new(1); @@ -3374,15 +3389,16 @@ async fn test_import_historical_data_columns_batch_no_block_found() { for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); - assert!(data_columns.is_some()); - for data_column in data_columns.unwrap() { + for data_column in data_columns.unwrap_or_default() { let mut data_column = (*data_column).clone(); data_column.signed_block_header.message.body_root = Hash256::ZERO; data_columns_list.push(Arc::new(data_column)); } } + assert!(!data_columns_list.is_empty()); + harness .extend_chain( (E::slots_per_epoch() * 4) as usize, @@ -4108,6 +4124,12 @@ async fn deneb_prune_blobs_no_finalization() { /// Check that blob pruning does not fail trying to prune across the fork boundary. #[tokio::test] async fn prune_blobs_across_fork_boundary() { + // This test covers earlier forks and only need to be executed once. 
+ // Note: this test is quite expensive (building a chain to epoch 15) and we should revisit this + if fork_name_from_env() != Some(ForkName::latest_stable()) { + return; + } + let mut spec = ForkName::Capella.make_genesis_spec(E::default_spec()); let deneb_fork_epoch = Epoch::new(4); @@ -4124,6 +4146,7 @@ async fn prune_blobs_across_fork_boundary() { let store = get_store_generic(&db_path, StoreConfig::default(), spec); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + harness.execution_block_generator().set_min_blob_count(1); let blocks_to_deneb_finalization = E::slots_per_epoch() * 7; let blocks_to_electra_finalization = E::slots_per_epoch() * 4; @@ -4279,7 +4302,7 @@ async fn prune_blobs_across_fork_boundary() { // Fulu fork epochs // Pruning should have been triggered assert!(store.get_blob_info().oldest_blob_slot <= Some(oldest_slot)); - // Oldest blost slot should never be greater than the first fulu slot + // Oldest blob slot should never be greater than the first fulu slot let fulu_first_slot = fulu_fork_epoch.start_slot(E::slots_per_epoch()); assert!(store.get_blob_info().oldest_blob_slot <= Some(fulu_first_slot)); // Blobs should not exist post-Fulu @@ -4764,7 +4787,7 @@ async fn fulu_prune_data_columns_margin_test(margin: u64) { check_data_column_existence(&harness, oldest_data_column_slot, harness.head_slot(), true); } -/// Check tat there are data column sidecars (or not) at every slot in the range. +/// Check that there are data column sidecars (or not) at every slot in the range. 
fn check_data_column_existence( harness: &TestHarness, start_slot: Slot, diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 74fb078510..a8dbed34ce 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -1479,8 +1479,7 @@ mod test { impl Tester { pub fn new(with_auth: bool) -> Self { - let spec = Arc::new(MainnetEthSpec::default_spec()); - let server = MockServer::unit_testing(spec); + let server = MockServer::unit_testing(); let rpc_url = SensitiveUrl::parse(&server.url()).unwrap(); let echo_url = SensitiveUrl::parse(&format!("{}/echo", server.url())).unwrap(); diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 5652e557f2..1c1e307417 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -13,6 +13,7 @@ use rand::{Rng, SeedableRng, rngs::StdRng}; use serde::{Deserialize, Serialize}; use ssz::Decode; use ssz_types::VariableList; +use std::cmp::max; use std::collections::HashMap; use std::sync::Arc; use tree_hash::TreeHash; @@ -157,7 +158,6 @@ pub struct ExecutionBlockGenerator { pub blobs_bundles: HashMap>, pub kzg: Option>, rng: Arc>, - spec: Arc, } fn make_rng() -> Arc> { @@ -177,7 +177,6 @@ impl ExecutionBlockGenerator { prague_time: Option, osaka_time: Option, amsterdam_time: Option, - spec: Arc, kzg: Option>, ) -> Self { let mut generator = Self { @@ -200,7 +199,6 @@ impl ExecutionBlockGenerator { blobs_bundles: <_>::default(), kzg, rng: make_rng(), - spec, }; generator.insert_pow_block(0).unwrap(); @@ -732,11 +730,10 @@ impl ExecutionBlockGenerator { let fork_name = execution_payload.fork_name(); if fork_name.deneb_enabled() { - // get random number between 0 and Max Blobs + // get random number 
between 0 and 1 blobs by default + // For tests that need higher blob count, consider adding a `set_max_blob_count` method let mut rng = self.rng.lock(); - // TODO(EIP-7892): see FIXME below - // FIXME: this will break with BPO forks. This function needs to calculate the epoch based on block timestamp.. - let max_blobs = self.spec.max_blobs_per_block_within_fork(fork_name) as usize; + let max_blobs = max(1, self.min_blobs_count); let num_blobs = rng.random_range(self.min_blobs_count..=max_blobs); let (bundle, transactions) = generate_blobs(num_blobs, fork_name)?; for tx in Vec::from(transactions) { @@ -978,7 +975,6 @@ mod test { const TERMINAL_DIFFICULTY: u64 = 10; const TERMINAL_BLOCK: u64 = 10; const DIFFICULTY_INCREMENT: u64 = 1; - let spec = Arc::new(MainnetEthSpec::default_spec()); let mut generator: ExecutionBlockGenerator = ExecutionBlockGenerator::new( Uint256::from(TERMINAL_DIFFICULTY), @@ -989,7 +985,6 @@ mod test { None, None, None, - spec, None, ); diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 9e587d4e59..73c998956c 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -63,7 +63,6 @@ impl MockExecutionLayer { prague_time, osaka_time, amsterdam_time, - spec.clone(), kzg, ); diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 712c773dda..8f12971560 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -22,7 +22,7 @@ use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::sync::{Arc, LazyLock}; use tokio::{runtime, sync::oneshot}; use tracing::info; -use types::{ChainSpec, EthSpec, ExecutionBlockHash, Uint256}; +use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{Filter, Rejection, http::StatusCode}; 
use crate::EngineCapabilities; @@ -114,7 +114,7 @@ pub struct MockServer { } impl MockServer { - pub fn unit_testing(chain_spec: Arc) -> Self { + pub fn unit_testing() -> Self { Self::new( &runtime::Handle::current(), JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), @@ -126,7 +126,6 @@ impl MockServer { None, // FIXME(electra): should this be the default? None, // FIXME(fulu): should this be the default? None, // FIXME(gloas): should this be the default? - chain_spec, None, ) } @@ -134,7 +133,6 @@ impl MockServer { pub fn new_with_config( handle: &runtime::Handle, config: MockExecutionConfig, - spec: Arc, kzg: Option>, ) -> Self { create_test_tracing_subscriber(); @@ -161,7 +159,6 @@ impl MockServer { prague_time, osaka_time, amsterdam_time, - spec, kzg, ); @@ -226,7 +223,6 @@ impl MockServer { prague_time: Option, osaka_time: Option, amsterdam_time: Option, - spec: Arc, kzg: Option>, ) -> Self { Self::new_with_config( @@ -243,7 +239,6 @@ impl MockServer { osaka_time, amsterdam_time, }, - spec, kzg, ) } diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 9427f6fdf3..1b79c13d76 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -822,6 +822,14 @@ pub async fn blinded_gossip_invalid() { tester.harness.advance_slot(); + // Ensure there's at least one blob in the block, so we don't run into failures when the + // block generator logic changes, as different errors could be returned: + // * Invalidity of blocks: `NotFinalizedDescendant` + // * Invalidity of blobs: `ParentUnknown` + tester + .harness + .execution_block_generator() + .set_min_blob_count(1); let (blinded_block, _) = tester .harness .make_blinded_block_with_modifier(chain_state_before, slot, |b| { @@ -837,21 +845,20 @@ pub async fn blinded_gossip_invalid() { assert!(response.is_err()); let error_response: eth2::Error = response.err().unwrap(); 
+ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + let pre_finalized_block_root = Hash256::zero(); - /* mandated by Beacon API spec */ - if tester.harness.spec.is_fulu_scheduled() { - // XXX: this should be a 400 but is a 500 due to the mock-builder being janky - assert_eq!( - error_response.status(), - Some(StatusCode::INTERNAL_SERVER_ERROR) - ); + let expected_error_msg = if tester.harness.spec.is_fulu_scheduled() { + format!( + "BAD_REQUEST: NotFinalizedDescendant {{ block_parent_root: {pre_finalized_block_root:?} }}" + ) } else { - assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error( - error_response, - format!("BAD_REQUEST: ParentUnknown {{ parent_root: {pre_finalized_block_root:?} }}"), - ); - } + // Since Deneb, the invalidity of the blobs will be detected prior to the invalidity of the + // block. + format!("BAD_REQUEST: ParentUnknown {{ parent_root: {pre_finalized_block_root:?} }}") + }; + + assert_server_message_error(error_response, expected_error_msg); } /// Process a blinded block that is invalid, but valid on gossip. 
@@ -1647,6 +1654,10 @@ pub async fn block_seen_on_gossip_with_some_blobs_or_columns() { ) .await; tester.harness.advance_slot(); + tester + .harness + .execution_block_generator() + .set_min_blob_count(2); let slot_a = Slot::new(num_initial); let slot_b = slot_a + 1; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 3b69430efc..6fb5a8ed8a 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -178,6 +178,9 @@ impl ApiTester { "precondition: current slot is one after head" ); + // Set a min blob count for the next block for get_blobs testing + harness.execution_block_generator().set_min_blob_count(2); + let (next_block, _next_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; @@ -1869,7 +1872,7 @@ impl ApiTester { } pub async fn test_get_blob_sidecars(self, use_indices: bool) -> Self { - let block_id = BlockId(CoreBlockId::Finalized); + let block_id = BlockId(CoreBlockId::Head); let (block_root, _, _) = block_id.root(&self.chain).unwrap(); let (block, _, _) = block_id.full_block(&self.chain).await.unwrap(); let num_blobs = block.num_expected_blobs(); @@ -1902,7 +1905,7 @@ impl ApiTester { } pub async fn test_get_blobs(self, versioned_hashes: bool) -> Self { - let block_id = BlockId(CoreBlockId::Finalized); + let block_id = BlockId(CoreBlockId::Head); let (block_root, _, _) = block_id.root(&self.chain).unwrap(); let (block, _, _) = block_id.full_block(&self.chain).await.unwrap(); let num_blobs = block.num_expected_blobs(); @@ -1940,7 +1943,7 @@ impl ApiTester { } pub async fn test_get_blobs_post_fulu_full_node(self, versioned_hashes: bool) -> Self { - let block_id = BlockId(CoreBlockId::Finalized); + let block_id = BlockId(CoreBlockId::Head); let (block_root, _, _) = block_id.root(&self.chain).unwrap(); let (block, _, _) = block_id.full_block(&self.chain).await.unwrap(); @@ -7867,6 +7870,8 @@ async fn get_blobs_post_fulu_supernode() { 
config.spec.fulu_fork_epoch = Some(Epoch::new(0)); ApiTester::new_from_config(config) + .await + .test_post_beacon_blocks_valid() .await // We can call the same get_blobs function in this test // because the function will call get_blobs_by_versioned_hashes which handles peerDAS post-Fulu @@ -7887,6 +7892,8 @@ async fn get_blobs_post_fulu_full_node() { config.spec.fulu_fork_epoch = Some(Epoch::new(0)); ApiTester::new_from_config(config) + .await + .test_post_beacon_blocks_valid() .await .test_get_blobs_post_fulu_full_node(false) .await diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index d5858c23f1..01929cbf90 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -517,11 +517,10 @@ mod tests { #[test] fn no_blobs_into_responses() { - let spec = test_spec::(); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { - generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng, &spec) + generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng) .0 .into() }) @@ -540,19 +539,13 @@ mod tests { #[test] fn empty_blobs_into_responses() { - let spec = test_spec::(); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { // Always generate some blobs. 
- generate_rand_block_and_blobs::( - ForkName::Deneb, - NumBlobs::Number(3), - &mut rng, - &spec, - ) - .0 - .into() + generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Number(3), &mut rng) + .0 + .into() }) .collect::>>>(); diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index fc64186175..63bcd176f5 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -194,7 +194,7 @@ impl TestRig { ) -> (SignedBeaconBlock, Vec>) { let fork_name = self.fork_name; let rng = &mut self.rng; - generate_rand_block_and_blobs::(fork_name, num_blobs, rng, &self.spec) + generate_rand_block_and_blobs::(fork_name, num_blobs, rng) } fn rand_block_and_data_columns( @@ -1146,10 +1146,8 @@ impl TestRig { #[test] fn stable_rng() { - let spec = types::MainnetEthSpec::default_spec(); let mut rng = XorShiftRng::from_seed([42; 16]); - let (block, _) = - generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng, &spec); + let (block, _) = generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng); assert_eq!( block.canonical_root(), Hash256::from_slice( diff --git a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs index 0f52e485a8..8f4908291e 100644 --- a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs +++ b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs @@ -77,7 +77,7 @@ mod test { #[test] fn test_verify_blob_inclusion_proof() { let (_block, blobs) = - generate_rand_block_and_blobs::(ForkName::Deneb, 6, &mut rng()); + generate_rand_block_and_blobs::(ForkName::Deneb, 2, &mut rng()); for blob in blobs { assert!(blob.verify_blob_sidecar_inclusion_proof()); } @@ -115,7 +115,7 @@ mod test { #[test] fn test_verify_blob_inclusion_proof_invalid() { let (_block, blobs) = - generate_rand_block_and_blobs::(ForkName::Deneb, 6, &mut 
rng()); + generate_rand_block_and_blobs::(ForkName::Deneb, 1, &mut rng()); for mut blob in blobs { blob.kzg_commitment_inclusion_proof = FixedVector::random_for_test(&mut rng()); diff --git a/lcli/src/mock_el.rs b/lcli/src/mock_el.rs index ee6485b238..d6bdfb0d71 100644 --- a/lcli/src/mock_el.rs +++ b/lcli/src/mock_el.rs @@ -44,7 +44,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< amsterdam_time, }; let kzg = None; - let server: MockServer = MockServer::new_with_config(&handle, config, spec, kzg); + let server: MockServer = MockServer::new_with_config(&handle, config, kzg); if all_payloads_valid { eprintln!( diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index df191ed5af..e49d11ee1e 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -248,14 +248,8 @@ impl LocalExecutionNode { if let Err(e) = std::fs::write(jwt_file_path, config.jwt_key.hex_string()) { panic!("Failed to write jwt file {}", e); } - let spec = context.eth2_config.spec.clone(); Self { - server: MockServer::new_with_config( - &context.executor.handle().unwrap(), - config, - spec, - None, - ), + server: MockServer::new_with_config(&context.executor.handle().unwrap(), config, None), datadir, } } From 0300d4b32261e7cc3d506f6af9ba1f1e987a1f7e Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 4 Nov 2025 10:38:30 -0800 Subject: [PATCH 32/44] Fix --- beacon_node/beacon_chain/src/beacon_chain.rs | 55 +------------------ consensus/types/src/light_client_bootstrap.rs | 10 ---- 2 files changed, 1 insertion(+), 64 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 58532116e6..3568c30207 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -5786,60 +5786,7 @@ impl BeaconChain { execution_payload_value, ) } - BeaconState::Gloas(_) => { - let ( - payload, - kzg_commitments, - 
maybe_blobs_and_proofs, - maybe_requests, - execution_payload_value, - ) = block_contents - .ok_or(BlockProductionError::MissingExecutionPayload)? - .deconstruct(); - - ( - BeaconBlock::Gloas(BeaconBlockGloas { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyGloas { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings - .try_into() - .map_err(BlockProductionError::SszTypesError)?, - attester_slashings: attester_slashings_electra - .try_into() - .map_err(BlockProductionError::SszTypesError)?, - attestations: attestations_electra - .try_into() - .map_err(BlockProductionError::SszTypesError)?, - deposits: deposits - .try_into() - .map_err(BlockProductionError::SszTypesError)?, - voluntary_exits: voluntary_exits - .try_into() - .map_err(BlockProductionError::SszTypesError)?, - sync_aggregate: sync_aggregate - .ok_or(BlockProductionError::MissingSyncAggregate)?, - execution_payload: payload - .try_into() - .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - bls_to_execution_changes: bls_to_execution_changes - .try_into() - .map_err(BlockProductionError::SszTypesError)?, - blob_kzg_commitments: kzg_commitments - .ok_or(BlockProductionError::InvalidPayloadFork)?, - execution_requests: maybe_requests - .ok_or(BlockProductionError::MissingExecutionRequests)?, - }, - }), - maybe_blobs_and_proofs, - execution_payload_value, - ) - } + BeaconState::Gloas(_) => todo!("Gloas block production"), }; let block = SignedBeaconBlock::from_block( diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index d73f327729..9fc51173a0 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -234,17 +234,7 @@ impl LightClientBootstrap { .try_into() .map_err(Error::SszTypesError)?, }), -<<<<<<< HEAD ForkName::Gloas => todo!("Gloas light client not implemented"), -======= - ForkName::Gloas => 
Self::Gloas(LightClientBootstrapGloas { - header: LightClientHeaderGloas::block_to_light_client_header(block)?, - current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch - .try_into() - .map_err(Error::SszTypesError)?, - }), ->>>>>>> 0507eca7b4dfc6e923b02197d3f3f68bdeb1e67c }; Ok(light_client_bootstrap) From a7e89a8761cee4f3ec2081acb1605a3f0915af5d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 5 Nov 2025 13:08:46 +1100 Subject: [PATCH 33/44] Optimise `state_root_at_slot` for finalized slot (#8353) This is an optimisation targeted at Fulu networks in non-finality. While debugging on Holesky, we found that `state_root_at_slot` was being called from `prepare_beacon_proposer` a lot, for the finalized state: https://github.com/sigp/lighthouse/blob/2c9b670f5d313450252c6cb40a5ee34802d54fef/beacon_node/http_api/src/lib.rs#L3860-L3861 This was causing `prepare_beacon_proposer` calls to take upwards of 5 seconds, sometimes 10 seconds, because it would trigger _multiple_ beacon state loads in order to iterate back to the finalized slot. Ideally, loading the finalized state should be quick because we keep it cached in the state cache (technically we keep the split state, but they usually coincide). Instead we are computing the finalized state root separately (slow), and then loading the state from the cache (fast). Although it would be possible to make the API faster by removing the `state_root_at_slot` call, I believe it's simpler to change `state_root_at_slot` itself and remove the footgun. Devs rightly expect operations involving the finalized state to be fast. 
Co-Authored-By: Michael Sproul --- beacon_node/beacon_chain/src/beacon_chain.rs | 6 ++++++ beacon_node/beacon_chain/tests/store_tests.rs | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 58532116e6..5ffdf951ac 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -883,6 +883,12 @@ impl BeaconChain { return Ok(None); } + // Fast-path for the split slot (which usually corresponds to the finalized slot). + let split = self.store.get_split_info(); + if request_slot == split.slot { + return Ok(Some(split.state_root)); + } + // Try an optimized path of reading the root directly from the head state. let fast_lookup: Option = self.with_head(|head| { if head.beacon_block.slot() <= request_slot { diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 25f824c19b..638c221a7f 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -3155,6 +3155,10 @@ async fn weak_subjectivity_sync_test( .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) .unwrap() .unwrap(); + assert_eq!( + state_root, + beacon_chain.state_root_at_slot(slot).unwrap().unwrap() + ); assert_eq!(state.slot(), slot); assert_eq!(state.canonical_root().unwrap(), state_root); } From efadbb315a09928ea0e27e29cb946fc57fb1e1c4 Mon Sep 17 00:00:00 2001 From: antondlr Date: Wed, 5 Nov 2025 04:53:01 +0100 Subject: [PATCH 34/44] Remove Windows CI jobs (#8362) Remove all Windows-related CI jobs Co-Authored-By: antondlr --- .github/workflows/release.yml | 39 +------------------------------- .github/workflows/test-suite.yml | 33 --------------------------- 2 files changed, 1 insertion(+), 71 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7c85cdd05c..f7b65f07c9 100644 --- a/.github/workflows/release.yml 
+++ b/.github/workflows/release.yml @@ -32,8 +32,7 @@ jobs: matrix: arch: [aarch64-unknown-linux-gnu, x86_64-unknown-linux-gnu, - aarch64-apple-darwin, - x86_64-windows] + aarch64-apple-darwin] include: - arch: aarch64-unknown-linux-gnu runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest' }} @@ -44,9 +43,6 @@ jobs: - arch: aarch64-apple-darwin runner: macos-14 profile: maxperf - - arch: x86_64-windows - runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "release"]') || 'windows-2019' }} - profile: maxperf runs-on: ${{ matrix.runner }} needs: extract-version @@ -57,19 +53,6 @@ jobs: if: env.SELF_HOSTED_RUNNERS == 'false' run: rustup update stable - # ============================== - # Windows dependencies - # ============================== - - - uses: KyleMayes/install-llvm-action@v1 - if: env.SELF_HOSTED_RUNNERS == 'false' && startsWith(matrix.arch, 'x86_64-windows') - with: - version: "17.0" - directory: ${{ runner.temp }}/llvm - - name: Set LIBCLANG_PATH - if: startsWith(matrix.arch, 'x86_64-windows') - run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV - # ============================== # Builds # ============================== @@ -94,12 +77,7 @@ jobs: if: matrix.arch == 'aarch64-apple-darwin' run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }} - - name: Build Lighthouse for Windows - if: matrix.arch == 'x86_64-windows' - run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }} - - name: Configure GPG and create artifacts - if: startsWith(matrix.arch, 'x86_64-windows') != true env: GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }} GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} @@ -118,20 +96,6 @@ jobs: done mv *tar.gz* .. 
- - name: Configure GPG and create artifacts Windows - if: startsWith(matrix.arch, 'x86_64-windows') - env: - GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }} - GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} - run: | - echo $env:GPG_SIGNING_KEY | gpg --batch --import - mkdir artifacts - move $env:USERPROFILE/.cargo/bin/lighthouse.exe ./artifacts - cd artifacts - tar -czf lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz lighthouse.exe - gpg --passphrase "$env:GPG_PASSPHRASE" --batch --pinentry-mode loopback -ab lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz - move *tar.gz* .. - # ======================================================================= # Upload artifacts # This is required to share artifacts between different jobs @@ -239,7 +203,6 @@ jobs: | Apple logo | aarch64 | [lighthouse-${{ env.VERSION }}-aarch64-apple-darwin.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-apple-darwin.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-apple-darwin.tar.gz.asc) | | Linux logo | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz.asc) | | Raspberrypi logo | aarch64 | [lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION 
}}-aarch64-unknown-linux-gnu.tar.gz.asc) | - | Windows logo | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz.asc) | | | | | | | **System** | **Option** | - | **Resource** | | Docker logo | Docker | [${{ env.VERSION }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}/tags?page=1&ordering=last_updated&name=${{ env.VERSION }}) | [${{ env.IMAGE_NAME }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}) | diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 0201bf9ae3..0cdd8211da 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -107,38 +107,6 @@ jobs: if: env.SELF_HOSTED_RUNNERS == 'true' continue-on-error: true run: sccache --show-stats - release-tests-windows: - name: release-tests-windows - needs: [check-labels] - if: needs.check-labels.outputs.skip_ci != 'true' - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "CI"]') || 'windows-2019' }} - steps: - - uses: actions/checkout@v5 - - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == 'false' - uses: moonrepo/setup-rust@v1 - with: - channel: stable - cache-target: release - bins: cargo-nextest - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Install Foundry (anvil) - if: env.SELF_HOSTED_RUNNERS == 'false' - uses: foundry-rs/foundry-toolchain@v1 - with: - version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - - name: Install make - if: env.SELF_HOSTED_RUNNERS == 'false' - run: choco install -y make - - name: Set LIBCLANG_PATH - run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV - - name: Run tests in release - run: make test-release - - name: Show 
cache stats - if: env.SELF_HOSTED_RUNNERS == 'true' - continue-on-error: true - run: sccache --show-stats beacon-chain-tests: name: beacon-chain-tests needs: [check-labels] @@ -501,7 +469,6 @@ jobs: 'check-labels', 'target-branch-check', 'release-tests-ubuntu', - 'release-tests-windows', 'beacon-chain-tests', 'op-pool-tests', 'network-tests', From 1e10329c98c980e2fdac45de66587b9acb7a4980 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Wed, 5 Nov 2025 11:53:03 +0800 Subject: [PATCH 35/44] Update proposer-only section in the documentation (#8358) Co-Authored-By: Tan Chee Keong Co-Authored-By: Michael Sproul --- book/src/advanced_blobs.md | 4 ++-- book/src/advanced_database_migrations.md | 1 + book/src/advanced_proposer_only.md | 4 +--- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/book/src/advanced_blobs.md b/book/src/advanced_blobs.md index ccc29acf26..bb989a85d8 100644 --- a/book/src/advanced_blobs.md +++ b/book/src/advanced_blobs.md @@ -2,7 +2,7 @@ With the [Fusaka](https://ethereum.org/roadmap/fusaka) upgrade, the main feature [PeerDAS](https://ethereum.org/roadmap/fusaka#peerdas) allows storing only a portion of blob data, known as data columns, thus reducing the storage and bandwidth requirements of a full node. This however also means that a full node will not be able to serve blobs after Fusaka. To continue serving blobs, run the beacon node with `--semi-supernode` or `--supernode`. Note that this comes at a significant increase in storage and bandwidth requirements, see [this blog post about PeerDAS](https://blog.sigmaprime.io/peerdas-distributed-blob-building.html) and [Fusaka bandwidth estimation](https://ethpandaops.io/posts/fusaka-bandwidth-estimation/) for more details. -> Note: the above assumes that the beacon node has no attached validators. 
If the beacon node has attached validators, then it is required to custody (store) a certain number of data columns which increases with the number of staked ETH. For example, if the staked ETH is `$\geq$` 2048 ETH, then due to custody requirement, it will make the beacon node a semi-supernode ; if `$\geq$` 4096 ETH, the beacon node will be a supernode without needing the flag. +> Note: the above assumes that the beacon node has no attached validators. If the beacon node has attached validators, then it is required to custody (store) a certain number of data columns which increases with the number of staked ETH. For example, if the staked ETH is >= 2048 ETH, then due to custody requirement, it will make the beacon node a semi-supernode ; if >= 4096 ETH, the beacon node will be a supernode without needing the flag. Table below summarizes the role of relevant flags in Lighthouse beacon node: @@ -17,7 +17,7 @@ While both `--supernode` and `--semi-supernode` can serve blobs, a supernode wil Combining `--prune-blobs false` and `--supernode` (or `--semi-supernode`) implies that no data columns will be pruned, and the node will be able to serve blobs since using the flag. -If you want historical blob data beyond the data availability period (18 days), you can backfill blobs or data columns with the experimental flag `--complete-blobs-backfill`. However, do note that this is an experimental feature and it may cause some issues, e.g., the node may block most of its peers. +If you want historical blob data beyond the data availability period (18 days), you can backfill blobs or data columns with the experimental flag `--complete-blob-backfill`. However, do note that this is an experimental feature and it only works when the flag is present during a fresh checkpoint sync when the database is initialised. The flag will have no effect if the node is already running (with an existing database). 
During blob backfill, the feature may cause some issues, e.g., the node may block most of its peers. **⚠️ The following section on Blobs is archived and not maintained as blobs are stored in the form of data columns after the Fulu fork ⚠️** diff --git a/book/src/advanced_database_migrations.md b/book/src/advanced_database_migrations.md index 3552a90b0e..115a885878 100644 --- a/book/src/advanced_database_migrations.md +++ b/book/src/advanced_database_migrations.md @@ -17,6 +17,7 @@ validator client or the slasher**. | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|----------------------| +| v8.0.0 | Nov 2025 | v28 | yes before Fulu | | v8.0.0-rc.0 | Sep 2025 | v28 | yes before Fulu | | v7.1.0 | Jul 2025 | v26 | yes | | v7.0.0 | Apr 2025 | v22 | no | diff --git a/book/src/advanced_proposer_only.md b/book/src/advanced_proposer_only.md index f55e51606c..1ef7a06655 100644 --- a/book/src/advanced_proposer_only.md +++ b/book/src/advanced_proposer_only.md @@ -23,9 +23,7 @@ normal activities such as performing attestations, but it will make the node harder to identify as a potential node to attack and will also consume less resources. -Specifically, this flag reduces the default peer count (to a safe minimal -number as maintaining peers on attestation subnets do not need to be considered), -prevents the node from subscribing to any attestation-subnets or +Specifically, this flag prevents the node from subscribing to any attestation-subnets or sync-committees which is a primary way for attackers to de-anonymize validators. 
From 8f7dcf02ba54edf264acffbe26e01fbbba23c18e Mon Sep 17 00:00:00 2001 From: hopinheimer <48147533+hopinheimer@users.noreply.github.com> Date: Wed, 5 Nov 2025 11:49:35 +0530 Subject: [PATCH 36/44] Fix unaggregated delay metric (#8366) while working on this #7892 @michaelsproul pointed it might be a good metric to measure the delay from start of the slot instead of the current `slot_duration / 3`, since the attestations duties start before the `1/3rd` mark now with the change in the link PR. Co-Authored-By: hopinheimer Co-Authored-By: hopinheimer <48147533+hopinheimer@users.noreply.github.com> --- beacon_node/beacon_chain/src/metrics.rs | 2 +- beacon_node/beacon_chain/src/validator_monitor.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 8f1da7b67b..e6557c7a27 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1185,7 +1185,7 @@ pub static VALIDATOR_MONITOR_UNAGGREGATED_ATTESTATION_DELAY_SECONDS: LazyLock< > = LazyLock::new(|| { try_create_histogram_vec( "validator_monitor_unaggregated_attestation_delay_seconds", - "The delay between when the validator should send the attestation and when it was received.", + "The delay between when the validator sent the attestation and the start of the slot.", &["src", "validator"], ) }); diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 00c30e5ab1..ba06d5da4e 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -1214,7 +1214,7 @@ impl ValidatorMonitor { let delay = get_message_delay_ms( seen_timestamp, data.slot, - slot_clock.unagg_attestation_production_delay(), + Duration::from_secs(0), slot_clock, ); From 7b1cbca264f32a11857d7648a1c79225fe8f289d Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 5 Nov 2025 18:46:30 +1100 
Subject: [PATCH 37/44] Downgrade and remove unnecessary logs (#8367) ### Downgrade a non error to `Debug` I noticed this error on one of our hoodi nodes: ``` Nov 04 05:13:38.892 ERROR Error during data column reconstruction block_root: 0x4271b9efae7deccec3989bd2418e998b83ce8144210c2b17200abb62b7951190, error: DuplicateFullyImported(0x4271b9efae7deccec3989bd2418e998b83ce8144210c2b17200abb62b7951190) ``` This shouldn't be logged as an error and it's due to a normal race condition, and it doesn't impact the node negatively. ### Remove spammy logs This logs is filling up the log files quite quickly and it is also something we'd expect during normal operation - getting columns via EL before gossip. We haven't found this debug log to be useful, so I propose we remove it to avoid spamming debug logs. ``` Received already available column sidecar. Ignoring the column sidecar ``` In the process of removing this, I noticed we aren't propagating the validation result, which I think we should so I've added this. The impact should be quite minimal - the message will stay in the gossip memcache for a bit longer but should be evicted in the next heartbeat. Co-Authored-By: Jimmy Chen --- .../src/network_beacon_processor/gossip_methods.rs | 11 +++++------ .../network/src/network_beacon_processor/mod.rs | 3 +++ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 1ffe921e58..eb70147c6e 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -735,12 +735,11 @@ impl NetworkBeaconProcessor { // Data column is available via either the EL or reconstruction. // Do not penalise the peer. // Gossip filter should filter any duplicates received after this. - debug!( - %slot, - %block_root, - %index, - "Received already available column sidecar. 
Ignoring the column sidecar" - ) + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); } GossipDataColumnError::FutureSlot { .. } | GossipDataColumnError::PastFinalizedSlot { .. } => { diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 5fa2361f28..bebda36d71 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -866,6 +866,9 @@ impl NetworkBeaconProcessor { "Reconstruction not required for block" ); } + Err(BlockError::DuplicateFullyImported(_)) => { + debug!("Block already imported in parallel with reconstruction"); + } Err(e) => { error!( %block_root, From 3066f0bef297ab5eb0201e43b19ca57ba6536128 Mon Sep 17 00:00:00 2001 From: Mac L Date: Wed, 5 Nov 2025 11:46:32 +0400 Subject: [PATCH 38/44] Prepare `sensitive_url` for `crates.io` (#8223) Another good candidate for publishing separately from Lighthouse is `sensitive_url` as it's a general utility crate and not related to Ethereum. This PR prepares it to be spun out into its own crate. I've made the `full` field on `SensitiveUrl` private and instead provided an explicit getter called `.expose_full()`. It's a bit ugly for the diff but I prefer the explicit nature of the getter. I've also added some extra tests and doc strings along with feature gating `Serialize` and `Deserialize` implementations behind the `serde` feature. 
Co-Authored-By: Mac L --- Cargo.lock | 1 + Cargo.toml | 2 +- beacon_node/builder_client/src/lib.rs | 14 +- .../execution_layer/src/engine_api/http.rs | 2 +- common/eth2/src/lib.rs | 16 +- common/eth2/src/lighthouse.rs | 26 +-- common/eth2/src/lighthouse_vc/http_client.rs | 36 ++-- common/monitoring_api/src/lib.rs | 2 +- common/sensitive_url/Cargo.toml | 8 +- common/sensitive_url/src/lib.rs | 177 +++++++++++++++--- lighthouse/tests/beacon_node.rs | 14 +- lighthouse/tests/validator_client.rs | 4 +- .../src/test_rig.rs | 4 +- .../beacon_node_fallback/src/lib.rs | 4 +- validator_manager/src/exit_validators.rs | 5 +- validator_manager/src/list_validators.rs | 3 +- 16 files changed, 225 insertions(+), 93 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 676f674b1b..fad3ad2ffc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8485,6 +8485,7 @@ name = "sensitive_url" version = "0.1.0" dependencies = [ "serde", + "serde_json", "url", ] diff --git a/Cargo.toml b/Cargo.toml index 1dfc753b8c..d09b0fcd80 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -225,7 +225,7 @@ rpds = "0.11" rusqlite = { version = "0.28", features = ["bundled"] } rust_eth_kzg = "0.9" safe_arith = "0.1" -sensitive_url = { path = "common/sensitive_url" } +sensitive_url = { path = "common/sensitive_url", features = ["serde"] } serde = { version = "1", features = ["derive"] } serde_json = "1" serde_repr = "0.1" diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index 6b993542f3..b486e77083 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -270,7 +270,7 @@ impl BuilderHttpClient { &self, validator: &[SignedValidatorRegistrationData], ) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
@@ -289,7 +289,7 @@ impl BuilderHttpClient { &self, blinded_block: &SignedBlindedBeaconBlock, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); let body = blinded_block.as_ssz_bytes(); @@ -337,7 +337,7 @@ impl BuilderHttpClient { &self, blinded_block: &SignedBlindedBeaconBlock, ) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); let body = blinded_block.as_ssz_bytes(); @@ -387,7 +387,7 @@ impl BuilderHttpClient { &self, blinded_block: &SignedBlindedBeaconBlock, ) -> Result>, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -430,7 +430,7 @@ impl BuilderHttpClient { &self, blinded_block: &SignedBlindedBeaconBlock, ) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -480,7 +480,7 @@ impl BuilderHttpClient { parent_hash: ExecutionBlockHash, pubkey: &PublicKeyBytes, ) -> Result>>, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -521,7 +521,7 @@ impl BuilderHttpClient { /// `GET /eth/v1/builder/status` pub async fn get_builder_status(&self) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index a8dbed34ce..8f7564ace6 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -652,7 +652,7 @@ impl HttpJsonRpc { let mut request = self .client - .post(self.url.full.clone()) + .post(self.url.expose_full().clone()) .timeout(timeout) .header(CONTENT_TYPE, "application/json") .json(&body); diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 2641a4c02e..e8e6663d46 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -30,7 +30,7 @@ use reqwest::{ }; pub use reqwest::{StatusCode, Url}; use reqwest_eventsource::{Event, EventSource}; -pub use sensitive_url::{SensitiveError, SensitiveUrl}; +pub use sensitive_url::SensitiveUrl; use serde::{Serialize, de::DeserializeOwned}; use ssz::Encode; use std::fmt; @@ -152,12 +152,6 @@ impl fmt::Display for BeaconNodeHttpClient { } } -impl AsRef for BeaconNodeHttpClient { - fn as_ref(&self) -> &str { - self.server.as_ref() - } -} - impl BeaconNodeHttpClient { pub fn new(server: SensitiveUrl, timeouts: Timeouts) -> Self { Self { @@ -178,10 +172,14 @@ impl BeaconNodeHttpClient { timeouts, } } + // Returns a reference to the `SensitiveUrl` of the server. + pub fn server(&self) -> &SensitiveUrl { + &self.server + } /// Return the path with the standard `/eth/vX` prefix applied. fn eth_path(&self, version: EndpointVersion) -> Result { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -2613,7 +2611,7 @@ impl BeaconNodeHttpClient { ids: &[u64], epoch: Epoch, ) -> Result>, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 4f9a049e44..993c263cbf 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -173,7 +173,7 @@ pub struct DepositLog { impl BeaconNodeHttpClient { /// `GET lighthouse/health` pub async fn get_lighthouse_health(&self) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -185,7 +185,7 @@ impl BeaconNodeHttpClient { /// `GET lighthouse/syncing` pub async fn get_lighthouse_syncing(&self) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -197,7 +197,7 @@ impl BeaconNodeHttpClient { /// `GET lighthouse/custody/info` pub async fn get_lighthouse_custody_info(&self) -> Result { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -210,7 +210,7 @@ impl BeaconNodeHttpClient { /// `POST lighthouse/custody/backfill` pub async fn post_lighthouse_custody_backfill(&self) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -231,7 +231,7 @@ impl BeaconNodeHttpClient { /// `GET lighthouse/proto_array` pub async fn get_lighthouse_proto_array(&self) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
@@ -246,7 +246,7 @@ impl BeaconNodeHttpClient { &self, epoch: Epoch, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -264,7 +264,7 @@ impl BeaconNodeHttpClient { epoch: Epoch, validator_id: ValidatorId, ) -> Result>, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -278,7 +278,7 @@ impl BeaconNodeHttpClient { /// `POST lighthouse/database/reconstruct` pub async fn post_lighthouse_database_reconstruct(&self) -> Result { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -291,7 +291,7 @@ impl BeaconNodeHttpClient { /// `POST lighthouse/add_peer` pub async fn post_lighthouse_add_peer(&self, req: AdminPeer) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -303,7 +303,7 @@ impl BeaconNodeHttpClient { /// `POST lighthouse/remove_peer` pub async fn post_lighthouse_remove_peer(&self, req: AdminPeer) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -323,7 +323,7 @@ impl BeaconNodeHttpClient { start_slot: Slot, end_slot: Slot, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
@@ -344,7 +344,7 @@ impl BeaconNodeHttpClient { start_epoch: Epoch, end_epoch: Epoch, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -366,7 +366,7 @@ impl BeaconNodeHttpClient { end_epoch: Epoch, target: String, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index c4fddb97d7..8c9d3397a8 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -283,7 +283,7 @@ impl ValidatorClientHttpClient { /// `GET lighthouse/version` pub async fn get_lighthouse_version(&self) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -295,7 +295,7 @@ impl ValidatorClientHttpClient { /// `GET lighthouse/health` pub async fn get_lighthouse_health(&self) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -309,7 +309,7 @@ impl ValidatorClientHttpClient { pub async fn get_lighthouse_spec( &self, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
@@ -323,7 +323,7 @@ impl ValidatorClientHttpClient { pub async fn get_lighthouse_validators( &self, ) -> Result>, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -338,7 +338,7 @@ impl ValidatorClientHttpClient { &self, validator_pubkey: &PublicKeyBytes, ) -> Result>, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -354,7 +354,7 @@ impl ValidatorClientHttpClient { &self, validators: Vec, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -369,7 +369,7 @@ impl ValidatorClientHttpClient { &self, request: &CreateValidatorsMnemonicRequest, ) -> Result>, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -385,7 +385,7 @@ impl ValidatorClientHttpClient { &self, request: &KeystoreValidatorsPostRequest, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -401,7 +401,7 @@ impl ValidatorClientHttpClient { &self, request: &[Web3SignerValidatorRequest], ) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
@@ -424,7 +424,7 @@ impl ValidatorClientHttpClient { prefer_builder_proposals: Option, graffiti: Option, ) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -451,7 +451,7 @@ impl ValidatorClientHttpClient { &self, req: &DeleteKeystoresRequest, ) -> Result { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -462,7 +462,7 @@ impl ValidatorClientHttpClient { } fn make_keystores_url(&self) -> Result { - let mut url = self.server.full.clone(); + let mut url = self.server.expose_full().clone(); url.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? .push("eth") @@ -472,7 +472,7 @@ impl ValidatorClientHttpClient { } fn make_remotekeys_url(&self) -> Result { - let mut url = self.server.full.clone(); + let mut url = self.server.expose_full().clone(); url.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? .push("eth") @@ -482,7 +482,7 @@ impl ValidatorClientHttpClient { } fn make_fee_recipient_url(&self, pubkey: &PublicKeyBytes) -> Result { - let mut url = self.server.full.clone(); + let mut url = self.server.expose_full().clone(); url.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? .push("eth") @@ -494,7 +494,7 @@ impl ValidatorClientHttpClient { } fn make_graffiti_url(&self, pubkey: &PublicKeyBytes) -> Result { - let mut url = self.server.full.clone(); + let mut url = self.server.expose_full().clone(); url.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
.push("eth") @@ -506,7 +506,7 @@ impl ValidatorClientHttpClient { } fn make_gas_limit_url(&self, pubkey: &PublicKeyBytes) -> Result { - let mut url = self.server.full.clone(); + let mut url = self.server.expose_full().clone(); url.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? .push("eth") @@ -519,7 +519,7 @@ impl ValidatorClientHttpClient { /// `GET lighthouse/auth` pub async fn get_auth(&self) -> Result { - let mut url = self.server.full.clone(); + let mut url = self.server.expose_full().clone(); url.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? .push("lighthouse") @@ -635,7 +635,7 @@ impl ValidatorClientHttpClient { pubkey: &PublicKeyBytes, epoch: Option, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? diff --git a/common/monitoring_api/src/lib.rs b/common/monitoring_api/src/lib.rs index 465618c9a8..03b93f2faa 100644 --- a/common/monitoring_api/src/lib.rs +++ b/common/monitoring_api/src/lib.rs @@ -195,7 +195,7 @@ impl MonitoringHttpClient { endpoint = %self.monitoring_endpoint, "Sending metrics to remote endpoint" ); - self.post(self.monitoring_endpoint.full.clone(), &metrics) + self.post(self.monitoring_endpoint.expose_full().clone(), &metrics) .await } } diff --git a/common/sensitive_url/Cargo.toml b/common/sensitive_url/Cargo.toml index ff56209722..3793cc5139 100644 --- a/common/sensitive_url/Cargo.toml +++ b/common/sensitive_url/Cargo.toml @@ -5,6 +5,12 @@ authors = ["Mac L "] edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[features] +serde = ["dep:serde"] + [dependencies] -serde = { workspace = true } +serde = { workspace = true, optional = true } url = { workspace = true } + +[dev-dependencies] +serde_json = { workspace = true } diff --git a/common/sensitive_url/src/lib.rs 
b/common/sensitive_url/src/lib.rs index 64ad070a1f..3f9240268d 100644 --- a/common/sensitive_url/src/lib.rs +++ b/common/sensitive_url/src/lib.rs @@ -1,26 +1,69 @@ +#[cfg(feature = "serde")] use serde::{Deserialize, Deserializer, Serialize, Serializer, de}; use std::fmt; use std::str::FromStr; use url::Url; +/// Errors that can occur when creating or parsing a `SensitiveUrl`. #[derive(Debug)] -pub enum SensitiveError { +pub enum Error { + /// The URL cannot be used as a base URL. InvalidUrl(String), + /// Failed to parse the URL string. ParseError(url::ParseError), + /// Failed to redact sensitive information from the URL. RedactError(String), } -impl fmt::Display for SensitiveError { +impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) + match self { + Error::InvalidUrl(msg) => write!(f, "Invalid URL: {}", msg), + Error::ParseError(e) => write!(f, "Parse error: {}", e), + Error::RedactError(msg) => write!(f, "Redact error: {}", msg), + } } } -// Wrapper around Url which provides a custom `Display` implementation to protect user secrets. -#[derive(Clone, PartialEq)] +impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::ParseError(e) => Some(e), + _ => None, + } + } +} + +/// A URL wrapper that redacts sensitive information in `Display` and `Debug` output. +/// +/// This type stores both the full URL (with credentials, paths, and query parameters) +/// and a redacted version (containing only the scheme, host, and port). The redacted +/// version is used when displaying or debugging to prevent accidental leakage of +/// credentials in logs. +/// +/// Note that `SensitiveUrl` specifically does NOT implement `Deref`, meaning you cannot call +/// `Url` methods like `.password()` or `.scheme()` directly on `SensitiveUrl`. You must first +/// explicitly call `.expose_full()`. 
+/// +/// # Examples +/// +/// ``` +/// use sensitive_url::SensitiveUrl; +/// +/// let url = SensitiveUrl::parse("https://user:pass@example.com/api?token=secret").unwrap(); +/// +/// // Display shows only the redacted version: +/// assert_eq!(url.to_string(), "https://example.com/"); +/// +/// // But you can still access the full URL when needed: +/// let full = url.expose_full(); +/// assert_eq!(full.to_string(), "https://user:pass@example.com/api?token=secret"); +/// assert_eq!(full.password(), Some("pass")); +/// ``` +#[derive(Clone, PartialEq, Eq, Hash)] pub struct SensitiveUrl { - pub full: Url, - pub redacted: String, + full: Url, + redacted: String, } impl fmt::Display for SensitiveUrl { @@ -31,16 +74,14 @@ impl fmt::Display for SensitiveUrl { impl fmt::Debug for SensitiveUrl { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.redacted.fmt(f) - } -} - -impl AsRef for SensitiveUrl { - fn as_ref(&self) -> &str { - self.redacted.as_str() + f.debug_struct("SensitiveUrl") + .field("redacted", &self.redacted) + // Maintains traditional `Debug` format but hides the 'full' field. + .finish_non_exhaustive() } } +#[cfg(feature = "serde")] impl Serialize for SensitiveUrl { fn serialize(&self, serializer: S) -> Result where @@ -50,6 +91,7 @@ impl Serialize for SensitiveUrl { } } +#[cfg(feature = "serde")] impl<'de> Deserialize<'de> for SensitiveUrl { fn deserialize(deserializer: D) -> Result where @@ -62,7 +104,7 @@ impl<'de> Deserialize<'de> for SensitiveUrl { } impl FromStr for SensitiveUrl { - type Err = SensitiveError; + type Err = Error; fn from_str(s: &str) -> Result { Self::parse(s) @@ -70,26 +112,28 @@ impl FromStr for SensitiveUrl { } impl SensitiveUrl { - pub fn parse(url: &str) -> Result { - let surl = Url::parse(url).map_err(SensitiveError::ParseError)?; + /// Attempts to parse a `&str` into a `SensitiveUrl`. 
+ pub fn parse(url: &str) -> Result { + let surl = Url::parse(url).map_err(Error::ParseError)?; SensitiveUrl::new(surl) } - pub fn new(full: Url) -> Result { + /// Creates a `SensitiveUrl` from an existing `Url`. + pub fn new(full: Url) -> Result { let mut redacted = full.clone(); redacted .path_segments_mut() - .map_err(|_| SensitiveError::InvalidUrl("URL cannot be a base.".to_string()))? + .map_err(|_| Error::InvalidUrl("URL cannot be a base.".to_string()))? .clear(); redacted.set_query(None); if redacted.has_authority() { - redacted.set_username("").map_err(|_| { - SensitiveError::RedactError("Unable to redact username.".to_string()) - })?; - redacted.set_password(None).map_err(|_| { - SensitiveError::RedactError("Unable to redact password.".to_string()) - })?; + redacted + .set_username("") + .map_err(|_| Error::RedactError("Unable to redact username.".to_string()))?; + redacted + .set_password(None) + .map_err(|_| Error::RedactError("Unable to redact password.".to_string()))?; } Ok(Self { @@ -97,6 +141,16 @@ impl SensitiveUrl { redacted: redacted.to_string(), }) } + + /// Returns a reference to the full, unredacted URL. + pub fn expose_full(&self) -> &Url { + &self.full + } + + /// Returns the redacted URL as a `&str`. 
+ pub fn redacted(&self) -> &str { + &self.redacted + } } #[cfg(test)] @@ -105,16 +159,81 @@ mod tests { #[test] fn redact_remote_url() { - let full = "https://project:secret@example.com/example?somequery"; + let full = "https://user:pass@example.com/example?somequery"; let surl = SensitiveUrl::parse(full).unwrap(); assert_eq!(surl.to_string(), "https://example.com/"); - assert_eq!(surl.full.to_string(), full); + assert_eq!(surl.expose_full().to_string(), full); } + #[test] fn redact_localhost_url() { - let full = "http://localhost:5052/"; + let full = "http://user:pass@localhost:5052/"; let surl = SensitiveUrl::parse(full).unwrap(); assert_eq!(surl.to_string(), "http://localhost:5052/"); - assert_eq!(surl.full.to_string(), full); + assert_eq!(surl.expose_full().to_string(), full); + } + + #[test] + fn test_no_credentials() { + let full = "https://example.com/path"; + let surl = SensitiveUrl::parse(full).unwrap(); + assert_eq!(surl.to_string(), "https://example.com/"); + assert_eq!(surl.expose_full().to_string(), full); + } + + #[test] + fn test_display() { + let full = "https://user:pass@example.com/api?token=secret"; + let surl = SensitiveUrl::parse(full).unwrap(); + + let display = surl.to_string(); + assert_eq!(display, "https://example.com/"); + } + + #[test] + fn test_debug() { + let full = "https://user:pass@example.com/api?token=secret"; + let surl = SensitiveUrl::parse(full).unwrap(); + + let debug = format!("{:?}", surl); + + assert_eq!( + debug, + "SensitiveUrl { redacted: \"https://example.com/\", .. 
}" + ); + } + + #[cfg(feature = "serde")] + mod serde_tests { + use super::*; + + #[test] + fn test_serialize() { + let full = "https://user:pass@example.com/api?token=secret"; + let surl = SensitiveUrl::parse(full).unwrap(); + + let json = serde_json::to_string(&surl).unwrap(); + assert_eq!(json, format!("\"{}\"", full)); + } + + #[test] + fn test_deserialize() { + let full = "https://user:pass@example.com/api?token=secret"; + let json = format!("\"{}\"", full); + + let surl: SensitiveUrl = serde_json::from_str(&json).unwrap(); + assert_eq!(surl.expose_full().as_str(), full); + } + + #[test] + fn test_roundtrip() { + let full = "https://user:pass@example.com/api?token=secret"; + let original = SensitiveUrl::parse(full).unwrap(); + + let json = serde_json::to_string(&original).unwrap(); + let deserialized: SensitiveUrl = serde_json::from_str(&json).unwrap(); + + assert_eq!(deserialized.expose_full(), original.expose_full()); + } } } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 8342b02173..207324ea33 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -481,7 +481,12 @@ fn run_execution_jwt_secret_key_is_persisted() { .with_config(|config| { let config = config.execution_layer.as_ref().unwrap(); assert_eq!( - config.execution_endpoint.as_ref().unwrap().full.to_string(), + config + .execution_endpoint + .as_ref() + .unwrap() + .expose_full() + .to_string(), "http://localhost:8551/" ); let mut file_jwt_secret_key = String::new(); @@ -532,7 +537,12 @@ fn bellatrix_jwt_secrets_flag() { .with_config(|config| { let config = config.execution_layer.as_ref().unwrap(); assert_eq!( - config.execution_endpoint.as_ref().unwrap().full.to_string(), + config + .execution_endpoint + .as_ref() + .unwrap() + .expose_full() + .to_string(), "http://localhost:8551/" ); assert_eq!( diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 398c6fbd6b..ee3e910b36 100644 --- 
a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -109,12 +109,12 @@ fn beacon_nodes_flag() { .run() .with_config(|config| { assert_eq!( - config.beacon_nodes[0].full.to_string(), + config.beacon_nodes[0].expose_full().to_string(), "http://localhost:1001/" ); assert_eq!(config.beacon_nodes[0].to_string(), "http://localhost:1001/"); assert_eq!( - config.beacon_nodes[1].full.to_string(), + config.beacon_nodes[1].expose_full().to_string(), "https://project:secret@infura.io/" ); assert_eq!(config.beacon_nodes[1].to_string(), "https://infura.io/"); diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 05ec0a2f19..9e45a78870 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -64,7 +64,7 @@ async fn import_and_unlock(http_url: SensitiveUrl, priv_keys: &[&str], password: let client = Client::builder().build().unwrap(); let request = client - .post(http_url.full.clone()) + .post(http_url.expose_full().clone()) .header(CONTENT_TYPE, "application/json") .json(&body); @@ -90,7 +90,7 @@ async fn import_and_unlock(http_url: SensitiveUrl, priv_keys: &[&str], password: ); let request = client - .post(http_url.full.clone()) + .post(http_url.expose_full().clone()) .header(CONTENT_TYPE, "application/json") .json(&body); diff --git a/validator_client/beacon_node_fallback/src/lib.rs b/validator_client/beacon_node_fallback/src/lib.rs index a3f60d2de0..0f13d8c8b7 100644 --- a/validator_client/beacon_node_fallback/src/lib.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -656,7 +656,7 @@ impl BeaconNodeFallback { R: Future>, Err: Debug, { - inc_counter_vec(&ENDPOINT_REQUESTS, &[candidate.as_ref()]); + inc_counter_vec(&ENDPOINT_REQUESTS, &[candidate.server().redacted()]); // There exists a race condition where `func` may be called when the candidate is // actually not ready. 
We deem this an acceptable inefficiency. @@ -668,7 +668,7 @@ impl BeaconNodeFallback { error = ?e, "Request to beacon node failed" ); - inc_counter_vec(&ENDPOINT_ERRORS, &[candidate.as_ref()]); + inc_counter_vec(&ENDPOINT_ERRORS, &[candidate.server().redacted()]); Err((candidate.to_string(), Error::RequestFailed(e))) } } diff --git a/validator_manager/src/exit_validators.rs b/validator_manager/src/exit_validators.rs index a6bbf05fb4..4a398793ce 100644 --- a/validator_manager/src/exit_validators.rs +++ b/validator_manager/src/exit_validators.rs @@ -191,8 +191,7 @@ async fn run(config: ExitConfig) -> Result<(), String> { // Only publish the voluntary exit if the --beacon-node flag is present if let Some(ref beacon_url) = beacon_url { let beacon_node = BeaconNodeHttpClient::new( - SensitiveUrl::parse(beacon_url.as_ref()) - .map_err(|e| format!("Failed to parse beacon http server: {:?}", e))?, + beacon_url.clone(), Timeouts::set_all(Duration::from_secs(12)), ); @@ -399,7 +398,7 @@ mod test { }) .collect(); - let beacon_url = SensitiveUrl::parse(self.beacon_node.client.as_ref()).unwrap(); + let beacon_url = self.beacon_node.client.server().clone(); let validators_to_exit = index_of_validators_to_exit .iter() diff --git a/validator_manager/src/list_validators.rs b/validator_manager/src/list_validators.rs index b064982adf..082894a995 100644 --- a/validator_manager/src/list_validators.rs +++ b/validator_manager/src/list_validators.rs @@ -134,8 +134,7 @@ async fn run(config: ListConfig) -> Result Date: Wed, 5 Nov 2025 23:02:21 +1100 Subject: [PATCH 39/44] Remove ecdsa feature of libp2p (#8374) This compiles, is there any reason to keep `ecdsa`? 
CC @jxs Co-Authored-By: Michael Sproul --- Cargo.lock | 34 ----------------------- beacon_node/lighthouse_network/Cargo.toml | 1 - 2 files changed, 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fad3ad2ffc..c8c14c7257 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2236,7 +2236,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", - "pem-rfc7468", "zeroize", ] @@ -2722,7 +2721,6 @@ dependencies = [ "ff 0.13.1", "generic-array 0.14.7", "group 0.13.0", - "pem-rfc7468", "pkcs8 0.10.2", "rand_core 0.6.4", "sec1 0.7.3", @@ -5300,10 +5298,8 @@ dependencies = [ "hkdf", "k256 0.13.4", "multihash", - "p256", "quick-protobuf", "rand 0.8.5", - "sec1 0.7.3", "sha2 0.10.8", "thiserror 2.0.12", "tracing", @@ -6774,18 +6770,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "p256" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" -dependencies = [ - "ecdsa 0.16.9", - "elliptic-curve 0.13.8", - "primeorder", - "sha2 0.10.8", -] - [[package]] name = "pairing" version = "0.23.0" @@ -6951,15 +6935,6 @@ dependencies = [ "serde", ] -[[package]] -name = "pem-rfc7468" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" -dependencies = [ - "base64ct", -] - [[package]] name = "percent-encoding" version = "2.3.1" @@ -7182,15 +7157,6 @@ dependencies = [ "syn 2.0.100", ] -[[package]] -name = "primeorder" -version = "0.13.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" -dependencies = [ - "elliptic-curve 0.13.8", -] - [[package]] name = "primitive-types" version = "0.10.1" diff --git a/beacon_node/lighthouse_network/Cargo.toml 
b/beacon_node/lighthouse_network/Cargo.toml index 7e69f6770b..035452e4b2 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -64,7 +64,6 @@ features = [ "plaintext", "secp256k1", "macros", - "ecdsa", "metrics", "quic", "upnp", From e6e3d783ad50fdec81d9b2c3c2a5755b659f2fa3 Mon Sep 17 00:00:00 2001 From: lmnzx Date: Wed, 5 Nov 2025 18:17:36 +0530 Subject: [PATCH 40/44] CI workflows to use warpbuild ci runner (#8343) Self hosted GitHub Runners review and improvements local testnet workflow now uses warpbuild ci runner Co-Authored-By: lemon Co-Authored-By: antondlr --- .github/workflows/local-testnet.yml | 18 +++++------ .github/workflows/test-suite.yml | 46 ++++------------------------- 2 files changed, 15 insertions(+), 49 deletions(-) diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index c6f9c075db..c129c0ec95 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -14,7 +14,7 @@ concurrency: jobs: dockerfile-ubuntu: - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v5 @@ -31,7 +31,7 @@ jobs: retention-days: 3 run-local-testnet: - runs-on: ubuntu-22.04 + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} needs: dockerfile-ubuntu steps: - uses: actions/checkout@v5 @@ -89,7 +89,7 @@ jobs: ${{ steps.assertoor_test_result.outputs.failed_test_details }} EOF ) - + echo "Test Result: $test_result" echo "$test_status" if ! 
[ "$test_result" == "success" ]; then @@ -100,7 +100,7 @@ jobs: doppelganger-protection-success-test: needs: dockerfile-ubuntu - runs-on: ubuntu-22.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 @@ -136,7 +136,7 @@ jobs: doppelganger-protection-failure-test: needs: dockerfile-ubuntu - runs-on: ubuntu-22.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 @@ -173,7 +173,7 @@ jobs: # Tests checkpoint syncing to a live network (current fork) and a running devnet (usually next scheduled fork) checkpoint-sync-test: name: checkpoint-sync-test-${{ matrix.network }} - runs-on: ubuntu-latest + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} needs: dockerfile-ubuntu if: contains(github.event.pull_request.labels.*.name, 'syncing') continue-on-error: true @@ -216,7 +216,7 @@ jobs: # Test syncing from genesis on a local testnet. Aims to cover forward syncing both short and long distances. genesis-sync-test: name: genesis-sync-test-${{ matrix.fork }}-${{ matrix.offline_secs }}s - runs-on: ubuntu-latest + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} needs: dockerfile-ubuntu strategy: matrix: @@ -259,7 +259,7 @@ jobs: # a PR is safe to merge. New jobs should be added here. 
local-testnet-success: name: local-testnet-success - runs-on: ubuntu-latest + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} needs: [ 'dockerfile-ubuntu', 'run-local-testnet', @@ -272,4 +272,4 @@ jobs: - name: Check that success job is dependent on all others run: | exclude_jobs='checkpoint-sync-test' - ./scripts/ci/check-success-job.sh ./.github/workflows/local-testnet.yml local-testnet-success "$exclude_jobs" + ./scripts/ci/check-success-job.sh ./.github/workflows/local-testnet.yml local-testnet-success "$exclude_jobs" diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 0cdd8211da..cc7282c351 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -22,8 +22,6 @@ env: # NOTE: this token is a personal access token on Jimmy's account due to the default GITHUB_TOKEN # not having access to other repositories. We should eventually devise a better solution here. LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.LIGHTHOUSE_GITHUB_TOKEN }} - # Enable self-hosted runners for the sigp repo only. - SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }} # Disable incremental compilation CARGO_INCREMENTAL: 0 # Enable portable to prevent issues with caching `blst` for the wrong CPU type @@ -78,8 +76,7 @@ jobs: name: release-tests-ubuntu needs: [check-labels] if: needs.check-labels.outputs.skip_ci != 'true' - # Use self-hosted runners only on the sigp repo. - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v5 # Set Java version to 21. (required since Web3Signer 24.12.0). 
@@ -88,7 +85,6 @@ jobs: distribution: 'temurin' java-version: '21' - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 with: channel: stable @@ -97,7 +93,6 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) - if: env.SELF_HOSTED_RUNNERS == 'false' uses: foundry-rs/foundry-toolchain@v1 with: version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d @@ -111,14 +106,12 @@ jobs: name: beacon-chain-tests needs: [check-labels] if: needs.check-labels.outputs.skip_ci != 'true' - # Use self-hosted runners only on the sigp repo. - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v5 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 with: channel: stable @@ -126,22 +119,16 @@ jobs: bins: cargo-nextest - name: Run beacon_chain tests for all known forks run: make test-beacon-chain - - name: Show cache stats - if: env.SELF_HOSTED_RUNNERS == 'true' - continue-on-error: true - run: sccache --show-stats http-api-tests: name: http-api-tests needs: [check-labels] if: needs.check-labels.outputs.skip_ci != 'true' - # Use self-hosted runners only on the sigp repo. 
- runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v5 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 with: channel: stable @@ -149,10 +136,6 @@ jobs: bins: cargo-nextest - name: Run http_api tests for all recent forks run: make test-http-api - - name: Show cache stats - if: env.SELF_HOSTED_RUNNERS == 'true' - continue-on-error: true - run: sccache --show-stats op-pool-tests: name: op-pool-tests needs: [check-labels] @@ -220,29 +203,22 @@ jobs: name: debug-tests-ubuntu needs: [check-labels] if: needs.check-labels.outputs.skip_ci != 'true' - # Use self-hosted runners only on the sigp repo. - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v5 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 with: channel: stable bins: cargo-nextest - name: Install Foundry (anvil) - if: env.SELF_HOSTED_RUNNERS == 'false' uses: foundry-rs/foundry-toolchain@v1 with: version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run tests in debug run: make test-debug - - name: Show cache stats - if: env.SELF_HOSTED_RUNNERS == 'true' - continue-on-error: true - run: sccache --show-stats state-transition-vectors-ubuntu: name: state-transition-vectors-ubuntu needs: [check-labels] @@ -261,14 +237,12 @@ jobs: name: ef-tests-ubuntu needs: [check-labels] if: needs.check-labels.outputs.skip_ci != 'true' - # Use self-hosted runners only on the 
sigp repo. - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v5 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 with: channel: stable @@ -276,10 +250,6 @@ jobs: bins: cargo-nextest - name: Run consensus-spec-tests with blst and fake_crypto run: make test-ef - - name: Show cache stats - if: env.SELF_HOSTED_RUNNERS == 'true' - continue-on-error: true - run: sccache --show-stats basic-simulator-ubuntu: name: basic-simulator-ubuntu needs: [check-labels] @@ -328,11 +298,10 @@ jobs: name: execution-engine-integration-ubuntu needs: [check-labels] if: needs.check-labels.outputs.skip_ci != 'true' - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v5 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 with: channel: stable @@ -340,9 +309,6 @@ jobs: cache: false env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Add go compiler to $PATH - if: env.SELF_HOSTED_RUNNERS == 'true' - run: echo "/usr/local/go/bin" >> $GITHUB_PATH - name: Run exec engine integration tests in release run: make test-exec-engine check-code: From 0090b35ee048dac346063d5a5d9fb15510002f6e Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 6 Nov 2025 08:17:45 +0400 Subject: [PATCH 41/44] Remove `sensitive_url` and import from `crates.io` (#8377) Use the recently published `sensitive_url` and remove it from Lighthouse Co-Authored-By: Mac L --- Cargo.lock | 3 +- Cargo.toml | 3 +- 
common/sensitive_url/Cargo.toml | 16 --- common/sensitive_url/src/lib.rs | 239 -------------------------------- testing/simulator/Cargo.toml | 2 +- 5 files changed, 4 insertions(+), 259 deletions(-) delete mode 100644 common/sensitive_url/Cargo.toml delete mode 100644 common/sensitive_url/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index c8c14c7257..1d1108b1d0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8449,9 +8449,10 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "sensitive_url" version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb7b0221fa9905eec4163dbf7660b1876cc95663af1deddc3e19ebe49167c58c" dependencies = [ "serde", - "serde_json", "url", ] diff --git a/Cargo.toml b/Cargo.toml index d09b0fcd80..4d357816d9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,7 +39,6 @@ members = [ "common/network_utils", "common/oneshot_broadcast", "common/pretty_reqwest_error", - "common/sensitive_url", "common/slot_clock", "common/system_health", "common/target_check", @@ -225,7 +224,7 @@ rpds = "0.11" rusqlite = { version = "0.28", features = ["bundled"] } rust_eth_kzg = "0.9" safe_arith = "0.1" -sensitive_url = { path = "common/sensitive_url", features = ["serde"] } +sensitive_url = { version = "0.1", features = ["serde"] } serde = { version = "1", features = ["derive"] } serde_json = "1" serde_repr = "0.1" diff --git a/common/sensitive_url/Cargo.toml b/common/sensitive_url/Cargo.toml deleted file mode 100644 index 3793cc5139..0000000000 --- a/common/sensitive_url/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "sensitive_url" -version = "0.1.0" -authors = ["Mac L "] -edition = { workspace = true } -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[features] -serde = ["dep:serde"] - -[dependencies] -serde = { workspace = true, optional = true } -url = { workspace = true } - -[dev-dependencies] -serde_json = { 
workspace = true } diff --git a/common/sensitive_url/src/lib.rs b/common/sensitive_url/src/lib.rs deleted file mode 100644 index 3f9240268d..0000000000 --- a/common/sensitive_url/src/lib.rs +++ /dev/null @@ -1,239 +0,0 @@ -#[cfg(feature = "serde")] -use serde::{Deserialize, Deserializer, Serialize, Serializer, de}; -use std::fmt; -use std::str::FromStr; -use url::Url; - -/// Errors that can occur when creating or parsing a `SensitiveUrl`. -#[derive(Debug)] -pub enum Error { - /// The URL cannot be used as a base URL. - InvalidUrl(String), - /// Failed to parse the URL string. - ParseError(url::ParseError), - /// Failed to redact sensitive information from the URL. - RedactError(String), -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Error::InvalidUrl(msg) => write!(f, "Invalid URL: {}", msg), - Error::ParseError(e) => write!(f, "Parse error: {}", e), - Error::RedactError(msg) => write!(f, "Redact error: {}", msg), - } - } -} - -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::ParseError(e) => Some(e), - _ => None, - } - } -} - -/// A URL wrapper that redacts sensitive information in `Display` and `Debug` output. -/// -/// This type stores both the full URL (with credentials, paths, and query parameters) -/// and a redacted version (containing only the scheme, host, and port). The redacted -/// version is used when displaying or debugging to prevent accidental leakage of -/// credentials in logs. -/// -/// Note that `SensitiveUrl` specifically does NOT implement `Deref`, meaning you cannot call -/// `Url` methods like `.password()` or `.scheme()` directly on `SensitiveUrl`. You must first -/// explicitly call `.expose_full()`. 
-/// -/// # Examples -/// -/// ``` -/// use sensitive_url::SensitiveUrl; -/// -/// let url = SensitiveUrl::parse("https://user:pass@example.com/api?token=secret").unwrap(); -/// -/// // Display shows only the redacted version: -/// assert_eq!(url.to_string(), "https://example.com/"); -/// -/// // But you can still access the full URL when needed: -/// let full = url.expose_full(); -/// assert_eq!(full.to_string(), "https://user:pass@example.com/api?token=secret"); -/// assert_eq!(full.password(), Some("pass")); -/// ``` -#[derive(Clone, PartialEq, Eq, Hash)] -pub struct SensitiveUrl { - full: Url, - redacted: String, -} - -impl fmt::Display for SensitiveUrl { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.redacted.fmt(f) - } -} - -impl fmt::Debug for SensitiveUrl { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SensitiveUrl") - .field("redacted", &self.redacted) - // Maintains traditional `Debug` format but hides the 'full' field. - .finish_non_exhaustive() - } -} - -#[cfg(feature = "serde")] -impl Serialize for SensitiveUrl { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(self.full.as_ref()) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for SensitiveUrl { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let s: String = Deserialize::deserialize(deserializer)?; - SensitiveUrl::parse(&s) - .map_err(|e| de::Error::custom(format!("Failed to deserialize sensitive URL {:?}", e))) - } -} - -impl FromStr for SensitiveUrl { - type Err = Error; - - fn from_str(s: &str) -> Result { - Self::parse(s) - } -} - -impl SensitiveUrl { - /// Attempts to parse a `&str` into a `SensitiveUrl`. - pub fn parse(url: &str) -> Result { - let surl = Url::parse(url).map_err(Error::ParseError)?; - SensitiveUrl::new(surl) - } - - /// Creates a `SensitiveUrl` from an existing `Url`. 
- pub fn new(full: Url) -> Result { - let mut redacted = full.clone(); - redacted - .path_segments_mut() - .map_err(|_| Error::InvalidUrl("URL cannot be a base.".to_string()))? - .clear(); - redacted.set_query(None); - - if redacted.has_authority() { - redacted - .set_username("") - .map_err(|_| Error::RedactError("Unable to redact username.".to_string()))?; - redacted - .set_password(None) - .map_err(|_| Error::RedactError("Unable to redact password.".to_string()))?; - } - - Ok(Self { - full, - redacted: redacted.to_string(), - }) - } - - /// Returns a reference to the full, unredacted URL. - pub fn expose_full(&self) -> &Url { - &self.full - } - - /// Returns the redacted URL as a `&str`. - pub fn redacted(&self) -> &str { - &self.redacted - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn redact_remote_url() { - let full = "https://user:pass@example.com/example?somequery"; - let surl = SensitiveUrl::parse(full).unwrap(); - assert_eq!(surl.to_string(), "https://example.com/"); - assert_eq!(surl.expose_full().to_string(), full); - } - - #[test] - fn redact_localhost_url() { - let full = "http://user:pass@localhost:5052/"; - let surl = SensitiveUrl::parse(full).unwrap(); - assert_eq!(surl.to_string(), "http://localhost:5052/"); - assert_eq!(surl.expose_full().to_string(), full); - } - - #[test] - fn test_no_credentials() { - let full = "https://example.com/path"; - let surl = SensitiveUrl::parse(full).unwrap(); - assert_eq!(surl.to_string(), "https://example.com/"); - assert_eq!(surl.expose_full().to_string(), full); - } - - #[test] - fn test_display() { - let full = "https://user:pass@example.com/api?token=secret"; - let surl = SensitiveUrl::parse(full).unwrap(); - - let display = surl.to_string(); - assert_eq!(display, "https://example.com/"); - } - - #[test] - fn test_debug() { - let full = "https://user:pass@example.com/api?token=secret"; - let surl = SensitiveUrl::parse(full).unwrap(); - - let debug = format!("{:?}", surl); - - assert_eq!( - 
debug, - "SensitiveUrl { redacted: \"https://example.com/\", .. }" - ); - } - - #[cfg(feature = "serde")] - mod serde_tests { - use super::*; - - #[test] - fn test_serialize() { - let full = "https://user:pass@example.com/api?token=secret"; - let surl = SensitiveUrl::parse(full).unwrap(); - - let json = serde_json::to_string(&surl).unwrap(); - assert_eq!(json, format!("\"{}\"", full)); - } - - #[test] - fn test_deserialize() { - let full = "https://user:pass@example.com/api?token=secret"; - let json = format!("\"{}\"", full); - - let surl: SensitiveUrl = serde_json::from_str(&json).unwrap(); - assert_eq!(surl.expose_full().as_str(), full); - } - - #[test] - fn test_roundtrip() { - let full = "https://user:pass@example.com/api?token=secret"; - let original = SensitiveUrl::parse(full).unwrap(); - - let json = serde_json::to_string(&original).unwrap(); - let deserialized: SensitiveUrl = serde_json::from_str(&json).unwrap(); - - assert_eq!(deserialized.expose_full(), original.expose_full()); - } - } -} diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index cd23138a1c..54035f2e82 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -15,7 +15,7 @@ logging = { workspace = true } node_test_rig = { path = "../node_test_rig" } parking_lot = { workspace = true } rayon = { workspace = true } -sensitive_url = { path = "../../common/sensitive_url" } +sensitive_url = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } From 2c1f1c1605a736ee054c0e0518ebcedbb616f571 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Javier=20Ch=C3=A1varri?= Date: Thu, 6 Nov 2025 15:13:57 +0100 Subject: [PATCH 42/44] Migrate derivative to educe (#8125) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #7001. Mostly mechanical replacement of `derivative` attributes with `educe` ones. ### **Attribute Syntax Changes** ```rust // Bounds: = "..." → (...) 
#[derivative(Hash(bound = "E: EthSpec"))] #[educe(Hash(bound(E: EthSpec)))] // Ignore: = "ignore" → (ignore) #[derivative(PartialEq = "ignore")] #[educe(PartialEq(ignore))] // Default values: value = "..." → expression = ... #[derivative(Default(value = "ForkName::Base"))] #[educe(Default(expression = ForkName::Base))] // Methods: format_with/compare_with = "..." → method(...) #[derivative(Debug(format_with = "fmt_peer_set_as_len"))] #[educe(Debug(method(fmt_peer_set_as_len)))] // Empty bounds: removed entirely, educe can infer appropriate bounds #[derivative(Default(bound = ""))] #[educe(Default)] // Transparent debug: manual implementation (educe doesn't support it) #[derivative(Debug = "transparent")] // Replaced with manual Debug impl that delegates to inner field ``` **Note**: Some bounds use strings (`bound("E: EthSpec")`) for superstruct compatibility (`expected ','` errors). Co-Authored-By: Javier Chávarri Co-Authored-By: Mac L --- Cargo.lock | 22 +-- Cargo.toml | 2 +- beacon_node/beacon_chain/Cargo.toml | 2 +- .../src/beacon_fork_choice_store.rs | 8 +- .../beacon_chain/src/blob_verification.rs | 6 +- .../beacon_chain/src/block_verification.rs | 6 +- .../src/block_verification_types.rs | 10 +- .../src/data_column_verification.rs | 14 +- ...ght_client_finality_update_verification.rs | 6 +- ...t_client_optimistic_update_verification.rs | 6 +- .../beacon_chain/src/observed_operations.rs | 8 +- .../src/sync_committee_verification.rs | 6 +- beacon_node/network/Cargo.toml | 2 +- beacon_node/network/src/sync/batch.rs | 8 +- .../sync/block_lookups/single_block_lookup.rs | 26 ++-- beacon_node/operation_pool/Cargo.toml | 2 +- beacon_node/operation_pool/src/persistence.rs | 7 +- common/eth2/Cargo.toml | 2 +- common/eth2/src/lib.rs | 8 +- common/validator_dir/Cargo.toml | 2 +- common/validator_dir/src/validator_dir.rs | 8 +- consensus/state_processing/Cargo.toml | 2 +- .../state_processing/src/verify_operation.rs | 8 +- consensus/types/Cargo.toml | 2 +- 
consensus/types/src/attestation.rs | 22 +-- consensus/types/src/attester_slashing.rs | 14 +- consensus/types/src/beacon_block.rs | 10 +- consensus/types/src/beacon_block_body.rs | 10 +- consensus/types/src/beacon_state.rs | 6 +- .../types/src/beacon_state/committee_cache.rs | 8 +- consensus/types/src/blob_sidecar.rs | 8 +- consensus/types/src/chain_spec.rs | 8 +- consensus/types/src/data_column_sidecar.rs | 8 +- consensus/types/src/data_column_subnet_id.rs | 10 +- consensus/types/src/execution_block_hash.rs | 10 +- consensus/types/src/execution_payload.rs | 10 +- .../types/src/execution_payload_header.rs | 10 +- consensus/types/src/execution_requests.rs | 6 +- consensus/types/src/indexed_attestation.rs | 9 +- consensus/types/src/light_client_bootstrap.rs | 6 +- .../types/src/light_client_finality_update.rs | 6 +- consensus/types/src/light_client_header.rs | 6 +- .../src/light_client_optimistic_update.rs | 6 +- consensus/types/src/light_client_update.rs | 6 +- consensus/types/src/payload.rs | 26 ++-- consensus/types/src/runtime_var_list.rs | 6 +- consensus/types/src/signed_beacon_block.rs | 10 +- consensus/types/src/sync_aggregate.rs | 8 +- crypto/kzg/Cargo.toml | 2 +- crypto/kzg/src/kzg_commitment.rs | 6 +- slasher/Cargo.toml | 2 +- slasher/src/database/redb_impl.rs | 14 +- testing/ef_tests/Cargo.toml | 2 +- testing/ef_tests/src/handler.rs | 126 +++++++++--------- validator_manager/Cargo.toml | 2 +- validator_manager/src/import_validators.rs | 8 +- 56 files changed, 277 insertions(+), 287 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1d1108b1d0..e045c8697f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -862,7 +862,7 @@ dependencies = [ "bitvec 1.0.1", "bls", "criterion", - "derivative", + "educe", "eth2", "eth2_network_config", "ethereum_hashing", @@ -2543,7 +2543,7 @@ dependencies = [ "bls", "compare_fields", "context_deserialize", - "derivative", + "educe", "eth2_network_config", "ethereum_ssz", "ethereum_ssz_derive", @@ -2862,7 +2862,7 @@ dependencies = [ 
name = "eth2" version = "0.1.0" dependencies = [ - "derivative", + "educe", "eip_3076", "either", "enr", @@ -5014,7 +5014,7 @@ dependencies = [ "arbitrary", "c-kzg", "criterion", - "derivative", + "educe", "ethereum_hashing", "ethereum_serde_utils", "ethereum_ssz", @@ -6309,7 +6309,7 @@ dependencies = [ "beacon_processor", "bls", "delay_map", - "derivative", + "educe", "eth2", "eth2_network_config", "ethereum_ssz", @@ -6745,7 +6745,7 @@ version = "0.2.0" dependencies = [ "beacon_chain", "bitvec 1.0.1", - "derivative", + "educe", "ethereum_ssz", "ethereum_ssz_derive", "itertools 0.10.5", @@ -8748,7 +8748,7 @@ version = "0.1.0" dependencies = [ "bincode", "byteorder", - "derivative", + "educe", "ethereum_ssz", "ethereum_ssz_derive", "filesystem", @@ -8917,7 +8917,7 @@ dependencies = [ "arbitrary", "beacon_chain", "bls", - "derivative", + "educe", "ethereum_hashing", "ethereum_ssz", "ethereum_ssz_derive", @@ -9855,7 +9855,7 @@ dependencies = [ "compare_fields", "context_deserialize", "criterion", - "derivative", + "educe", "eth2_interop_keypairs", "ethereum_hashing", "ethereum_serde_utils", @@ -10099,7 +10099,7 @@ version = "0.1.0" dependencies = [ "bls", "deposit_contract", - "derivative", + "educe", "eth2_keystore", "filesystem", "hex", @@ -10187,7 +10187,7 @@ dependencies = [ "beacon_chain", "clap", "clap_utils", - "derivative", + "educe", "environment", "eth2", "eth2_network_config", diff --git a/Cargo.toml b/Cargo.toml index 4d357816d9..15fea466f0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -128,11 +128,11 @@ context_deserialize_derive = { path = "consensus/context_deserialize/context_des criterion = "0.5" delay_map = "0.4" deposit_contract = { path = "common/deposit_contract" } -derivative = "2" directory = { path = "common/directory" } dirs = "3" discv5 = { version = "0.10", features = ["libp2p"] } doppelganger_service = { path = "validator_client/doppelganger_service" } +educe = "0.6" eip_3076 = { path = "common/eip_3076" } either = "1.9" environment = { path 
= "lighthouse/environment" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index dca351cbac..e889f53bb0 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -18,7 +18,7 @@ test_backfill = [] alloy-primitives = { workspace = true } bitvec = { workspace = true } bls = { workspace = true } -derivative = { workspace = true } +educe = { workspace = true } eth2 = { workspace = true } eth2_network_config = { workspace = true } ethereum_hashing = { workspace = true } diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 440388661c..0c203009bb 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -5,7 +5,7 @@ //! reads when fork choice requires the validator balances of the justified state. use crate::{BeaconSnapshot, metrics}; -use derivative::Derivative; +use educe::Educe; use fork_choice::ForkChoiceStore; use proto_array::JustifiedBalances; use safe_arith::ArithError; @@ -127,10 +127,10 @@ impl BalancesCache { /// Implements `fork_choice::ForkChoiceStore` in order to provide a persistent backing to the /// `fork_choice::ForkChoice` struct. 
-#[derive(Debug, Derivative)] -#[derivative(PartialEq(bound = "E: EthSpec, Hot: ItemStore, Cold: ItemStore"))] +#[derive(Debug, Educe)] +#[educe(PartialEq(bound(E: EthSpec, Hot: ItemStore, Cold: ItemStore)))] pub struct BeaconForkChoiceStore, Cold: ItemStore> { - #[derivative(PartialEq = "ignore")] + #[educe(PartialEq(ignore))] store: Arc>, balances_cache: BalancesCache, time: Slot, diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 53f2eff0ca..874673b52e 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -1,4 +1,4 @@ -use derivative::Derivative; +use educe::Educe; use slot_clock::SlotClock; use std::marker::PhantomData; use std::sync::Arc; @@ -245,8 +245,8 @@ impl GossipVerifiedBlob { /// Wrapper over a `BlobSidecar` for which we have completed kzg verification. /// i.e. `verify_blob_kzg_proof(blob, commitment, proof) == true`. -#[derive(Debug, Derivative, Clone, Encode, Decode)] -#[derivative(PartialEq, Eq)] +#[derive(Debug, Educe, Clone, Encode, Decode)] +#[educe(PartialEq, Eq)] #[ssz(struct_behaviour = "transparent")] pub struct KzgVerifiedBlob { blob: Arc>, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 691293b200..1ddc51cc35 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -66,7 +66,7 @@ use crate::{ beacon_chain::{BeaconForkChoice, ForkChoiceError}, metrics, }; -use derivative::Derivative; +use educe::Educe; use eth2::types::{BlockGossip, EventKind}; use execution_layer::PayloadStatus; pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; @@ -689,8 +689,8 @@ pub fn signature_verify_chain_segment( /// A wrapper around a `SignedBeaconBlock` that indicates it has been approved for re-gossiping on /// the p2p network. 
-#[derive(Derivative)] -#[derivative(Debug(bound = "T: BeaconChainTypes"))] +#[derive(Educe)] +#[educe(Debug(bound(T: BeaconChainTypes)))] pub struct GossipVerifiedBlock { pub block: Arc>, pub block_root: Hash256, diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 1a0b188fdc..5978e97c4d 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -2,7 +2,7 @@ use crate::data_availability_checker::AvailabilityCheckError; pub use crate::data_availability_checker::{AvailableBlock, MaybeAvailableBlock}; use crate::data_column_verification::{CustodyDataColumn, CustodyDataColumnList}; use crate::{PayloadVerificationOutcome, get_block_root}; -use derivative::Derivative; +use educe::Educe; use ssz_types::VariableList; use state_processing::ConsensusContext; use std::fmt::{Debug, Formatter}; @@ -26,8 +26,8 @@ use types::{ /// Note: We make a distinction over blocks received over gossip because /// in a post-deneb world, the blobs corresponding to a given block that are received /// over rpc do not contain the proposer signature for dos resistance. -#[derive(Clone, Derivative)] -#[derivative(Hash(bound = "E: EthSpec"))] +#[derive(Clone, Educe)] +#[educe(Hash(bound(E: EthSpec)))] pub struct RpcBlock { block_root: Hash256, block: RpcBlockInner, @@ -80,8 +80,8 @@ impl RpcBlock { /// Note: This variant is intentionally private because we want to safely construct the /// internal variants after applying consistency checks to ensure that the block and blobs /// are consistent with respect to each other. -#[derive(Debug, Clone, Derivative)] -#[derivative(Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Educe)] +#[educe(Hash(bound(E: EthSpec)))] enum RpcBlockInner { /// Single block lookup response. This should potentially hit the data availability cache. 
Block(Arc>), diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 7a8066351a..b998602566 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -4,7 +4,7 @@ use crate::block_verification::{ use crate::kzg_utils::{reconstruct_data_columns, validate_data_columns}; use crate::observed_data_sidecars::{ObservationStrategy, Observe}; use crate::{BeaconChain, BeaconChainError, BeaconChainTypes, metrics}; -use derivative::Derivative; +use educe::Educe; use fork_choice::ProtoBlock; use kzg::{Error as KzgError, Kzg}; use proto_array::Block; @@ -296,8 +296,8 @@ impl GossipVerifiedDataColumn } /// Wrapper over a `DataColumnSidecar` for which we have completed kzg verification. -#[derive(Debug, Derivative, Clone, Encode, Decode)] -#[derivative(PartialEq, Eq)] +#[derive(Debug, Educe, Clone, Encode, Decode)] +#[educe(PartialEq, Eq)] #[ssz(struct_behaviour = "transparent")] pub struct KzgVerifiedDataColumn { data: Arc>, @@ -353,8 +353,8 @@ pub type CustodyDataColumnList = VariableList, ::NumberOfColumns>; /// Data column that we must custody -#[derive(Debug, Derivative, Clone, Encode, Decode)] -#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Educe, Clone, Encode, Decode)] +#[educe(PartialEq, Eq, Hash(bound(E: EthSpec)))] #[ssz(struct_behaviour = "transparent")] pub struct CustodyDataColumn { data: Arc>, @@ -383,8 +383,8 @@ impl CustodyDataColumn { } /// Data column that we must custody and has completed kzg verification -#[derive(Debug, Derivative, Clone, Encode, Decode)] -#[derivative(PartialEq, Eq)] +#[derive(Debug, Educe, Clone, Encode, Decode)] +#[educe(PartialEq, Eq)] #[ssz(struct_behaviour = "transparent")] pub struct KzgVerifiedCustodyDataColumn { data: Arc>, diff --git a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs 
b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs index fe62b8ef90..2dc4de7d04 100644 --- a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs @@ -1,5 +1,5 @@ use crate::{BeaconChain, BeaconChainTypes}; -use derivative::Derivative; +use educe::Educe; use slot_clock::SlotClock; use std::time::Duration; use strum::AsRefStr; @@ -55,8 +55,8 @@ pub enum Error { } /// Wraps a `LightClientFinalityUpdate` that has been verified for propagation on the gossip network. -#[derive(Derivative)] -#[derivative(Clone(bound = "T: BeaconChainTypes"))] +#[derive(Educe)] +#[educe(Clone(bound(T: BeaconChainTypes)))] pub struct VerifiedLightClientFinalityUpdate { light_client_finality_update: LightClientFinalityUpdate, seen_timestamp: Duration, diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs index b59390ea0c..4079a374f8 100644 --- a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs @@ -1,5 +1,5 @@ use crate::{BeaconChain, BeaconChainTypes}; -use derivative::Derivative; +use educe::Educe; use eth2::types::Hash256; use slot_clock::SlotClock; use std::time::Duration; @@ -49,8 +49,8 @@ pub enum Error { } /// Wraps a `LightClientOptimisticUpdate` that has been verified for propagation on the gossip network. 
-#[derive(Derivative)] -#[derivative(Clone(bound = "T: BeaconChainTypes"))] +#[derive(Educe)] +#[educe(Clone(bound(T: BeaconChainTypes)))] pub struct VerifiedLightClientOptimisticUpdate { light_client_optimistic_update: LightClientOptimisticUpdate, pub parent_root: Hash256, diff --git a/beacon_node/beacon_chain/src/observed_operations.rs b/beacon_node/beacon_chain/src/observed_operations.rs index 49614c5b54..4ca5371242 100644 --- a/beacon_node/beacon_chain/src/observed_operations.rs +++ b/beacon_node/beacon_chain/src/observed_operations.rs @@ -1,4 +1,4 @@ -use derivative::Derivative; +use educe::Educe; use smallvec::{SmallVec, smallvec}; use state_processing::{SigVerifiedOp, TransformPersist, VerifyOperation, VerifyOperationAt}; use std::collections::HashSet; @@ -14,8 +14,8 @@ pub const SMALL_VEC_SIZE: usize = 8; /// Stateful tracker for exit/slashing operations seen on the network. /// /// Implements the conditions for gossip verification of exits and slashings from the P2P spec. -#[derive(Debug, Derivative)] -#[derivative(Default(bound = "T: ObservableOperation, E: EthSpec"))] +#[derive(Debug, Educe)] +#[educe(Default(bound(T: ObservableOperation, E: EthSpec)))] pub struct ObservedOperations, E: EthSpec> { /// Indices of validators for whom we have already seen an instance of an operation `T`. /// @@ -26,7 +26,7 @@ pub struct ObservedOperations, E: EthSpec> { /// `attestation_1.attester_indices` and `attestation_2.attester_indices`. observed_validator_indices: HashSet, /// The name of the current fork. The default will be overwritten on first use. 
- #[derivative(Default(value = "ForkName::Base"))] + #[educe(Default(expression = ForkName::Base))] current_fork: ForkName, _phantom: PhantomData<(T, E)>, } diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index 41d29d5526..e72e9a6b21 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -31,7 +31,7 @@ use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, metrics, observed_aggregates::ObserveOutcome, }; use bls::{PublicKeyBytes, verify_signature_sets}; -use derivative::Derivative; +use educe::Educe; use safe_arith::ArithError; use slot_clock::SlotClock; use ssz_derive::{Decode, Encode}; @@ -261,8 +261,8 @@ impl From for Error { } /// Wraps a `SignedContributionAndProof` that has been verified for propagation on the gossip network.\ -#[derive(Derivative)] -#[derivative(Clone(bound = "T: BeaconChainTypes"))] +#[derive(Educe)] +#[educe(Clone(bound(T: BeaconChainTypes)))] pub struct VerifiedSyncContribution { signed_aggregate: SignedContributionAndProof, participant_pubkeys: Vec, diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 5615148648..b60c5e6dbf 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -19,7 +19,7 @@ async-channel = { workspace = true } beacon_chain = { workspace = true } beacon_processor = { workspace = true } delay_map = { workspace = true } -derivative = { workspace = true } +educe = { workspace = true } ethereum_ssz = { workspace = true } execution_layer = { workspace = true } fnv = { workspace = true } diff --git a/beacon_node/network/src/sync/batch.rs b/beacon_node/network/src/sync/batch.rs index ea0ef15f4b..8de386f5be 100644 --- a/beacon_node/network/src/sync/batch.rs +++ b/beacon_node/network/src/sync/batch.rs @@ -1,5 +1,5 @@ use beacon_chain::block_verification_types::RpcBlock; -use 
derivative::Derivative; +use educe::Educe; use lighthouse_network::PeerId; use lighthouse_network::rpc::methods::BlocksByRangeRequest; use lighthouse_network::rpc::methods::DataColumnsByRangeRequest; @@ -78,8 +78,8 @@ pub enum BatchProcessingResult { NonFaultyFailure, } -#[derive(Derivative)] -#[derivative(Debug)] +#[derive(Educe)] +#[educe(Debug)] /// A segment of a chain. pub struct BatchInfo { /// Start slot of the batch. @@ -97,7 +97,7 @@ pub struct BatchInfo { /// Whether this batch contains all blocks or all blocks and blobs. batch_type: ByRangeRequestType, /// Pin the generic - #[derivative(Debug = "ignore")] + #[educe(Debug(ignore))] marker: std::marker::PhantomData<(E, B)>, } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 8fb3248a87..46897b2283 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -5,7 +5,7 @@ use crate::sync::network_context::{ SyncNetworkContext, }; use beacon_chain::{BeaconChainTypes, BlockProcessStatus}; -use derivative::Derivative; +use educe::Educe; use lighthouse_network::service::api_types::Id; use lighthouse_tracing::SPAN_SINGLE_BLOCK_LOOKUP; use parking_lot::RwLock; @@ -57,8 +57,8 @@ pub enum LookupRequestError { }, } -#[derive(Derivative)] -#[derivative(Debug(bound = "T: BeaconChainTypes"))] +#[derive(Educe)] +#[educe(Debug(bound(T: BeaconChainTypes)))] pub struct SingleBlockLookup { pub id: Id, pub block_request_state: BlockRequestState, @@ -67,7 +67,7 @@ pub struct SingleBlockLookup { /// the custody request to have an updated view of the peers that claim to have imported the /// block associated with this lookup. The peer set of a lookup can change rapidly, and faster /// than the lifetime of a custody request. 
- #[derivative(Debug(format_with = "fmt_peer_set_as_len"))] + #[educe(Debug(method(fmt_peer_set_as_len)))] peers: Arc>>, block_root: Hash256, awaiting_parent: Option, @@ -369,10 +369,10 @@ impl SingleBlockLookup { } /// The state of the blob request component of a `SingleBlockLookup`. -#[derive(Derivative)] -#[derivative(Debug)] +#[derive(Educe)] +#[educe(Debug)] pub struct BlobRequestState { - #[derivative(Debug = "ignore")] + #[educe(Debug(ignore))] pub block_root: Hash256, pub state: SingleLookupRequestState>, } @@ -387,10 +387,10 @@ impl BlobRequestState { } /// The state of the custody request component of a `SingleBlockLookup`. -#[derive(Derivative)] -#[derivative(Debug)] +#[derive(Educe)] +#[educe(Debug)] pub struct CustodyRequestState { - #[derivative(Debug = "ignore")] + #[educe(Debug(ignore))] pub block_root: Hash256, pub state: SingleLookupRequestState>, } @@ -405,10 +405,10 @@ impl CustodyRequestState { } /// The state of the block request component of a `SingleBlockLookup`. 
-#[derive(Derivative)] -#[derivative(Debug)] +#[derive(Educe)] +#[educe(Debug)] pub struct BlockRequestState { - #[derivative(Debug = "ignore")] + #[educe(Debug(ignore))] pub requested_block_root: Hash256, pub state: SingleLookupRequestState>>, } diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index beaf818882..eeddb53c23 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -9,7 +9,7 @@ portable = ["beacon_chain/portable"] [dependencies] bitvec = { workspace = true } -derivative = { workspace = true } +educe = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } itertools = { workspace = true } diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 4d75453460..ee45c8dd05 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -3,7 +3,7 @@ use crate::OperationPool; use crate::attestation_storage::AttestationMap; use crate::bls_to_execution_changes::{BlsToExecutionChanges, ReceivedPreCapella}; use crate::sync_aggregate_id::SyncAggregateId; -use derivative::Derivative; +use educe::Educe; use parking_lot::RwLock; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -22,10 +22,7 @@ type PersistedSyncContributions = Vec<(SyncAggregateId, Vec { diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index d9b2f10198..4e04fa95da 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -30,7 +30,7 @@ arbitrary = { workspace = true, features = ["derive"], optional = true } bls = { workspace = true } compare_fields = { workspace = true } context_deserialize = { workspace = true } -derivative = { workspace = true } +educe = { workspace = true } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } ethereum_hashing = { workspace = true } ethereum_serde_utils = { workspace 
= true } diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 5264686792..1430582658 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -8,7 +8,7 @@ use crate::{ }; use crate::{Hash256, Slot, test_utils::TestRandom}; use crate::{IndexedAttestation, context_deserialize}; -use derivative::Derivative; +use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::BitVector; @@ -45,11 +45,11 @@ impl From for Error { Decode, Encode, TestRandom, - Derivative, + Educe, TreeHash, ), context_deserialize(ForkName), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Hash(bound(E: EthSpec))), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", @@ -66,7 +66,8 @@ impl From for Error { derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive(Debug, Clone, Serialize, TreeHash, Encode, Derivative, Deserialize, PartialEq)] +#[derive(Debug, Clone, Serialize, TreeHash, Encode, Educe, Deserialize)] +#[educe(PartialEq)] #[serde(untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] @@ -599,18 +600,7 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for Vec> */ #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive( - Debug, - Clone, - Serialize, - Deserialize, - Decode, - Encode, - TestRandom, - Derivative, - TreeHash, - PartialEq, -)] +#[derive(Debug, Clone, Serialize, Deserialize, Decode, Encode, TestRandom, TreeHash, PartialEq)] #[context_deserialize(ForkName)] pub struct SingleAttestation { #[serde(with = "serde_utils::quoted_u64")] diff --git a/consensus/types/src/attester_slashing.rs b/consensus/types/src/attester_slashing.rs index adc3695f4a..2bfb65653c 100644 --- a/consensus/types/src/attester_slashing.rs +++ b/consensus/types/src/attester_slashing.rs @@ -4,7 +4,7 @@ use crate::indexed_attestation::{ }; use 
crate::{ContextDeserialize, ForkName}; use crate::{EthSpec, test_utils::TestRandom}; -use derivative::Derivative; +use educe::Educe; use rand::{Rng, RngCore}; use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; @@ -16,7 +16,7 @@ use tree_hash_derive::TreeHash; variants(Base, Electra), variant_attributes( derive( - Derivative, + Educe, Debug, Clone, Serialize, @@ -27,7 +27,7 @@ use tree_hash_derive::TreeHash; TestRandom, ), context_deserialize(ForkName), - derivative(PartialEq, Eq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Eq, Hash(bound(E: EthSpec))), serde(bound = "E: EthSpec"), cfg_attr( feature = "arbitrary", @@ -42,8 +42,8 @@ use tree_hash_derive::TreeHash; derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive(Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative)] -#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Educe)] +#[educe(PartialEq, Eq, Hash(bound(E: EthSpec)))] #[serde(bound = "E: EthSpec", untagged)] #[ssz(enum_behaviour = "transparent")] #[tree_hash(enum_behaviour = "transparent")] @@ -57,8 +57,8 @@ pub struct AttesterSlashing { /// This is a copy of the `AttesterSlashing` enum but with `Encode` and `Decode` derived /// using the `union` behavior for the purposes of persistence on disk. We use a separate /// type so that we don't accidentally use this non-spec encoding in consensus objects. 
-#[derive(Debug, Clone, Encode, Decode, Derivative)] -#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Encode, Decode, Educe)] +#[educe(PartialEq, Eq, Hash(bound(E: EthSpec)))] #[ssz(enum_behaviour = "union")] pub enum AttesterSlashingOnDisk { Base(AttesterSlashingBase), diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 61c32dd4ac..060709d655 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -1,7 +1,7 @@ use crate::attestation::AttestationBase; use crate::test_utils::TestRandom; use crate::*; -use derivative::Derivative; +use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; @@ -27,9 +27,9 @@ use self::indexed_attestation::IndexedAttestationBase; Decode, TreeHash, TestRandom, - Derivative, + Educe, ), - derivative(PartialEq, Hash(bound = "E: EthSpec, Payload: AbstractExecPayload")), + educe(PartialEq, Hash(bound(E: EthSpec, Payload: AbstractExecPayload))), serde( bound = "E: EthSpec, Payload: AbstractExecPayload", deny_unknown_fields @@ -52,8 +52,8 @@ use self::indexed_attestation::IndexedAttestationBase; derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload") )] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(untagged)] #[serde(bound = "E: EthSpec, Payload: AbstractExecPayload")] #[tree_hash(enum_behaviour = "transparent")] diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index e636fbb534..ced8fea4a9 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -1,6 +1,6 @@ use crate::test_utils::TestRandom; use crate::*; -use 
derivative::Derivative; +use educe::Educe; use merkle_proof::{MerkleTree, MerkleTreeError}; use metastruct::metastruct; use serde::{Deserialize, Deserializer, Serialize}; @@ -39,9 +39,9 @@ pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; Decode, TreeHash, TestRandom, - Derivative, + Educe, ), - derivative(PartialEq, Hash(bound = "E: EthSpec, Payload: AbstractExecPayload")), + educe(PartialEq, Hash(bound(E: EthSpec, Payload: AbstractExecPayload))), serde( bound = "E: EthSpec, Payload: AbstractExecPayload", deny_unknown_fields @@ -71,8 +71,8 @@ pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload") )] -#[derive(Debug, Clone, Serialize, Deserialize, Derivative, TreeHash)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Deserialize, Educe, TreeHash)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(untagged)] #[serde(bound = "E: EthSpec, Payload: AbstractExecPayload")] #[tree_hash(enum_behaviour = "transparent")] diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index d1574be7cc..d13e223557 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -5,7 +5,7 @@ use crate::historical_summary::HistoricalSummary; use crate::test_utils::TestRandom; use crate::*; use compare_fields::CompareFields; -use derivative::Derivative; +use educe::Educe; use ethereum_hashing::hash; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; use metastruct::{NumFields, metastruct}; @@ -245,7 +245,7 @@ impl From for Hash256 { variants(Base, Altair, Bellatrix, Capella, Deneb, Electra, Fulu, Gloas), variant_attributes( derive( - Derivative, + Educe, Debug, PartialEq, Serialize, @@ -262,7 +262,7 @@ impl From for Hash256 { derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") ), - derivative(Clone), + educe(Clone), ), specific_variant_attributes( Base(metastruct( diff --git 
a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 06242e8d20..408c269da5 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -2,7 +2,7 @@ use crate::*; use core::num::NonZeroUsize; -use derivative::Derivative; +use educe::Educe; use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode, four_byte_option_impl}; @@ -20,13 +20,13 @@ four_byte_option_impl!(four_byte_option_non_zero_usize, NonZeroUsize); /// Computes and stores the shuffling for an epoch. Provides various getters to allow callers to /// read the committees for the given epoch. -#[derive(Derivative, Debug, Default, Clone, Serialize, Deserialize, Encode, Decode)] -#[derivative(PartialEq)] +#[derive(Educe, Debug, Default, Clone, Serialize, Deserialize, Encode, Decode)] +#[educe(PartialEq)] pub struct CommitteeCache { #[ssz(with = "four_byte_option_epoch")] initialized_epoch: Option, shuffling: Vec, - #[derivative(PartialEq(compare_with = "compare_shuffling_positions"))] + #[educe(PartialEq(method(compare_shuffling_positions)))] shuffling_positions: Vec, committees_per_slot: u64, slots_per_epoch: u64, diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index 2e8c257897..d2c7331a57 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -7,7 +7,7 @@ use crate::{ beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, }; use bls::Signature; -use derivative::Derivative; +use educe::Educe; use kzg::{BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT, Blob as KzgBlob, Kzg, KzgCommitment, KzgProof}; use merkle_proof::{MerkleTreeError, merkle_root_from_branch, verify_merkle_proof}; use rand::Rng; @@ -49,12 +49,10 @@ impl Ord for BlobIdentifier { derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive( - Debug, Clone, Serialize, Deserialize, Encode, Decode, 
TreeHash, TestRandom, Derivative, -)] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Educe)] #[context_deserialize(ForkName)] #[serde(bound = "E: EthSpec")] -#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +#[educe(PartialEq, Eq, Hash(bound(E: EthSpec)))] pub struct BlobSidecar { #[serde(with = "serde_utils::quoted_u64")] pub index: u64, diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 5dedd930c6..a66080ada6 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -2,7 +2,7 @@ use crate::application_domain::{APPLICATION_DOMAIN_BUILDER, ApplicationDomain}; use crate::blob_sidecar::BlobIdentifier; use crate::data_column_sidecar::DataColumnsByRootIdentifier; use crate::*; -use derivative::Derivative; +use educe::Educe; use ethereum_hashing::hash; use int_to_bytes::int_to_bytes4; use safe_arith::{ArithError, SafeArith}; @@ -1566,15 +1566,15 @@ pub struct BlobParameters { // A wrapper around a vector of BlobParameters to ensure that the vector is reverse // sorted by epoch. #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive(Debug, Derivative, Clone)] -#[derivative(PartialEq)] +#[derive(Debug, Educe, Clone)] +#[educe(PartialEq)] pub struct BlobSchedule { schedule: Vec, // This is a hack to prevent the blob schedule being serialized on the /eth/v1/config/spec // endpoint prior to the Fulu fork being scheduled. // // We can remove this once Fulu is live on mainnet. 
- #[derivative(PartialEq = "ignore")] + #[educe(PartialEq(ignore))] skip_serializing: bool, } diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index 2272b1695c..62ce4467df 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -6,7 +6,7 @@ use crate::{ SignedBeaconBlockHeader, Slot, }; use bls::Signature; -use derivative::Derivative; +use educe::Educe; use kzg::Error as KzgError; use kzg::{KzgCommitment, KzgProof}; use merkle_proof::verify_merkle_proof; @@ -40,11 +40,9 @@ pub type DataColumnSidecarList = Vec>>; derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive( - Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, -)] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Educe)] #[serde(bound = "E: EthSpec")] -#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +#[educe(PartialEq, Eq, Hash(bound(E: EthSpec)))] #[context_deserialize(ForkName)] pub struct DataColumnSidecar { #[serde(with = "serde_utils::quoted_u64")] diff --git a/consensus/types/src/data_column_subnet_id.rs b/consensus/types/src/data_column_subnet_id.rs index 4061cb4fdb..c6b8846c78 100644 --- a/consensus/types/src/data_column_subnet_id.rs +++ b/consensus/types/src/data_column_subnet_id.rs @@ -1,18 +1,22 @@ //! Identifies each data column subnet by an integer identifier. 
use crate::ChainSpec; use crate::data_column_sidecar::ColumnIndex; -use derivative::Derivative; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use std::fmt::{self, Display}; use std::ops::{Deref, DerefMut}; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive(Clone, Copy, Derivative, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[derivative(Debug = "transparent")] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] pub struct DataColumnSubnetId(#[serde(with = "serde_utils::quoted_u64")] u64); +impl fmt::Debug for DataColumnSubnetId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + std::fmt::Debug::fmt(&self.0, f) + } +} + impl DataColumnSubnetId { pub fn new(id: u64) -> Self { id.into() diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution_block_hash.rs index d3065afbbb..31905d64df 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution_block_hash.rs @@ -1,18 +1,22 @@ use crate::FixedBytesExtended; use crate::Hash256; use crate::test_utils::TestRandom; -use derivative::Derivative; use rand::RngCore; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use std::fmt; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive(Default, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash, Derivative)] -#[derivative(Debug = "transparent")] +#[derive(Default, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash)] #[serde(transparent)] pub struct ExecutionBlockHash(#[serde(with = "serde_utils::b256_hex")] pub Hash256); +impl fmt::Debug for ExecutionBlockHash { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + std::fmt::Debug::fmt(&self.0, f) + } +} + impl ExecutionBlockHash { pub fn zero() -> Self { Self(Hash256::zero()) diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 
7a899e5f02..3548f67db2 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -1,5 +1,5 @@ use crate::{test_utils::TestRandom, *}; -use derivative::Derivative; +use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -27,10 +27,10 @@ pub type Withdrawals = VariableList::MaxWithdrawal Decode, TreeHash, TestRandom, - Derivative, + Educe, ), context_deserialize(ForkName), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Hash(bound(E: EthSpec))), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", @@ -48,8 +48,8 @@ pub type Withdrawals = VariableList::MaxWithdrawal derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(bound = "E: EthSpec", untagged)] #[ssz(enum_behaviour = "transparent")] #[tree_hash(enum_behaviour = "transparent")] diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 2f5fac87a9..241ecb4ce6 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -1,5 +1,5 @@ use crate::{test_utils::TestRandom, *}; -use derivative::Derivative; +use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -20,9 +20,9 @@ use tree_hash_derive::TreeHash; Decode, TreeHash, TestRandom, - Derivative, + Educe, ), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Hash(bound(E: EthSpec))), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", @@ -44,8 +44,8 @@ use tree_hash_derive::TreeHash; 
derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(bound = "E: EthSpec", untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] diff --git a/consensus/types/src/execution_requests.rs b/consensus/types/src/execution_requests.rs index 592dda5d5e..67396af71d 100644 --- a/consensus/types/src/execution_requests.rs +++ b/consensus/types/src/execution_requests.rs @@ -2,7 +2,7 @@ use crate::context_deserialize; use crate::test_utils::TestRandom; use crate::{ConsolidationRequest, DepositRequest, EthSpec, ForkName, Hash256, WithdrawalRequest}; use alloy_primitives::Bytes; -use derivative::Derivative; +use educe::Educe; use ethereum_hashing::{DynamicContext, Sha256Context}; use serde::{Deserialize, Serialize}; use ssz::Encode; @@ -24,10 +24,10 @@ pub type ConsolidationRequests = arbitrary(bound = "E: EthSpec") )] #[derive( - Debug, Derivative, Default, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + Debug, Educe, Default, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[serde(bound = "E: EthSpec")] -#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +#[educe(PartialEq, Eq, Hash(bound(E: EthSpec)))] #[context_deserialize(ForkName)] pub struct ExecutionRequests { pub deposits: DepositRequests, diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index 4ba695b9d5..dc32884217 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -3,7 +3,7 @@ use crate::{ AggregateSignature, AttestationData, EthSpec, ForkName, VariableList, test_utils::TestRandom, }; use core::slice::Iter; -use derivative::Derivative; +use 
educe::Educe; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -28,11 +28,11 @@ use tree_hash_derive::TreeHash; Decode, Encode, TestRandom, - Derivative, + Educe, TreeHash, ), context_deserialize(ForkName), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Hash(bound(E: EthSpec))), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", @@ -46,7 +46,8 @@ use tree_hash_derive::TreeHash; derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive(Debug, Clone, Serialize, TreeHash, Encode, Derivative, Deserialize, PartialEq)] +#[derive(Debug, Clone, Serialize, TreeHash, Encode, Educe, Deserialize)] +#[educe(PartialEq)] #[serde(untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 1345cee224..80d5bbacf9 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -5,7 +5,7 @@ use crate::{ LightClientHeaderElectra, LightClientHeaderFulu, LightClientHeaderGloas, SignedBlindedBeaconBlock, Slot, SyncCommittee, light_client_update::*, test_utils::TestRandom, }; -use derivative::Derivative; +use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -22,15 +22,15 @@ use tree_hash_derive::TreeHash; derive( Debug, Clone, - PartialEq, Serialize, Deserialize, - Derivative, + Educe, Decode, Encode, TestRandom, TreeHash, ), + educe(PartialEq), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index 644824f12c..e58d7f4d72 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -7,7 
+7,7 @@ use crate::{ LightClientHeaderGloas, SignedBlindedBeaconBlock, light_client_update::*, test_utils::TestRandom, }; -use derivative::Derivative; +use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::Decode; @@ -22,15 +22,15 @@ use tree_hash_derive::TreeHash; derive( Debug, Clone, - PartialEq, Serialize, Deserialize, - Derivative, + Educe, Decode, Encode, TestRandom, TreeHash, ), + educe(PartialEq), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs index 162203138a..5820efcc91 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -8,7 +8,7 @@ use crate::{ ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderGloas, FixedVector, Hash256, SignedBlindedBeaconBlock, test_utils::TestRandom, }; -use derivative::Derivative; +use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::Decode; use ssz_derive::{Decode, Encode}; @@ -23,15 +23,15 @@ use tree_hash_derive::TreeHash; derive( Debug, Clone, - PartialEq, Serialize, Deserialize, - Derivative, + Educe, Decode, Encode, TestRandom, TreeHash, ), + educe(PartialEq), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 7528322d56..ca9957331f 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -6,7 +6,7 @@ use crate::{ LightClientHeaderElectra, LightClientHeaderFulu, LightClientHeaderGloas, SignedBlindedBeaconBlock, light_client_update::*, }; -use derivative::Derivative; +use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::Decode; @@ -24,15 
+24,15 @@ use tree_hash_derive::TreeHash; derive( Debug, Clone, - PartialEq, Serialize, Deserialize, - Derivative, + Educe, Decode, Encode, TestRandom, TreeHash, ), + educe(PartialEq), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index afb7ebc96d..ede9436c50 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -7,7 +7,7 @@ use crate::{ LightClientHeaderCapella, LightClientHeaderDeneb, LightClientHeaderFulu, LightClientHeaderGloas, SignedBlindedBeaconBlock, beacon_state, test_utils::TestRandom, }; -use derivative::Derivative; +use educe::Educe; use safe_arith::ArithError; use safe_arith::SafeArith; use serde::{Deserialize, Deserializer, Serialize}; @@ -105,15 +105,15 @@ impl From for Error { derive( Debug, Clone, - PartialEq, Serialize, Deserialize, - Derivative, + Educe, Decode, Encode, TestRandom, TreeHash, ), + educe(PartialEq), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 28dc10f938..370c73ad0a 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -1,5 +1,5 @@ use crate::{test_utils::TestRandom, *}; -use derivative::Derivative; +use educe::Educe; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; @@ -161,9 +161,9 @@ pub trait AbstractExecPayload: Decode, TestRandom, TreeHash, - Derivative, + Educe, ), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Hash(bound(E: EthSpec))), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", @@ -173,8 +173,8 @@ pub trait AbstractExecPayload: ssz(struct_behaviour = "transparent"), ), ref_attributes( - derive(Debug, Derivative, TreeHash), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + 
derive(Debug, Educe, TreeHash), + educe(PartialEq, Hash(bound(E: EthSpec))), tree_hash(enum_behaviour = "transparent"), ), map_into(ExecutionPayload), @@ -187,8 +187,8 @@ pub trait AbstractExecPayload: derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Derivative)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(bound = "E: EthSpec")] #[tree_hash(enum_behaviour = "transparent")] pub struct FullPayload { @@ -531,9 +531,9 @@ impl TryFrom> for FullPayload { Decode, TestRandom, TreeHash, - Derivative, + Educe, ), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Hash(bound(E: EthSpec))), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", @@ -543,8 +543,8 @@ impl TryFrom> for FullPayload { ssz(struct_behaviour = "transparent"), ), ref_attributes( - derive(Debug, Derivative, TreeHash), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + derive(Debug, Educe, TreeHash), + educe(PartialEq, Hash(bound(E: EthSpec))), tree_hash(enum_behaviour = "transparent"), ), map_into(ExecutionPayloadHeader), @@ -556,8 +556,8 @@ impl TryFrom> for FullPayload { derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Derivative)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(bound = "E: EthSpec")] #[tree_hash(enum_behaviour = "transparent")] pub struct BlindedPayload { diff --git a/consensus/types/src/runtime_var_list.rs b/consensus/types/src/runtime_var_list.rs index d57c65b1b7..e7b846029e 100644 --- a/consensus/types/src/runtime_var_list.rs +++ b/consensus/types/src/runtime_var_list.rs @@ -1,5 +1,5 @@ use crate::ContextDeserialize; -use 
derivative::Derivative; +use educe::Educe; use serde::de::Error as DeError; use serde::{Deserialize, Deserializer, Serialize}; use ssz::Decode; @@ -44,8 +44,8 @@ use tree_hash::{Hash256, MerkleHasher, PackedEncoding, TreeHash, TreeHashType}; /// assert!(long.push(6).is_err()); /// /// ``` -#[derive(Clone, Serialize, Deserialize, Derivative)] -#[derivative(PartialEq, Eq, Hash(bound = "T: std::hash::Hash"))] +#[derive(Clone, Serialize, Deserialize, Educe)] +#[educe(PartialEq, Eq, Hash(bound(T: std::hash::Hash)))] #[serde(transparent)] pub struct RuntimeVariableList { vec: Vec, diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 979b91e30d..35d2faac48 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -1,7 +1,7 @@ use crate::beacon_block_body::{BLOB_KZG_COMMITMENTS_INDEX, format_kzg_commitments}; use crate::test_utils::TestRandom; use crate::*; -use derivative::Derivative; +use educe::Educe; use merkle_proof::MerkleTree; use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; @@ -51,10 +51,10 @@ impl From for Hash256 { Encode, Decode, TreeHash, - Derivative, + Educe, TestRandom ), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Hash(bound(E: EthSpec))), serde(bound = "E: EthSpec, Payload: AbstractExecPayload"), cfg_attr( feature = "arbitrary", @@ -71,8 +71,8 @@ impl From for Hash256 { derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload") )] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(untagged)] #[serde(bound = "E: EthSpec, Payload: AbstractExecPayload")] #[tree_hash(enum_behaviour = "transparent")] diff --git a/consensus/types/src/sync_aggregate.rs 
b/consensus/types/src/sync_aggregate.rs index 7a4ef8f026..ba6d840a52 100644 --- a/consensus/types/src/sync_aggregate.rs +++ b/consensus/types/src/sync_aggregate.rs @@ -2,7 +2,7 @@ use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use crate::context_deserialize; use crate::test_utils::TestRandom; use crate::{AggregateSignature, BitVector, EthSpec, ForkName, SyncCommitteeContribution}; -use derivative::Derivative; +use educe::Educe; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -26,10 +26,8 @@ impl From for Error { derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive( - Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, -)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(bound = "E: EthSpec")] #[context_deserialize(ForkName)] pub struct SyncAggregate { diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index 432fcc1792..5a36eb74f7 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -8,7 +8,7 @@ edition = "2021" [dependencies] arbitrary = { workspace = true } c-kzg = { workspace = true } -derivative = { workspace = true } +educe = { workspace = true } ethereum_hashing = { workspace = true } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } diff --git a/crypto/kzg/src/kzg_commitment.rs b/crypto/kzg/src/kzg_commitment.rs index cfab09f63e..5a5e689429 100644 --- a/crypto/kzg/src/kzg_commitment.rs +++ b/crypto/kzg/src/kzg_commitment.rs @@ -1,5 +1,5 @@ use c_kzg::BYTES_PER_COMMITMENT; -use derivative::Derivative; +use educe::Educe; use ethereum_hashing::hash_fixed; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; @@ -11,8 +11,8 @@ use tree_hash::{Hash256, PackedEncoding, TreeHash}; pub const 
VERSIONED_HASH_VERSION_KZG: u8 = 0x01; -#[derive(Derivative, Clone, Copy, Encode, Decode)] -#[derivative(PartialEq, Eq, Hash)] +#[derive(Educe, Clone, Copy, Encode, Decode)] +#[educe(PartialEq, Eq, Hash)] #[ssz(struct_behaviour = "transparent")] pub struct KzgCommitment(pub [u8; c_kzg::BYTES_PER_COMMITMENT]); diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index b2f6eca9c3..cca55bcef8 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -14,7 +14,7 @@ portable = ["types/portable"] [dependencies] bincode = { workspace = true } byteorder = { workspace = true } -derivative = { workspace = true } +educe = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } filesystem = { workspace = true } diff --git a/slasher/src/database/redb_impl.rs b/slasher/src/database/redb_impl.rs index 4198e82645..570d7df131 100644 --- a/slasher/src/database/redb_impl.rs +++ b/slasher/src/database/redb_impl.rs @@ -7,7 +7,7 @@ use crate::{ *, }, }; -use derivative::Derivative; +use educe::Educe; use redb::{ReadableTable, TableDefinition}; use std::{borrow::Cow, path::PathBuf}; @@ -23,18 +23,18 @@ pub struct Database<'env> { _phantom: PhantomData<&'env ()>, } -#[derive(Derivative)] -#[derivative(Debug)] +#[derive(Educe)] +#[educe(Debug)] pub struct RwTransaction<'env> { - #[derivative(Debug = "ignore")] + #[educe(Debug(ignore))] txn: redb::WriteTransaction, _phantom: PhantomData<&'env ()>, } -#[derive(Derivative)] -#[derivative(Debug)] +#[derive(Educe)] +#[educe(Debug)] pub struct Cursor<'env> { - #[derivative(Debug = "ignore")] + #[educe(Debug(ignore))] txn: &'env redb::WriteTransaction, db: &'env Database<'env>, current_key: Option>, diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index d9afce0efe..581785e2a9 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -17,7 +17,7 @@ beacon_chain = { workspace = true } bls = { workspace = true } compare_fields = { workspace = true } 
context_deserialize = { workspace = true } -derivative = { workspace = true } +educe = { workspace = true } eth2_network_config = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index b49ab2d90d..a5b2ffada3 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -2,7 +2,7 @@ use crate::cases::{self, Case, Cases, EpochTransition, LoadCase, Operation}; use crate::type_name::TypeName; use crate::{FeatureName, type_name}; use context_deserialize::ContextDeserialize; -use derivative::Derivative; +use educe::Educe; use std::fs::{self, DirEntry}; use std::marker::PhantomData; use std::path::PathBuf; @@ -154,8 +154,8 @@ pub trait Handler { macro_rules! bls_eth_handler { ($runner_name: ident, $case_name:ident, $handler_name:expr) => { - #[derive(Derivative)] - #[derivative(Default(bound = ""))] + #[derive(Educe)] + #[educe(Default)] pub struct $runner_name; impl Handler for $runner_name { @@ -174,8 +174,8 @@ macro_rules! bls_eth_handler { macro_rules! bls_handler { ($runner_name: ident, $case_name:ident, $handler_name:expr) => { - #[derive(Derivative)] - #[derivative(Default(bound = ""))] + #[derive(Educe)] + #[educe(Default)] pub struct $runner_name; impl Handler for $runner_name { @@ -335,8 +335,8 @@ impl SszStaticHandler { } /// Handler for SSZ types that implement `CachedTreeHash`. -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct SszStaticTHCHandler(PhantomData<(T, E)>); /// Handler for SSZ types that don't implement `ssz::Decode`. 
@@ -436,8 +436,8 @@ where } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct ShufflingHandler(PhantomData); impl Handler for ShufflingHandler { @@ -460,8 +460,8 @@ impl Handler for ShufflingHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct SanityBlocksHandler(PhantomData); impl Handler for SanityBlocksHandler { @@ -486,8 +486,8 @@ impl Handler for SanityBlocksHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct SanitySlotsHandler(PhantomData); impl Handler for SanitySlotsHandler { @@ -511,8 +511,8 @@ impl Handler for SanitySlotsHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct RandomHandler(PhantomData); impl Handler for RandomHandler { @@ -531,8 +531,8 @@ impl Handler for RandomHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct EpochProcessingHandler(PhantomData<(E, T)>); impl> Handler for EpochProcessingHandler { @@ -581,8 +581,8 @@ impl Handler for RewardsHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct ForkHandler(PhantomData); impl Handler for ForkHandler { @@ -601,8 +601,8 @@ impl Handler for ForkHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct TransitionHandler(PhantomData); impl Handler for TransitionHandler { @@ -621,8 +621,8 @@ impl Handler for TransitionHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct FinalityHandler(PhantomData); impl Handler for FinalityHandler { @@ -705,8 +705,8 @@ impl Handler for ForkChoiceHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] 
+#[educe(Default)] pub struct OptimisticSyncHandler(PhantomData); impl Handler for OptimisticSyncHandler { @@ -734,8 +734,8 @@ impl Handler for OptimisticSyncHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct GenesisValidityHandler(PhantomData); impl Handler for GenesisValidityHandler { @@ -754,8 +754,8 @@ impl Handler for GenesisValidityHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct GenesisInitializationHandler(PhantomData); impl Handler for GenesisInitializationHandler { @@ -774,8 +774,8 @@ impl Handler for GenesisInitializationHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGBlobToKZGCommitmentHandler(PhantomData); impl Handler for KZGBlobToKZGCommitmentHandler { @@ -794,8 +794,8 @@ impl Handler for KZGBlobToKZGCommitmentHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGComputeBlobKZGProofHandler(PhantomData); impl Handler for KZGComputeBlobKZGProofHandler { @@ -814,8 +814,8 @@ impl Handler for KZGComputeBlobKZGProofHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGComputeKZGProofHandler(PhantomData); impl Handler for KZGComputeKZGProofHandler { @@ -834,8 +834,8 @@ impl Handler for KZGComputeKZGProofHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGVerifyBlobKZGProofHandler(PhantomData); impl Handler for KZGVerifyBlobKZGProofHandler { @@ -854,8 +854,8 @@ impl Handler for KZGVerifyBlobKZGProofHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGVerifyBlobKZGProofBatchHandler(PhantomData); impl Handler for KZGVerifyBlobKZGProofBatchHandler { @@ -874,8 +874,8 @@ 
impl Handler for KZGVerifyBlobKZGProofBatchHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGVerifyKZGProofHandler(PhantomData); impl Handler for KZGVerifyKZGProofHandler { @@ -894,8 +894,8 @@ impl Handler for KZGVerifyKZGProofHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct GetCustodyGroupsHandler(PhantomData); impl Handler for GetCustodyGroupsHandler { @@ -914,8 +914,8 @@ impl Handler for GetCustodyGroupsHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct ComputeColumnsForCustodyGroupHandler(PhantomData); impl Handler for ComputeColumnsForCustodyGroupHandler { @@ -934,8 +934,8 @@ impl Handler for ComputeColumnsForCustodyGroupHandler } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGComputeCellsHandler(PhantomData); impl Handler for KZGComputeCellsHandler { @@ -954,8 +954,8 @@ impl Handler for KZGComputeCellsHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGComputeCellsAndKZGProofHandler(PhantomData); impl Handler for KZGComputeCellsAndKZGProofHandler { @@ -974,8 +974,8 @@ impl Handler for KZGComputeCellsAndKZGProofHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGVerifyCellKZGProofBatchHandler(PhantomData); impl Handler for KZGVerifyCellKZGProofBatchHandler { @@ -994,8 +994,8 @@ impl Handler for KZGVerifyCellKZGProofBatchHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGRecoverCellsAndKZGProofHandler(PhantomData); impl Handler for KZGRecoverCellsAndKZGProofHandler { @@ -1014,8 +1014,8 @@ impl Handler for KZGRecoverCellsAndKZGProofHandler { } } -#[derive(Derivative)] 
-#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KzgInclusionMerkleProofValidityHandler(PhantomData); impl Handler for KzgInclusionMerkleProofValidityHandler { @@ -1038,8 +1038,8 @@ impl Handler for KzgInclusionMerkleProofValidityHandler(PhantomData); impl Handler for MerkleProofValidityHandler { @@ -1062,8 +1062,8 @@ impl Handler for MerkleProofValidityHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct LightClientUpdateHandler(PhantomData); impl Handler for LightClientUpdateHandler { @@ -1087,8 +1087,8 @@ impl Handler for LightClientUpdateHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct OperationsHandler(PhantomData<(E, O)>); impl> Handler for OperationsHandler { @@ -1107,8 +1107,8 @@ impl> Handler for OperationsHandler } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct SszGenericHandler(PhantomData); impl Handler for SszGenericHandler { diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml index 9192f0e86b..6ef179fbe9 100644 --- a/validator_manager/Cargo.toml +++ b/validator_manager/Cargo.toml @@ -8,7 +8,7 @@ edition = { workspace = true } account_utils = { workspace = true } clap = { workspace = true } clap_utils = { workspace = true } -derivative = { workspace = true } +educe = { workspace = true } environment = { workspace = true } eth2 = { workspace = true } eth2_network_config = { workspace = true } diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs index 5f5f049ed9..24917f7d1b 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -3,7 +3,7 @@ use crate::DumpConfig; use account_utils::eth2_keystore::Keystore; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; -use 
derivative::Derivative; +use educe::Educe; use eth2::lighthouse_vc::types::KeystoreJsonStr; use eth2::{SensitiveUrl, lighthouse_vc::std_types::ImportKeystoreStatus}; use serde::{Deserialize, Serialize}; @@ -159,15 +159,15 @@ pub fn cli_app() -> Command { ) } -#[derive(Clone, PartialEq, Serialize, Deserialize, Derivative)] -#[derivative(Debug)] +#[derive(Clone, PartialEq, Serialize, Deserialize, Educe)] +#[educe(Debug)] pub struct ImportConfig { pub validators_file_path: Option, pub keystore_file_path: Option, pub vc_url: SensitiveUrl, pub vc_token_path: PathBuf, pub ignore_duplicates: bool, - #[derivative(Debug = "ignore")] + #[educe(Debug(ignore))] pub password: Option>, pub fee_recipient: Option
, pub gas_limit: Option, From bcf36535f20137388780619a36f6bd0c65b98f26 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Thu, 6 Nov 2025 11:53:51 -0800 Subject: [PATCH 43/44] Fix tests --- Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index f4b80eef18..b6ff73690e 100644 --- a/Makefile +++ b/Makefile @@ -179,14 +179,14 @@ test-beacon-chain-%: env FORK_NAME=$* cargo nextest run --release --features "fork_from_env,slasher/lmdb,$(TEST_FEATURES)" -p beacon_chain # Run the tests in the `http_api` crate for recent forks. -test-http-api: $(patsubst %,test-http-api-%,$(RECENT_FORKS)) +test-http-api: $(patsubst %,test-http-api-%,$(RECENT_FORKS_BEFORE_GLOAS)) test-http-api-%: env FORK_NAME=$* cargo nextest run --release --features "beacon_chain/fork_from_env" -p http_api # Run the tests in the `operation_pool` crate for all known forks. -test-op-pool: $(patsubst %,test-op-pool-%,$(RECENT_FORKS)) +test-op-pool: $(patsubst %,test-op-pool-%,$(RECENT_FORKS_BEFORE_GLOAS)) test-op-pool-%: env FORK_NAME=$* cargo nextest run --release \ @@ -195,7 +195,7 @@ test-op-pool-%: # Run the tests in the `network` crate for all known forks. 
# TODO(EIP-7732) Extend to support gloas by using RECENT_FORKS instead -test-network: $(patsubst %,test-network-%,$(FORKS_BEFORE_GLOAS)) +test-network: $(patsubst %,test-network-%,$(RECENT_FORKS_BEFORE_GLOAS)) test-network-%: env FORK_NAME=$* cargo nextest run --release \ From b2a5337ce560868e8e5806ec596e615e5b40fd30 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Thu, 6 Nov 2025 22:55:38 -0800 Subject: [PATCH 44/44] Educe migration --- consensus/types/src/execution_payload_bid.rs | 6 +++--- consensus/types/src/execution_payload_envelope.rs | 10 +++++----- consensus/types/src/payload_attestation.rs | 8 +++----- consensus/types/src/signed_execution_payload_bid.rs | 8 +++----- .../types/src/signed_execution_payload_envelope.rs | 10 +++++----- 5 files changed, 19 insertions(+), 23 deletions(-) diff --git a/consensus/types/src/execution_payload_bid.rs b/consensus/types/src/execution_payload_bid.rs index 1449dbc90f..bb8c26b585 100644 --- a/consensus/types/src/execution_payload_bid.rs +++ b/consensus/types/src/execution_payload_bid.rs @@ -1,15 +1,15 @@ use crate::{test_utils::TestRandom, *}; -use derivative::Derivative; +use educe::Educe; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; #[derive( - Default, Debug, Clone, Serialize, Encode, Decode, Deserialize, TreeHash, Derivative, TestRandom, + Default, Debug, Clone, Serialize, Encode, Decode, Deserialize, TreeHash, Educe, TestRandom, )] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derivative(PartialEq, Hash)] +#[educe(PartialEq, Hash)] #[context_deserialize(ForkName)] // https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#executionpayloadbid pub struct ExecutionPayloadBid { diff --git a/consensus/types/src/execution_payload_envelope.rs b/consensus/types/src/execution_payload_envelope.rs index 81539e519a..7cd1d1be80 100644 --- 
a/consensus/types/src/execution_payload_envelope.rs +++ b/consensus/types/src/execution_payload_envelope.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; use beacon_block_body::KzgCommitments; -use derivative::Derivative; +use educe::Educe; use serde::de::{Deserializer, Error as _}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -22,10 +22,10 @@ use tree_hash_derive::TreeHash; Decode, TreeHash, TestRandom, - Derivative + Educe ), cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary)), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Hash(bound(E: EthSpec))), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr(feature = "arbitrary", arbitrary(bound = "E: EthSpec")) ), @@ -36,8 +36,8 @@ use tree_hash_derive::TreeHash; cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") )] -#[derive(Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(bound = "E: EthSpec", untagged)] #[ssz(enum_behaviour = "transparent")] #[tree_hash(enum_behaviour = "transparent")] diff --git a/consensus/types/src/payload_attestation.rs b/consensus/types/src/payload_attestation.rs index 48b0a3e3b3..d4e2dd2138 100644 --- a/consensus/types/src/payload_attestation.rs +++ b/consensus/types/src/payload_attestation.rs @@ -1,18 +1,16 @@ use crate::test_utils::TestRandom; use crate::*; -use derivative::Derivative; +use educe::Educe; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -#[derive( - TestRandom, TreeHash, Debug, Clone, Encode, Decode, Serialize, Deserialize, Derivative, -)] +#[derive(TestRandom, TreeHash, Debug, Clone, Encode, 
Decode, Serialize, Deserialize, Educe)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[serde(bound = "E: EthSpec", deny_unknown_fields)] #[cfg_attr(feature = "arbitrary", arbitrary(bound = "E: EthSpec"))] -#[derivative(PartialEq, Hash)] +#[educe(PartialEq, Hash)] #[context_deserialize(ForkName)] pub struct PayloadAttestation { pub aggregation_bits: BitList, diff --git a/consensus/types/src/signed_execution_payload_bid.rs b/consensus/types/src/signed_execution_payload_bid.rs index 58a50977b3..0d5daacf50 100644 --- a/consensus/types/src/signed_execution_payload_bid.rs +++ b/consensus/types/src/signed_execution_payload_bid.rs @@ -1,16 +1,14 @@ use crate::test_utils::TestRandom; use crate::*; -use derivative::Derivative; +use educe::Educe; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -#[derive( - TestRandom, TreeHash, Debug, Clone, Encode, Decode, Serialize, Deserialize, Derivative, -)] +#[derive(TestRandom, TreeHash, Debug, Clone, Encode, Decode, Serialize, Deserialize, Educe)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derivative(PartialEq, Hash)] +#[educe(PartialEq, Hash)] #[context_deserialize(ForkName)] // https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#signedexecutionpayloadbid pub struct SignedExecutionPayloadBid { diff --git a/consensus/types/src/signed_execution_payload_envelope.rs b/consensus/types/src/signed_execution_payload_envelope.rs index 96276a764b..3522b31999 100644 --- a/consensus/types/src/signed_execution_payload_envelope.rs +++ b/consensus/types/src/signed_execution_payload_envelope.rs @@ -1,6 +1,6 @@ use crate::test_utils::TestRandom; use crate::*; -use derivative::Derivative; +use educe::Educe; use serde::de::{Deserializer, Error as _}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -20,10 +20,10 @@ use tree_hash_derive::TreeHash; Decode, 
TreeHash, TestRandom, - Derivative + Educe ), cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary)), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Hash(bound(E: EthSpec))), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr(feature = "arbitrary", arbitrary(bound = "E: EthSpec")) ), @@ -34,8 +34,8 @@ use tree_hash_derive::TreeHash; cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") )] -#[derive(Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(bound = "E: EthSpec", untagged)] #[ssz(enum_behaviour = "transparent")] #[tree_hash(enum_behaviour = "transparent")]