From b57d046c4ad9cf60a8053c4c43ea99e4b326bc01 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Sun, 2 Nov 2025 16:51:42 -0800 Subject: [PATCH 1/5] Fix CGC backfill race condition (#8267) During custody backfill sync there could be an edge case where we update CGC at the same time as we are importing a batch of columns, which may cause us to incorrectly overwrite values when calling `backfill_validator_custody_requirements`. To prevent this race condition, the expected cgc is now passed into this function and is used to check whether the expected cgc == the current validator cgc. If the values aren't equal, this probably indicates that a very recent CGC change occurred, so we do not prune/update values in the `epoch_validator_custody_requirements` map. Co-Authored-By: Eitan Seri-Levi --- .../beacon_chain/src/custody_context.rs | 79 +++++++++++++-- .../src/historical_data_columns.rs | 3 +- beacon_node/beacon_chain/tests/store_tests.rs | 97 ++++--------------- .../src/network_beacon_processor/mod.rs | 4 +- .../network_beacon_processor/sync_methods.rs | 10 +- .../src/sync/custody_backfill_sync/mod.rs | 1 + 6 files changed, 103 insertions(+), 91 deletions(-) diff --git a/beacon_node/beacon_chain/src/custody_context.rs b/beacon_node/beacon_chain/src/custody_context.rs index 0da0e7573e..9a6f51174a 100644 --- a/beacon_node/beacon_chain/src/custody_context.rs +++ b/beacon_node/beacon_chain/src/custody_context.rs @@ -134,8 +134,17 @@ impl ValidatorRegistrations { /// /// This is done by pruning all values on/after `effective_epoch` and updating the map to store /// the latest validator custody requirements for the `effective_epoch`. - pub fn backfill_validator_custody_requirements(&mut self, effective_epoch: Epoch) { + pub fn backfill_validator_custody_requirements( + &mut self, + effective_epoch: Epoch, + expected_cgc: u64, + ) { if let Some(latest_validator_custody) = self.latest_validator_custody_requirement() { + // If the expected cgc isn't equal to the latest validator custody, a very recent cgc change may have occurred. + // We should not update the mapping. + if expected_cgc != latest_validator_custody { + return; + } // Delete records if // 1. The epoch is greater than or equal to `effective_epoch` // 2. the cgc requirements match the latest validator custody requirements @@ -517,10 +526,14 @@ impl CustodyContext { /// The node has completed backfill for this epoch. Update the internal records so the function /// [`Self::custody_columns_for_epoch()`] returns up-to-date results.
- pub fn update_and_backfill_custody_count_at_epoch(&self, effective_epoch: Epoch) { + pub fn update_and_backfill_custody_count_at_epoch( + &self, + effective_epoch: Epoch, + expected_cgc: u64, + ) { self.validator_registrations .write() - .backfill_validator_custody_requirements(effective_epoch); + .backfill_validator_custody_requirements(effective_epoch, expected_cgc); } } @@ -604,11 +617,13 @@ mod tests { custody_context: &CustodyContext, start_epoch: Epoch, end_epoch: Epoch, + expected_cgc: u64, ) { assert!(start_epoch >= end_epoch); // Call from start_epoch down to end_epoch (inclusive), simulating backfill for epoch in (end_epoch.as_u64()..=start_epoch.as_u64()).rev() { - custody_context.update_and_backfill_custody_count_at_epoch(Epoch::new(epoch)); + custody_context + .update_and_backfill_custody_count_at_epoch(Epoch::new(epoch), expected_cgc); } } @@ -1368,7 +1383,7 @@ mod tests { ); // Backfill from epoch 20 down to 15 (simulating backfill) - complete_backfill_for_epochs(&custody_context, head_epoch, Epoch::new(15)); + complete_backfill_for_epochs(&custody_context, head_epoch, Epoch::new(15), final_cgc); // After backfilling to epoch 15, it should use latest CGC (32) assert_eq!( @@ -1406,7 +1421,7 @@ mod tests { let custody_context = setup_custody_context(&spec, head_epoch, epoch_and_cgc_tuples); // Backfill to epoch 15 (between the two CGC increases) - complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(15)); + complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(15), final_cgc); // Verify epochs 15 - 20 return latest CGC (32) for epoch in 15..=20 { @@ -1424,4 +1439,56 @@ mod tests { ); } } + + #[test] + fn attempt_backfill_with_invalid_cgc() { + let spec = E::default_spec(); + let initial_cgc = 8u64; + let mid_cgc = 16u64; + let final_cgc = 32u64; + + // Setup: Node restart after multiple validator registrations causing CGC increases + let head_epoch = Epoch::new(20); + let epoch_and_cgc_tuples = vec![ + (Epoch::new(0), initial_cgc), + (Epoch::new(10), mid_cgc), + (head_epoch, final_cgc), + ]; + let custody_context = setup_custody_context(&spec, head_epoch, epoch_and_cgc_tuples); + + // Backfill to epoch 15 (between the two CGC increases) + complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(15), final_cgc); + + // Verify epochs 15 - 20 return latest CGC (32) + for epoch in 15..=20 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + final_cgc, + ); + } + + // Attempt backfill with an incorrect cgc value + complete_backfill_for_epochs( + &custody_context, + Epoch::new(20), + Epoch::new(15), + initial_cgc, + ); + + // Verify epochs 15 - 20 still return latest CGC (32) + for epoch in 15..=20 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + final_cgc, + ); + } + + // Verify epochs 10-13 still return mid_cgc (16) + for epoch in 10..14 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + mid_cgc, + ); + } + } } diff --git a/beacon_node/beacon_chain/src/historical_data_columns.rs b/beacon_node/beacon_chain/src/historical_data_columns.rs index 7e196eb75e..9304f06570 100644 --- a/beacon_node/beacon_chain/src/historical_data_columns.rs +++ b/beacon_node/beacon_chain/src/historical_data_columns.rs @@ -54,6 +54,7 @@ impl BeaconChain { &self, epoch: Epoch, historical_data_column_sidecar_list: DataColumnSidecarList, + expected_cgc: u64, ) -> Result { let mut total_imported = 0; let mut ops = vec![]; @@ -136,7
+137,7 @@ impl BeaconChain { self.data_availability_checker .custody_context() - .update_and_backfill_custody_count_at_epoch(epoch); + .update_and_backfill_custody_count_at_epoch(epoch, expected_cgc); self.safely_backfill_data_column_custody_info(epoch) .map_err(|e| HistoricalDataColumnError::BeaconChainError(Box::new(e)))?; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 53e841692e..7891b22432 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -3182,6 +3182,8 @@ async fn weak_subjectivity_sync_test( assert_eq!(store.get_anchor_info().state_upper_limit, Slot::new(0)); } +// This test prunes data columns from epoch 0 and then tries to re-import them via +// the same code paths that custody backfill sync uses to import data columns. #[tokio::test] async fn test_import_historical_data_columns_batch() { let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); let db_path = tempdir().unwrap(); let store = get_store_generic(&db_path, StoreConfig::default(), spec); let start_slot = Epoch::new(0).start_slot(E::slots_per_epoch()) + 1; let end_slot = Epoch::new(0).end_slot(E::slots_per_epoch()); + let cgc = 128; let harness = get_harness_import_all_data_columns(store.clone(), LOW_VALIDATOR_COUNT); @@ -3208,6 +3211,7 @@ async fn test_import_historical_data_columns_batch() { let mut data_columns_list = vec![]; + // Get all data columns for epoch 0 for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); @@ -3227,6 +3231,7 @@ async fn test_import_historical_data_columns_batch() { harness.advance_slot(); + // Prune data columns harness .chain .store @@ -3238,21 +3243,25 @@ async fn test_import_historical_data_columns_batch() { .forwards_iter_block_roots_until(start_slot, end_slot) .unwrap(); + // Assert that data columns no longer exist for epoch 0 for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); assert!(data_columns.is_none()) } + // Re-import deleted data columns harness .chain - .import_historical_data_column_batch(Epoch::new(0), data_columns_list) + .import_historical_data_column_batch(Epoch::new(0), data_columns_list, cgc) .unwrap(); + let block_root_iter = harness .chain .forwards_iter_block_roots_until(start_slot, end_slot) .unwrap(); + // Assert that data columns now exist for epoch 0 for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); @@ -3261,6 +3270,7 @@ } // This should verify that a data column sidecar containing mismatched block roots should fail to be imported. +// This also covers any test cases related to data columns with incorrect/invalid/mismatched block roots.
#[tokio::test] async fn test_import_historical_data_columns_batch_mismatched_block_root() { let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); @@ -3268,6 +3278,7 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { let store = get_store_generic(&db_path, StoreConfig::default(), spec); let start_slot = Slot::new(1); let end_slot = Slot::new(E::slots_per_epoch() * 2 - 1); + let cgc = 128; let harness = get_harness_import_all_data_columns(store.clone(), LOW_VALIDATOR_COUNT); @@ -3287,6 +3298,8 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { let mut data_columns_list = vec![]; + // Get all data columns from start_slot to end_slot + // and mutate the data columns with an invalid block root for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); @@ -3312,6 +3325,7 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { harness.advance_slot(); + // Prune blobs harness .chain .store @@ -3323,17 +3337,20 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { .forwards_iter_block_roots_until(start_slot, end_slot) .unwrap(); + // Assert there are no columns between start_slot and end_slot for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); assert!(data_columns.is_none()) } + // Attempt to import data columns with invalid block roots and expect a failure let error = harness .chain .import_historical_data_column_batch( start_slot.epoch(E::slots_per_epoch()), data_columns_list, + cgc, ) .unwrap_err(); @@ -3343,84 +3360,6 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { )); } -// This should verify that a data column sidecar associated to a block root that doesn't exist in the store cannot -// be imported. 
-#[tokio::test] -async fn test_import_historical_data_columns_batch_no_block_found() { - let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); - let db_path = tempdir().unwrap(); - let store = get_store_generic(&db_path, StoreConfig::default(), spec); - let start_slot = Slot::new(1); - let end_slot = Slot::new(E::slots_per_epoch() * 2 - 1); - - let harness = get_harness_import_all_data_columns(store.clone(), LOW_VALIDATOR_COUNT); - - harness - .extend_chain( - (E::slots_per_epoch() * 2) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ) - .await; - harness.advance_slot(); - - let block_root_iter = harness - .chain - .forwards_iter_block_roots_until(start_slot, end_slot) - .unwrap(); - - let mut data_columns_list = vec![]; - - for block in block_root_iter { - let (block_root, _) = block.unwrap(); - let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); - assert!(data_columns.is_some()); - - for data_column in data_columns.unwrap() { - let mut data_column = (*data_column).clone(); - data_column.signed_block_header.message.body_root = Hash256::ZERO; - data_columns_list.push(Arc::new(data_column)); - } - } - - harness - .extend_chain( - (E::slots_per_epoch() * 4) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ) - .await; - - harness.advance_slot(); - - harness - .chain - .store - .try_prune_blobs(true, Epoch::new(2)) - .unwrap(); - - let block_root_iter = harness - .chain - .forwards_iter_block_roots_until(start_slot, end_slot) - .unwrap(); - - for block in block_root_iter { - let (block_root, _) = block.unwrap(); - let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); - assert!(data_columns.is_none()) - } - - let error = harness - .chain - .import_historical_data_column_batch(Epoch::new(0), data_columns_list) - .unwrap_err(); - - assert!(matches!( - error, - HistoricalDataColumnError::NoBlockFound { .. } - )); -} - /// Test that blocks and attestations that refer to states around an unaligned split state are /// processed correctly. 
#[tokio::test] diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 7441e92871..5fa2361f28 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -497,9 +497,11 @@ impl NetworkBeaconProcessor { self: &Arc, batch_id: CustodyBackfillBatchId, data_columns: DataColumnSidecarList, + expected_cgc: u64, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = move || processor.process_historic_data_columns(batch_id, data_columns); + let process_fn = + move || processor.process_historic_data_columns(batch_id, data_columns, expected_cgc); let work = Work::ChainSegmentBackfill(Box::new(process_fn)); diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 41b12fa01b..41160fcfe4 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -426,6 +426,7 @@ impl NetworkBeaconProcessor { &self, batch_id: CustodyBackfillBatchId, downloaded_columns: DataColumnSidecarList, + expected_cgc: u64, ) { let _guard = debug_span!( SPAN_CUSTODY_BACKFILL_SYNC_IMPORT_COLUMNS, @@ -435,10 +436,11 @@ impl NetworkBeaconProcessor { .entered(); let sent_columns = downloaded_columns.len(); - let result = match self - .chain - .import_historical_data_column_batch(batch_id.epoch, downloaded_columns) - { + let result = match self.chain.import_historical_data_column_batch( + batch_id.epoch, + downloaded_columns, + expected_cgc, + ) { Ok(imported_columns) => { metrics::inc_counter_by( &metrics::BEACON_PROCESSOR_CUSTODY_BACKFILL_COLUMN_IMPORT_SUCCESS_TOTAL, diff --git a/beacon_node/network/src/sync/custody_backfill_sync/mod.rs b/beacon_node/network/src/sync/custody_backfill_sync/mod.rs index 69df3422e6..5c5505083f 100644 --- a/beacon_node/network/src/sync/custody_backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/custody_backfill_sync/mod.rs @@ -504,6 +504,7 @@ impl CustodyBackFillSync { run_id: self.run_id, }, data_columns, + self.cgc, ) { crit!( msg = "process_batch", From 25832e586246823007209e6841776d772dd8eda4 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Sun, 2 Nov 2025 22:53:13 -0800 Subject: [PATCH 2/5] Add mainnet configs (#8344) #8135 mainnet config PR: https://github.com/eth-clients/mainnet/pull/11 Co-Authored-By: Eitan Seri-Levi Co-Authored-By: Michael Sproul Co-Authored-By: Tan Chee Keong --- .../src/data_column_verification.rs | 16 ++++ .../test_utils/execution_block_generator.rs | 40 +++++++- .../mainnet/config.yaml | 95 +++++++++++++------ consensus/types/presets/gnosis/electra.yaml | 3 +- consensus/types/src/chain_spec.rs | 34 ++++++- 5 files changed, 150 insertions(+), 38 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 07f85b045a..61fc0677b1 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -850,6 +850,22 @@ mod test { .build(); harness.advance_slot(); + // Check block generator timestamp conversion sanity. 
+ { + let exec_block_generator = harness.execution_block_generator(); + assert_eq!( + exec_block_generator + .timestamp_to_slot_post_capella(exec_block_generator.osaka_time.unwrap()), + 0 + ); + assert_eq!( + exec_block_generator.timestamp_to_slot_post_capella( + exec_block_generator.osaka_time.unwrap() + harness.spec.seconds_per_slot + ), + 1 + ); + } + let verify_fn = |column_sidecar: DataColumnSidecar| { GossipVerifiedDataColumn::<_>::new_for_block_publishing( column_sidecar.into(), diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index f1d07ae258..44e72cba6b 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -21,7 +21,7 @@ use types::{ Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadGloas, ExecutionPayloadHeader, FixedBytesExtended, ForkName, Hash256, - KzgProofs, Transaction, Transactions, Uint256, + KzgProofs, Slot, Transaction, Transactions, Uint256, }; use super::DEFAULT_TERMINAL_BLOCK; @@ -265,6 +265,37 @@ impl ExecutionBlockGenerator { ForkName::Bellatrix } + /// Get the timestamp at which `fork` activates. + /// + /// This function will panic if the `fork` is not enabled or is `<= ForkName::Bellatrix`. + pub fn get_fork_timestamp_post_capella(&self, fork: ForkName) -> u64 { + match fork { + ForkName::Gloas => self.amsterdam_time, + ForkName::Fulu => self.osaka_time, + ForkName::Electra => self.prague_time, + ForkName::Deneb => self.cancun_time, + ForkName::Capella => self.shanghai_time, + _ => panic!("only the Capella fork or later is supported"), + } + .unwrap_or_else(|| panic!("fork is {fork} but no corresponding timestamp is set")) + } + + /// This is a slightly nasty method for converting timestamps to slots, but it will suffice + /// until we can plumb through a slot clock. + pub fn timestamp_to_slot_post_capella(&self, timestamp: u64) -> Slot { + let fork = self.get_fork_at_timestamp(timestamp); + let fork_epoch = self.spec.fork_epoch(fork).unwrap(); + let fork_timestamp = self.get_fork_timestamp_post_capella(fork); + + // Number of slots since fork. + let slot_offset = timestamp + .checked_sub(fork_timestamp) + .expect("timestamp should be >= fork timestamp") + / self.spec.seconds_per_slot; + + fork_epoch.start_slot(E::slots_per_epoch()) + Slot::new(slot_offset) + } + pub fn execution_block_by_number(&self, number: u64) -> Option { self.block_by_number(number) .map(|block| block.as_execution_block(self.terminal_total_difficulty)) @@ -734,9 +765,10 @@ impl ExecutionBlockGenerator { if fork_name.deneb_enabled() { // get random number between 0 and Max Blobs let mut rng = self.rng.lock(); - // TODO(EIP-7892): see FIXME below - // FIXME: this will break with BPO forks. This function needs to calculate the epoch based on block timestamp.. 
- let max_blobs = self.spec.max_blobs_per_block_within_fork(fork_name) as usize; + let epoch = self + .timestamp_to_slot_post_capella(execution_payload.timestamp()) + .epoch(E::slots_per_epoch()); + let max_blobs = self.spec.max_blobs_per_block(epoch) as usize; let num_blobs = rng.random_range(self.min_blobs_count..=max_blobs); let (bundle, transactions) = generate_blobs(num_blobs, fork_name)?; for tx in Vec::from(transactions) { diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index ca7f85b512..49168018cb 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -6,7 +6,9 @@ PRESET_BASE: 'mainnet' # Free-form short name of the network that this configuration applies to - known # canonical network names include: # * 'mainnet' - there can be only one +# * 'sepolia' - testnet # * 'holesky' - testnet +# * 'hoodi' - testnet # Must match the regex: [a-z0-9\-] CONFIG_NAME: 'mainnet' @@ -52,23 +54,37 @@ ELECTRA_FORK_VERSION: 0x05000000 ELECTRA_FORK_EPOCH: 364032 # May 7, 2025, 10:05:11am UTC # Fulu FULU_FORK_VERSION: 0x06000000 -FULU_FORK_EPOCH: 18446744073709551615 +FULU_FORK_EPOCH: 411392 # December 3, 2025, 09:49:11pm UTC # Gloas GLOAS_FORK_VERSION: 0x07000000 GLOAS_FORK_EPOCH: 18446744073709551615 # Time parameters # --------------------------------------------------------------- -# 12 seconds +# 12 seconds (*deprecated*) SECONDS_PER_SLOT: 12 +# 12000 milliseconds +SLOT_DURATION_MS: 12000 # 14 (estimate from Eth1 mainnet) SECONDS_PER_ETH1_BLOCK: 14 -# 2**8 (= 256) epochs ~27 hours +# 2**8 (= 256) epochs MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -# 2**8 (= 256) epochs ~27 hours +# 2**8 (= 256) epochs SHARD_COMMITTEE_PERIOD: 256 -# 2**11 (= 2,048) Eth1 blocks ~8 hours +# 2**11 (= 2,048) Eth1 blocks ETH1_FOLLOW_DISTANCE: 2048 +# 1667 basis points, ~17% of SLOT_DURATION_MS +PROPOSER_REORG_CUTOFF_BPS: 1667 +# 3333 basis points, ~33% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS: 6667 + +# Altair +# 3333 basis points, ~33% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS: 6667 # Validator cycle # --------------------------------------------------------------- @@ -78,13 +94,21 @@ INACTIVITY_SCORE_BIAS: 4 INACTIVITY_SCORE_RECOVERY_RATE: 16 # 2**4 * 10**9 (= 16,000,000,000) Gwei EJECTION_BALANCE: 16000000000 -# 2**2 (= 4) +# 2**2 (= 4) validators MIN_PER_EPOCH_CHURN_LIMIT: 4 # 2**16 (= 65,536) CHURN_LIMIT_QUOTIENT: 65536 -# [New in Deneb:EIP7514] 2**3 (= 8) + +# Deneb +# 2**3 (= 8) (*deprecated*) MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 +# Electra +# 2**7 * 10**9 (= 128,000,000,000) Gwei +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) Gwei +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 + # Fork choice # --------------------------------------------------------------- # 40% @@ -93,7 +117,7 @@ PROPOSER_SCORE_BOOST: 40 REORG_HEAD_WEIGHT_THRESHOLD: 20 # 160% REORG_PARENT_WEIGHT_THRESHOLD: 160 -# `2` epochs +# 2 epochs REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 # Deposit contract @@ -105,18 +129,19 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa # Networking # --------------------------------------------------------------- -# `10 * 2**20` (= 10485760, 10 MiB) +# 10 * 2**20 (= 10,485,760) bytes, 10 
MiB MAX_PAYLOAD_SIZE: 10485760 -# `2**10` (= 1024) +# 2**10 (= 1,024) blocks MAX_REQUEST_BLOCKS: 1024 -# `2**8` (= 256) +# 2**8 (= 256) epochs EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 -# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) +# MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33,024) epochs MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 # 5s TTFB_TIMEOUT: 5 # 10s RESP_TIMEOUT: 10 +# 2**5 (= 32) slots ATTESTATION_PROPAGATION_SLOT_RANGE: 32 # 500ms MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 @@ -124,45 +149,59 @@ MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 # 2 subnets per node SUBNETS_PER_NODE: 2 -# 2**8 (= 64) +# 2**6 (= 64) subnets ATTESTATION_SUBNET_COUNT: 64 +# 0 bits ATTESTATION_SUBNET_EXTRA_BITS: 0 -# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS +# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS (= 6 + 0) bits ATTESTATION_SUBNET_PREFIX_BITS: 6 ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3 # Deneb -# `2**7` (=128) +# 2**7 (= 128) blocks MAX_REQUEST_BLOCKS_DENEB: 128 -# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK -MAX_REQUEST_BLOB_SIDECARS: 768 -# `2**12` (= 4096 epochs, ~18 days) +# 2**12 (= 4,096) epochs MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 -# `6` +# 6 subnets BLOB_SIDECAR_SUBNET_COUNT: 6 -# `uint64(6)` +# 6 blobs MAX_BLOBS_PER_BLOCK: 6 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK (= 128 * 6) sidecars +MAX_REQUEST_BLOB_SIDECARS: 768 # Electra -# 2**7 * 10**9 (= 128,000,000,000) -MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 -# 2**8 * 10**9 (= 256,000,000,000) -MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 -# `9` +# 9 subnets BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 -# `uint64(9)` +# 9 blobs MAX_BLOBS_PER_BLOCK_ELECTRA: 9 -# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA (= 128 * 9) sidecars MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 # Fulu +# 2**7 (= 128) groups NUMBER_OF_CUSTODY_GROUPS: 128 +# 2**7 (= 128) subnets DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +# MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS (= 128 * 128) sidecars MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 +# 2**3 (= 8) samples SAMPLES_PER_SLOT: 8 +# 2**2 (= 4) sidecars CUSTODY_REQUIREMENT: 4 +# 2**3 (= 8) sidecars VALIDATOR_CUSTODY_REQUIREMENT: 8 +# 2**5 * 10**9 (= 32,000,000,000) Gwei BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 +# 2**12 (= 4,096) epochs MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 -# Gloas \ No newline at end of file +# Blob Scheduling +# --------------------------------------------------------------- + +BLOB_SCHEDULE: + - EPOCH: 412672 # December 9, 2025, 02:21:11pm UTC + MAX_BLOBS_PER_BLOCK: 15 + - EPOCH: 419072 # January 7, 2026, 01:01:11am UTC + MAX_BLOBS_PER_BLOCK: 21 + +# Gloas diff --git a/consensus/types/presets/gnosis/electra.yaml b/consensus/types/presets/gnosis/electra.yaml index 42afbb233e..6885667c6e 100644 --- a/consensus/types/presets/gnosis/electra.yaml +++ b/consensus/types/presets/gnosis/electra.yaml @@ -41,8 +41,7 @@ MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 # Withdrawals processing # --------------------------------------------------------------- -# 2**3 ( = 8) pending withdrawals -MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 8 +MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 6 # Pending deposits processing # --------------------------------------------------------------- diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 93f5140383..3565c714e0 
100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -87,12 +87,18 @@ pub struct ChainSpec { */ pub genesis_delay: u64, pub seconds_per_slot: u64, + pub slot_duration_ms: u64, pub min_attestation_inclusion_delay: u64, pub min_seed_lookahead: Epoch, pub max_seed_lookahead: Epoch, pub min_epochs_to_inactivity_penalty: u64, pub min_validator_withdrawability_delay: Epoch, pub shard_committee_period: u64, + pub proposer_reorg_cutoff_bps: u64, + pub attestation_due_bps: u64, + pub aggregate_due_bps: u64, + pub sync_message_due_bps: u64, + pub contribution_due_bps: u64, /* * Reward and penalty quotients @@ -964,12 +970,18 @@ impl ChainSpec { */ genesis_delay: 604800, // 7 days seconds_per_slot: 12, + slot_duration_ms: 12000, min_attestation_inclusion_delay: 1, min_seed_lookahead: Epoch::new(1), max_seed_lookahead: Epoch::new(4), min_epochs_to_inactivity_penalty: 4, min_validator_withdrawability_delay: Epoch::new(256), shard_committee_period: 256, + proposer_reorg_cutoff_bps: 1667, + attestation_due_bps: 3333, + aggregate_due_bps: 6667, + sync_message_due_bps: 3333, + contribution_due_bps: 6667, /* * Reward and penalty quotients @@ -1098,7 +1110,7 @@ impl ChainSpec { * Fulu hard fork params */ fulu_fork_version: [0x06, 0x00, 0x00, 0x00], - fulu_fork_epoch: None, + fulu_fork_epoch: Some(Epoch::new(411392)), custody_requirement: 4, number_of_custody_groups: 128, data_column_sidecar_subnet_count: 128, @@ -1158,7 +1170,16 @@ impl ChainSpec { /* * Networking Fulu specific */ - blob_schedule: BlobSchedule::default(), + blob_schedule: BlobSchedule::new(vec![ + BlobParameters { + epoch: Epoch::new(412672), + max_blobs_per_block: 15, + }, + BlobParameters { + epoch: Epoch::new(419072), + max_blobs_per_block: 21, + }, + ]), min_epochs_for_data_column_sidecars_requests: default_min_epochs_for_data_column_sidecars_requests(), max_data_columns_by_root_request: default_data_columns_by_root_request(), @@ -1310,12 +1331,18 @@ impl ChainSpec { */ genesis_delay: 6000, // 100 minutes seconds_per_slot: 5, + slot_duration_ms: 5000, min_attestation_inclusion_delay: 1, min_seed_lookahead: Epoch::new(1), max_seed_lookahead: Epoch::new(4), min_epochs_to_inactivity_penalty: 4, min_validator_withdrawability_delay: Epoch::new(256), shard_committee_period: 256, + proposer_reorg_cutoff_bps: 1667, + attestation_due_bps: 3333, + aggregate_due_bps: 6667, + sync_message_due_bps: 3333, + contribution_due_bps: 6667, /* * Reward and penalty quotients @@ -1429,8 +1456,7 @@ impl ChainSpec { .expect("pow does not overflow"), whistleblower_reward_quotient_electra: u64::checked_pow(2, 12) .expect("pow does not overflow"), - max_pending_partials_per_withdrawals_sweep: u64::checked_pow(2, 3) - .expect("pow does not overflow"), + max_pending_partials_per_withdrawals_sweep: 6, min_per_epoch_churn_limit_electra: option_wrapper(|| { u64::checked_pow(2, 7)?.checked_mul(u64::checked_pow(10, 9)?) }) From 4908687e7d0b8b00dd6ecd3f7e646d952155a02f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 3 Nov 2025 19:06:03 +1100 Subject: [PATCH 3/5] Proposer duties backwards compat (#8335) The beacon API spec wasn't updated to use the Fulu definition of `dependent_root` for the proposer duties endpoint. No other client updated their logic, so to retain backwards compatibility the decision has been made to continue using the block root at the end of epoch `N - 1`, and introduce a new v2 endpoint down the track to use the correct dependent root. 
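For clarity, here is a minimal sketch of the two decision slots in play (plain integers stand in for the `Epoch`/`Slot` types; the constant and the request epoch are illustrative, not taken from the diff):

    // Illustrative only: decision slots keying the proposer shuffling.
    const SLOTS_PER_EPOCH: u64 = 32;

    /// Last slot of epoch `e`.
    fn end_slot(e: u64) -> u64 {
        (e + 1) * SLOTS_PER_EPOCH - 1
    }

    fn main() {
        let request_epoch = 100u64;
        // v1 (legacy) behaviour, restored by this PR: duties for epoch N are
        // keyed by the block root at the last slot of epoch N - 1.
        let legacy_decision_slot = end_slot(request_epoch - 1);
        // Fulu semantics (reserved for the future v2 endpoint): with proposer
        // lookahead, the shuffling for epoch N is already fixed one epoch
        // earlier, at the last slot of epoch N - 2.
        let fulu_decision_slot = end_slot(request_epoch - 2);
        assert_eq!(legacy_decision_slot, 3199);
        assert_eq!(fulu_decision_slot, 3167);
    }
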
Eth R&D discussion: https://discord.com/channels/595666850260713488/598292067260825641/1433036715848765562 Change the behaviour of the v1 endpoint back to using the last slot of `N - 1` rather than the last slot of `N - 2`. This introduces the possibility of dependent root false positives (the root can change without changing the shuffling), but causes the least compatibility issues with other clients. Co-Authored-By: Michael Sproul --- .../beacon_chain/src/beacon_proposer_cache.rs | 23 ++++++++++++++-- beacon_node/beacon_chain/tests/store_tests.rs | 9 ++++--- beacon_node/http_api/src/proposer_duties.rs | 27 +++++++++++++------ .../http_api/tests/interactive_tests.rs | 3 +-- consensus/types/src/beacon_state.rs | 16 +++++++++++ testing/ef_tests/src/cases/fork_choice.rs | 2 +- 6 files changed, 64 insertions(+), 16 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index 6effce49f8..bd6460eba7 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -166,10 +166,17 @@ impl BeaconProposerCache { } /// Compute the proposer duties using the head state without cache. +/// +/// Return: +/// - Proposer indices. +/// - True dependent root. +/// - Legacy dependent root (last block of epoch `N - 1`). +/// - Head execution status. +/// - Fork at `request_epoch`. pub fn compute_proposer_duties_from_head( request_epoch: Epoch, chain: &BeaconChain, -) -> Result<(Vec, Hash256, ExecutionStatus, Fork), BeaconChainError> { +) -> Result<(Vec, Hash256, Hash256, ExecutionStatus, Fork), BeaconChainError> { // Atomically collect information about the head whilst holding the canonical head `Arc` as // short as possible. let (mut state, head_state_root, head_block_root) = { @@ -203,11 +210,23 @@ pub fn compute_proposer_duties_from_head( .proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root, &chain.spec) .map_err(BeaconChainError::from)?; + // This is only required because the V1 proposer duties endpoint spec wasn't updated for Fulu. We + // can delete this once the V1 endpoint is deprecated at the Glamsterdam fork. + let legacy_dependent_root = state + .legacy_proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root) + .map_err(BeaconChainError::from)?; + // Use fork_at_epoch rather than the state's fork, because post-Fulu we may not have advanced // the state completely into the new epoch. let fork = chain.spec.fork_at_epoch(request_epoch); - Ok((indices, dependent_root, execution_status, fork)) + Ok(( + indices, + dependent_root, + legacy_dependent_root, + execution_status, + fork, + )) } /// If required, advance `state` to the epoch required to determine proposer indices in `target_epoch`. 
diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 7891b22432..0c83244f44 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1561,7 +1561,7 @@ async fn proposer_duties_from_head_fulu() { // Compute the proposer duties at the next epoch from the head let next_epoch = head_state.next_epoch().unwrap(); - let (_indices, dependent_root, _, fork) = + let (_indices, dependent_root, legacy_dependent_root, _, fork) = compute_proposer_duties_from_head(next_epoch, &harness.chain).unwrap(); assert_eq!( @@ -1570,6 +1570,8 @@ async fn proposer_duties_from_head_fulu() { .proposer_shuffling_decision_root_at_epoch(next_epoch, head_block_root.into(), spec) .unwrap() ); + assert_ne!(dependent_root, legacy_dependent_root); + assert_eq!(legacy_dependent_root, Hash256::from(head_block_root)); assert_eq!(fork, head_state.fork()); } @@ -1617,7 +1619,7 @@ async fn proposer_lookahead_gloas_fork_epoch() { assert_eq!(head_state.current_epoch(), gloas_fork_epoch - 1); // Compute the proposer duties at the fork epoch from the head. - let (indices, dependent_root, _, fork) = + let (indices, dependent_root, legacy_dependent_root, _, fork) = compute_proposer_duties_from_head(gloas_fork_epoch, &harness.chain).unwrap(); assert_eq!( @@ -1630,6 +1632,7 @@ async fn proposer_lookahead_gloas_fork_epoch() { ) .unwrap() ); + assert_ne!(dependent_root, legacy_dependent_root); assert_ne!(fork, head_state.fork()); assert_eq!(fork, spec.fork_at_epoch(gloas_fork_epoch)); @@ -1639,7 +1642,7 @@ async fn proposer_lookahead_gloas_fork_epoch() { .add_attested_blocks_at_slots(head_state, head_state_root, &gloas_slots, &all_validators) .await; - let (no_lookahead_indices, no_lookahead_dependent_root, _, no_lookahead_fork) = + let (no_lookahead_indices, no_lookahead_dependent_root, _, _, no_lookahead_fork) = compute_proposer_duties_from_head(gloas_fork_epoch, &harness.chain).unwrap(); assert_eq!(no_lookahead_indices, indices); diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 78f99c475c..1ebb174785 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -60,13 +60,13 @@ pub fn proposer_duties( .safe_add(1) .map_err(warp_utils::reject::arith_error)? 
{ - let (proposers, dependent_root, execution_status, _fork) = + let (proposers, _dependent_root, legacy_dependent_root, execution_status, _fork) = compute_proposer_duties_from_head(request_epoch, chain) .map_err(warp_utils::reject::unhandled_error)?; convert_to_api_response( chain, request_epoch, - dependent_root, + legacy_dependent_root, execution_status.is_optimistic_or_invalid(), proposers, ) @@ -116,6 +116,11 @@ fn try_proposer_duties_from_cache( .beacon_state .proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root, &chain.spec) .map_err(warp_utils::reject::beacon_state_error)?; + let legacy_dependent_root = head + .snapshot + .beacon_state + .legacy_proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root) + .map_err(warp_utils::reject::beacon_state_error)?; let execution_optimistic = chain .is_optimistic_or_invalid_head_block(head_block) .map_err(warp_utils::reject::unhandled_error)?; @@ -129,7 +134,7 @@ fn try_proposer_duties_from_cache( convert_to_api_response( chain, request_epoch, - head_decision_root, + legacy_dependent_root, execution_optimistic, indices.to_vec(), ) @@ -151,7 +156,7 @@ fn compute_and_cache_proposer_duties( current_epoch: Epoch, chain: &BeaconChain, ) -> Result { - let (indices, dependent_root, execution_status, fork) = + let (indices, dependent_root, legacy_dependent_root, execution_status, fork) = compute_proposer_duties_from_head(current_epoch, chain) .map_err(warp_utils::reject::unhandled_error)?; @@ -166,7 +171,7 @@ fn compute_and_cache_proposer_duties( convert_to_api_response( chain, current_epoch, - dependent_root, + legacy_dependent_root, execution_status.is_optimistic_or_invalid(), indices, ) @@ -229,12 +234,18 @@ fn compute_historic_proposer_duties( // We can supply the genesis block root as the block root since we know that the only block that // decides its own root is the genesis block. - let dependent_root = state - .proposer_shuffling_decision_root(chain.genesis_block_root, &chain.spec) + let legacy_dependent_root = state + .legacy_proposer_shuffling_decision_root_at_epoch(epoch, chain.genesis_block_root) .map_err(BeaconChainError::from) .map_err(warp_utils::reject::unhandled_error)?; - convert_to_api_response(chain, epoch, dependent_root, execution_optimistic, indices) + convert_to_api_response( + chain, + epoch, + legacy_dependent_root, + execution_optimistic, + indices, + ) } /// Converts the internal representation of proposer duties into one that is compatible with the diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 5b016a7de4..a9de737d65 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1017,10 +1017,9 @@ async fn proposer_duties_with_gossip_tolerance() { assert_eq!( proposer_duties_tolerant_current_epoch.dependent_root, head_state - .proposer_shuffling_decision_root_at_epoch( + .legacy_proposer_shuffling_decision_root_at_epoch( tolerant_current_epoch, head_block_root, - spec ) .unwrap() ); diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 1bd4927fe8..9c4e50dc61 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -911,6 +911,22 @@ impl BeaconState { } } + /// Returns the block root at the last slot of `epoch - 1`. + /// + /// This can be deleted after Glamsterdam and the removal of the v1 proposer duties endpoint. 
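+ ///
+ /// If the state has not yet advanced past the decision slot (i.e. `self.slot() <= decision_slot`),
+ /// no block at the decision slot exists in this state's history yet, so the head block root is
+ /// returned instead of a root read from the state's block roots.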
+ pub fn legacy_proposer_shuffling_decision_root_at_epoch( + &self, + epoch: Epoch, + head_block_root: Hash256, + ) -> Result { + let decision_slot = epoch.saturating_sub(1u64).end_slot(E::slots_per_epoch()); + if self.slot() <= decision_slot { + Ok(head_block_root) + } else { + self.get_block_root(decision_slot).copied() + } + } + /// Returns the block root which decided the proposer shuffling for the current epoch. This root /// can be used to key this proposer shuffling. /// diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 47b9902345..8e9d438a24 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -920,7 +920,7 @@ impl Tester { let cached_head = self.harness.chain.canonical_head.cached_head(); let next_slot = cached_head.snapshot.beacon_block.slot() + 1; let next_slot_epoch = next_slot.epoch(E::slots_per_epoch()); - let (proposer_indices, decision_root, _, fork) = + let (proposer_indices, decision_root, _, _, fork) = compute_proposer_duties_from_head(next_slot_epoch, &self.harness.chain).unwrap(); let proposer_index = proposer_indices[next_slot.as_usize() % E::slots_per_epoch() as usize]; From 5d0f8a083ae3c23134e61686a7f12441c3256240 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 3 Nov 2025 00:06:06 -0800 Subject: [PATCH 4/5] Ensure custody backfill sync couples all responses before importing (#8339) Custody backfill sync has a bug when we request columns from more than one peer per batch. The fix here ensures we wait for all requests to be completed before performing verification and importing the responses. I've also added an endpoint `lighthouse/custody/backfill` that resets a node's earliest available data column to the current epoch so that custody backfill can be triggered. This endpoint is needed to rescue any nodes that may have missing columns due to the custody backfill sync bug without requiring a full re-sync.
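As a rough sketch of the shape of the coupling fix (simplified, self-contained Rust; `ColumnRequest` and `responses_if_complete` are hypothetical stand-ins for the real per-peer request state in `range_data_column_batch_request.rs`):

    use std::collections::HashMap;

    /// Stand-in for a per-peer data-columns-by-range request.
    struct ColumnRequest {
        /// `Some(columns)` once this peer's response stream has finished.
        finished: Option<Vec<u64>>,
    }

    /// Couple the batch only once *every* request has finished. Returning
    /// `None` while any request is still in flight is what prevents a
    /// partially downloaded batch from being verified and imported.
    fn responses_if_complete(requests: &HashMap<u64, ColumnRequest>) -> Option<Vec<u64>> {
        let mut all_columns = vec![];
        for req in requests.values() {
            // The previous code used `filter_map(..).flatten()` here, which
            // silently skipped requests that had not completed yet.
            all_columns.extend(req.finished.as_ref()?.iter().copied());
        }
        Some(all_columns)
    }
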
Co-Authored-By: Eitan Seri-Levi Co-Authored-By: Eitan Seri-Levi Co-Authored-By: Jimmy Chen Co-Authored-By: Michael Sproul --- .../beacon_chain/src/custody_context.rs | 81 +++++++++++++++++-- .../src/historical_data_columns.rs | 5 -- beacon_node/http_api/src/lib.rs | 32 ++++++++ beacon_node/http_api/src/test_utils.rs | 18 +++++ .../tests/broadcast_validation_tests.rs | 2 + beacon_node/http_api/tests/fork_tests.rs | 2 + .../http_api/tests/interactive_tests.rs | 65 +++++++++++++++ .../src/sync/custody_backfill_sync/mod.rs | 5 +- .../sync/range_data_column_batch_request.rs | 21 ++--- book/src/api_lighthouse.md | 10 +++ common/eth2/src/lighthouse.rs | 13 +++ 11 files changed, 230 insertions(+), 24 deletions(-) diff --git a/beacon_node/beacon_chain/src/custody_context.rs b/beacon_node/beacon_chain/src/custody_context.rs index 9a6f51174a..a5ef3ed2f6 100644 --- a/beacon_node/beacon_chain/src/custody_context.rs +++ b/beacon_node/beacon_chain/src/custody_context.rs @@ -120,9 +120,7 @@ impl ValidatorRegistrations { let effective_epoch = (current_slot + effective_delay_slots).epoch(E::slots_per_epoch()) + 1; self.epoch_validator_custody_requirements - .entry(effective_epoch) - .and_modify(|old_custody| *old_custody = validator_custody_requirement) - .or_insert(validator_custody_requirement); + .insert(effective_epoch, validator_custody_requirement); Some((effective_epoch, validator_custody_requirement)) } else { None } } @@ -154,11 +152,25 @@ impl ValidatorRegistrations { }); self.epoch_validator_custody_requirements - .entry(effective_epoch) - .and_modify(|old_custody| *old_custody = latest_validator_custody) - .or_insert(latest_validator_custody); + .insert(effective_epoch, latest_validator_custody); } } + + /// Updates the `epoch -> cgc` map by pruning records before `effective_epoch` + /// while setting the `cgc` at `effective_epoch` to the latest validator custody requirement. + /// + /// This is used to restart custody backfill sync at `effective_epoch`. + pub fn reset_validator_custody_requirements(&mut self, effective_epoch: Epoch) { + if let Some(latest_validator_custody_requirements) = + self.latest_validator_custody_requirement() + { + self.epoch_validator_custody_requirements + .retain(|&epoch, _| epoch >= effective_epoch); + + self.epoch_validator_custody_requirements + .insert(effective_epoch, latest_validator_custody_requirements); + }; + } } /// Given the `validator_custody_units`, return the custody requirement based on @@ -535,6 +547,14 @@ impl CustodyContext { .write() .backfill_validator_custody_requirements(effective_epoch, expected_cgc); } + + /// The node is attempting to restart custody backfill. Update the internal records so that + /// custody backfill can start backfilling at `effective_epoch`. + pub fn reset_validator_custody_requirements(&self, effective_epoch: Epoch) { + self.validator_registrations + .write() + .reset_validator_custody_requirements(effective_epoch); + } } /// Indicates that the custody group count (CGC) has increased.
@@ -1491,4 +1511,53 @@ mod tests { ); } } + + #[test] + fn reset_validator_custody_requirements() { + let spec = E::default_spec(); + let minimum_cgc = 4u64; + let initial_cgc = 8u64; + let mid_cgc = 16u64; + let final_cgc = 32u64; + + // Setup: Node restart after multiple validator registrations causing CGC increases + let head_epoch = Epoch::new(20); + let epoch_and_cgc_tuples = vec![ + (Epoch::new(0), initial_cgc), + (Epoch::new(10), mid_cgc), + (head_epoch, final_cgc), + ]; + let custody_context = setup_custody_context(&spec, head_epoch, epoch_and_cgc_tuples); + + // Backfill from epoch 20 to 9 + complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(9), final_cgc); + + // Reset validator custody requirements, pinning the latest cgc at `head_epoch` and pruning all earlier records + custody_context.reset_validator_custody_requirements(head_epoch); + + // Verify epochs 0 - 19 return the minimum cgc requirement because of the validator custody requirement reset + for epoch in 0..=19 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + minimum_cgc, + ); + } + + // Verify epoch 20 returns a CGC of 32 + assert_eq!( + custody_context.custody_group_count_at_epoch(head_epoch, &spec), + final_cgc + ); + + // Rerun backfill from epoch 20 down to epoch 0 + complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(0), final_cgc); + + // Verify epochs 0 - 20 return the final cgc requirements + for epoch in 0..=20 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + final_cgc, + ); + } + } } diff --git a/beacon_node/beacon_chain/src/historical_data_columns.rs b/beacon_node/beacon_chain/src/historical_data_columns.rs index 9304f06570..6cf947adcb 100644 --- a/beacon_node/beacon_chain/src/historical_data_columns.rs +++ b/beacon_node/beacon_chain/src/historical_data_columns.rs @@ -89,11 +89,6 @@ impl BeaconChain { .get_data_column(&block_root, &data_column.index)? .is_some() { - debug!( - block_root = ?block_root, - column_index = data_column.index, - "Skipping data column import as identical data column exists" - ); continue; } if block_root != data_column.block_root() { diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 41cd729a68..9026792b91 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -4604,6 +4604,37 @@ pub fn serve( }, ); + // POST lighthouse/custody/backfill + let post_lighthouse_custody_backfill = warp::path("lighthouse") + .and(warp::path("custody")) + .and(warp::path("backfill")) + .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + // Calling this endpoint will trigger custody backfill once `effective_epoch` + // is finalized. + let effective_epoch = chain + .canonical_head + .cached_head() + .head_slot() + .epoch(T::EthSpec::slots_per_epoch()) + + 1; + let custody_context = chain.data_availability_checker.custody_context(); + // Reset validator custody requirements to `effective_epoch` with the latest + // cgc requirements. + custody_context.reset_validator_custody_requirements(effective_epoch); + // Update `DataColumnCustodyInfo` to reflect the custody change.
+ chain.update_data_column_custody_info(Some( + effective_epoch.start_slot(T::EthSpec::slots_per_epoch()), + )); + Ok(()) + }) + }, + ); + // GET lighthouse/analysis/block_rewards let get_lighthouse_block_rewards = warp::path("lighthouse") .and(warp::path("analysis")) @@ -4963,6 +4994,7 @@ pub fn serve( .uor(post_lighthouse_compaction) .uor(post_lighthouse_add_peer) .uor(post_lighthouse_remove_peer) + .uor(post_lighthouse_custody_backfill) .recover(warp_utils::reject::handle_rejection), ), ) diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index fe9e0dff70..27e2a27d35 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -1,6 +1,7 @@ use crate::{Config, Context}; use beacon_chain::{ BeaconChain, BeaconChainTypes, + custody_context::NodeCustodyType, test_utils::{BeaconChainHarness, BoxedMutator, Builder, EphemeralHarnessType}, }; use beacon_processor::{ @@ -67,6 +68,20 @@ impl InteractiveTester { None, Config::default(), true, + NodeCustodyType::Fullnode, + ) + .await + } + + pub async fn new_supernode(spec: Option, validator_count: usize) -> Self { + Self::new_with_initializer_and_mutator( + spec, + validator_count, + None, + None, + Config::default(), + true, + NodeCustodyType::Supernode, ) .await } @@ -78,6 +93,7 @@ impl InteractiveTester { mutator: Option>, config: Config, use_mock_builder: bool, + node_custody_type: NodeCustodyType, ) -> Self { let mut harness_builder = BeaconChainHarness::builder(E::default()) .spec_or_default(spec.map(Arc::new)) @@ -93,6 +109,8 @@ impl InteractiveTester { .fresh_ephemeral_store() }; + harness_builder = harness_builder.node_custody_type(node_custody_type); + // Add a mutator for the beacon chain builder which will be called in // `HarnessBuilder::build`. if let Some(mutator) = mutator { diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 9427f6fdf3..82723c2b40 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -1,3 +1,4 @@ +use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::test_utils::test_spec; use beacon_chain::{ GossipVerifiedBlock, IntoGossipVerifiedBlock, WhenSlotSkipped, @@ -1956,6 +1957,7 @@ pub async fn duplicate_block_status_code() { ..Config::default() }, true, + NodeCustodyType::Fullnode, ) .await; diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 62a3461276..50cf866b6a 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -1,4 +1,5 @@ //! Tests for API behaviour across fork boundaries. +use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::{ StateSkipConfig, test_utils::{DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME, RelativeSyncCommittee}, @@ -426,6 +427,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { None, Default::default(), true, + NodeCustodyType::Fullnode, ) .await; let harness = &tester.harness; diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index a9de737d65..83cb70a7a3 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1,4 +1,5 @@ //! 
Generic tests that make use of the (newer) `InteractiveApiTester` +use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::{ ChainConfig, chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, @@ -76,6 +77,7 @@ async fn state_by_root_pruned_from_fork_choice() { None, Default::default(), false, + NodeCustodyType::Fullnode, ) .await; @@ -433,6 +435,7 @@ pub async fn proposer_boost_re_org_test( })), Default::default(), false, + NodeCustodyType::Fullnode, ) .await; let harness = &tester.harness; @@ -1049,6 +1052,68 @@ async fn proposer_duties_with_gossip_tolerance() { ); } +// Test that a request to `lighthouse/custody/backfill` succeeds by verifying that `CustodyContext` and `DataColumnCustodyInfo` +// have been updated with the correct values. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn lighthouse_restart_custody_backfill() { + let spec = test_spec::(); + + // Skip pre-Fulu. + if !spec.is_fulu_scheduled() { + return; + } + + let validator_count = 24; + + let tester = InteractiveTester::::new_supernode(Some(spec), validator_count).await; + let harness = &tester.harness; + let spec = &harness.spec; + let client = &tester.client; + let min_cgc = spec.custody_requirement; + let max_cgc = spec.number_of_custody_groups; + + let num_blocks = 2 * E::slots_per_epoch(); + + let custody_context = harness.chain.data_availability_checker.custody_context(); + + harness.advance_slot(); + harness + .extend_chain_with_sync( + num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + SyncCommitteeStrategy::NoValidators, + LightClientStrategy::Disabled, + ) + .await; + + let cgc_at_head = custody_context.custody_group_count_at_head(spec); + let earliest_data_column_epoch = harness.chain.earliest_custodied_data_column_epoch(); + + assert_eq!(cgc_at_head, max_cgc); + assert_eq!(earliest_data_column_epoch, None); + + custody_context + .update_and_backfill_custody_count_at_epoch(harness.chain.epoch().unwrap(), cgc_at_head); + client.post_lighthouse_custody_backfill().await.unwrap(); + + let cgc_at_head = custody_context.custody_group_count_at_head(spec); + let cgc_at_previous_epoch = + custody_context.custody_group_count_at_epoch(harness.chain.epoch().unwrap() - 1, spec); + let earliest_data_column_epoch = harness.chain.earliest_custodied_data_column_epoch(); + + // `DataColumnCustodyInfo` should have been updated to the epoch after the head epoch + assert_eq!( + earliest_data_column_epoch, + Some(harness.chain.epoch().unwrap() + 1) + ); + // Cgc requirements should have stayed the same at head + assert_eq!(cgc_at_head, max_cgc); + // Cgc requirements at the previous epoch should be `min_cgc` + // This allows for custody backfill to re-fetch columns for this epoch. + assert_eq!(cgc_at_previous_epoch, min_cgc); +} + // Test that a request for next epoch proposer duties succeeds when the current slot clock is within // gossip clock disparity (500ms) of the new epoch. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] diff --git a/beacon_node/network/src/sync/custody_backfill_sync/mod.rs b/beacon_node/network/src/sync/custody_backfill_sync/mod.rs index 5c5505083f..bb2c6799f1 100644 --- a/beacon_node/network/src/sync/custody_backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/custody_backfill_sync/mod.rs @@ -382,11 +382,9 @@ impl CustodyBackFillSync { return None; }; - let mut missing_columns = HashSet::new(); - // Skip all batches (Epochs) that don't have missing columns.
for epoch in Epoch::range_inclusive_rev(self.to_be_downloaded, column_da_boundary) { - missing_columns = self.beacon_chain.get_missing_columns_for_epoch(epoch); + let missing_columns = self.beacon_chain.get_missing_columns_for_epoch(epoch); if !missing_columns.is_empty() { self.to_be_downloaded = epoch; @@ -445,6 +443,7 @@ impl CustodyBackFillSync { self.include_next_batch() } Entry::Vacant(entry) => { + let missing_columns = self.beacon_chain.get_missing_columns_for_epoch(batch_id); entry.insert(BatchInfo::new( &batch_id, CUSTODY_BACKFILL_EPOCHS_PER_BATCH, diff --git a/beacon_node/network/src/sync/range_data_column_batch_request.rs b/beacon_node/network/src/sync/range_data_column_batch_request.rs index 542d99d97c..72e2fb2d5b 100644 --- a/beacon_node/network/src/sync/range_data_column_batch_request.rs +++ b/beacon_node/network/src/sync/range_data_column_batch_request.rs @@ -70,16 +70,17 @@ impl RangeDataColumnBatchRequest { HashMap::new(); let mut column_to_peer_id: HashMap = HashMap::new(); - for column in self - .requests - .values() - .filter_map(|req| req.to_finished()) - .flatten() - { - received_columns_for_slot - .entry(column.slot()) - .or_default() - .push(column.clone()); + for req in self.requests.values() { + let Some(columns) = req.to_finished() else { + return None; + }; + + for column in columns { + received_columns_for_slot + .entry(column.slot()) + .or_default() + .push(column.clone()); + } } // Note: this assumes that only 1 peer is responsible for a column diff --git a/book/src/api_lighthouse.md b/book/src/api_lighthouse.md index 2e694989f9..f804cb9df2 100644 --- a/book/src/api_lighthouse.md +++ b/book/src/api_lighthouse.md @@ -447,6 +447,16 @@ indicating that all states with slots `>= 0` are available, i.e., full state history. For more details on the specific meanings of these fields see the docs on [Checkpoint Sync](./advanced_checkpoint_sync.md#how-to-run-an-archived-node). +## `/lighthouse/custody/backfill` + +Starts a custody backfill sync from the next epoch with the node's latest custody requirements. The sync won't begin immediately; it waits until the next epoch is finalized before triggering. + +This endpoint should only be used to fix nodes that may have partial custody columns due to a prior backfill bug (present in v8.0.0-rc.2). Use with caution as it re-downloads all historic custody data columns and may consume significant bandwidth. + +```bash +curl -X POST "http://localhost:5052/lighthouse/custody/backfill" +``` + ## `/lighthouse/merge_readiness` Returns the current difficulty and terminal total difficulty of the network. Before [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, you will see that the current difficulty is less than the terminal total difficulty. An example is shown below: diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index f65b5a07b6..4f9a049e44 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -208,6 +208,19 @@ impl BeaconNodeHttpClient { self.get(path).await } + /// `POST lighthouse/custody/backfill` + pub async fn post_lighthouse_custody_backfill(&self) -> Result<(), Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+ .push("lighthouse") + .push("custody") + .push("backfill"); + + self.post(path, &()).await + } + /* * Note: * From e3ee7febce64c1b5a85c3ab0be0619571ee92d58 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 4 Nov 2025 11:34:47 +1100 Subject: [PATCH 5/5] Release v8.0.0 (#8352) N/A Includes the following unmerged PRs: - #8344 - #8335 - #8339 This PR should be merged after all above PRs are merged. Co-Authored-By: Jimmy Chen Co-Authored-By: Jimmy Chen --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 6 +++--- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1efb1fbc70..7c58274598 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -918,7 +918,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "account_utils", "beacon_chain", @@ -1193,7 +1193,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "beacon_node", "bytes", @@ -5064,7 +5064,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "account_utils", "beacon_chain", @@ -5574,7 +5574,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 56c2fb410c..6a54d3342e 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "8.0.0-rc.2" +version = "8.0.0" authors = [ "Paul Hauner ", "Age Manning "] edition = { workspace = true } diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index bd7b37926f..a0965fa548 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v8.0.0-rc.2-", - fallback = "Lighthouse/v8.0.0-rc.2" + prefix = "Lighthouse/v8.0.0-", + fallback = "Lighthouse/v8.0.0" ); /// Returns the first eight characters of the latest commit hash for this build. @@ -54,7 +54,7 @@ pub fn version_with_platform() -> String { /// /// `1.5.1` pub fn version() -> &'static str { - "8.0.0-rc.2" + "8.0.0" } /// Returns the name of the current client running. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 2698073b5f..6b7aeb886c 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "8.0.0-rc.2" +version = "8.0.0" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index a3240c6d7c..0d4129817a 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "8.0.0-rc.2" +version = "8.0.0" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false