diff --git a/Cargo.lock b/Cargo.lock index bf2d6dd2d1..676f674b1b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,7 +4,7 @@ version = 4 [[package]] name = "account_manager" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "account_utils", "bls", @@ -918,7 +918,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "account_utils", "beacon_chain", @@ -1193,7 +1193,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "beacon_node", "bytes", @@ -5047,7 +5047,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "account_utils", "beacon_chain", @@ -5557,7 +5557,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "account_manager", "account_utils", @@ -5691,7 +5691,7 @@ dependencies = [ [[package]] name = "lighthouse_version" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "regex", ] @@ -10091,7 +10091,7 @@ dependencies = [ [[package]] name = "validator_client" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "account_utils", "beacon_node_fallback", diff --git a/Cargo.toml b/Cargo.toml index 0ca8cbf83c..1dfc753b8c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -94,7 +94,7 @@ resolver = "2" [workspace.package] edition = "2024" -version = "8.0.0-rc.2" +version = "8.0.0" [workspace.dependencies] account_utils = { path = "common/account_utils" } diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index 6effce49f8..bd6460eba7 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -166,10 +166,17 @@ impl BeaconProposerCache { } /// Compute the proposer duties using the head state without cache. 
+/// +/// Return: +/// - Proposer indices. +/// - True dependent root. +/// - Legacy dependent root (last block of epoch `N - 1`). +/// - Head execution status. +/// - Fork at `request_epoch`. pub fn compute_proposer_duties_from_head( request_epoch: Epoch, chain: &BeaconChain, -) -> Result<(Vec, Hash256, ExecutionStatus, Fork), BeaconChainError> { +) -> Result<(Vec, Hash256, Hash256, ExecutionStatus, Fork), BeaconChainError> { // Atomically collect information about the head whilst holding the canonical head `Arc` as // short as possible. let (mut state, head_state_root, head_block_root) = { @@ -203,11 +210,23 @@ pub fn compute_proposer_duties_from_head( .proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root, &chain.spec) .map_err(BeaconChainError::from)?; + // This is only required because the V1 proposer duties endpoint spec wasn't updated for Fulu. We + // can delete this once the V1 endpoint is deprecated at the Glamsterdam fork. + let legacy_dependent_root = state + .legacy_proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root) + .map_err(BeaconChainError::from)?; + // Use fork_at_epoch rather than the state's fork, because post-Fulu we may not have advanced // the state completely into the new epoch. let fork = chain.spec.fork_at_epoch(request_epoch); - Ok((indices, dependent_root, execution_status, fork)) + Ok(( + indices, + dependent_root, + legacy_dependent_root, + execution_status, + fork, + )) } /// If required, advance `state` to the epoch required to determine proposer indices in `target_epoch`. 
diff --git a/beacon_node/beacon_chain/src/custody_context.rs b/beacon_node/beacon_chain/src/custody_context.rs index 0da0e7573e..a5ef3ed2f6 100644 --- a/beacon_node/beacon_chain/src/custody_context.rs +++ b/beacon_node/beacon_chain/src/custody_context.rs @@ -120,9 +120,7 @@ impl ValidatorRegistrations { let effective_epoch = (current_slot + effective_delay_slots).epoch(E::slots_per_epoch()) + 1; self.epoch_validator_custody_requirements - .entry(effective_epoch) - .and_modify(|old_custody| *old_custody = validator_custody_requirement) - .or_insert(validator_custody_requirement); + .insert(effective_epoch, validator_custody_requirement); Some((effective_epoch, validator_custody_requirement)) } else { None @@ -134,8 +132,17 @@ impl ValidatorRegistrations { /// /// This is done by pruning all values on/after `effective_epoch` and updating the map to store /// the latest validator custody requirements for the `effective_epoch`. - pub fn backfill_validator_custody_requirements(&mut self, effective_epoch: Epoch) { + pub fn backfill_validator_custody_requirements( + &mut self, + effective_epoch: Epoch, + expected_cgc: u64, + ) { if let Some(latest_validator_custody) = self.latest_validator_custody_requirement() { + // If the expected cgc isn't equal to the latest validator custody a very recent cgc change may have occurred. + // We should not update the mapping. + if expected_cgc != latest_validator_custody { + return; + } // Delete records if // 1. The epoch is greater than or equal than `effective_epoch` // 2. 
the cgc requirements match the latest validator custody requirements @@ -145,11 +152,25 @@ impl ValidatorRegistrations { }); self.epoch_validator_custody_requirements - .entry(effective_epoch) - .and_modify(|old_custody| *old_custody = latest_validator_custody) - .or_insert(latest_validator_custody); + .insert(effective_epoch, latest_validator_custody); } } + + /// Updates the `epoch -> cgc` map by pruning records before `effective_epoch` + /// while setting the `cgc` at `effective_epoch` to the latest validator custody requirement. + /// + /// This is used to restart custody backfill sync at `effective_epoch` + pub fn reset_validator_custody_requirements(&mut self, effective_epoch: Epoch) { + if let Some(latest_validator_custody_requirements) = + self.latest_validator_custody_requirement() + { + self.epoch_validator_custody_requirements + .retain(|&epoch, _| epoch >= effective_epoch); + + self.epoch_validator_custody_requirements + .insert(effective_epoch, latest_validator_custody_requirements); + }; + } } /// Given the `validator_custody_units`, return the custody requirement based on @@ -517,10 +538,22 @@ impl CustodyContext { /// The node has completed backfill for this epoch. Update the internal records so the function /// [`Self::custody_columns_for_epoch()`] returns up-to-date results. - pub fn update_and_backfill_custody_count_at_epoch(&self, effective_epoch: Epoch) { + pub fn update_and_backfill_custody_count_at_epoch( + &self, + effective_epoch: Epoch, + expected_cgc: u64, + ) { self.validator_registrations .write() - .backfill_validator_custody_requirements(effective_epoch); + .backfill_validator_custody_requirements(effective_epoch, expected_cgc); + } + + /// The node is attempting to restart custody backfill. Update the internal records so that + /// custody backfill can start backfilling at `effective_epoch`. 
+ pub fn reset_validator_custody_requirements(&self, effective_epoch: Epoch) { + self.validator_registrations + .write() + .reset_validator_custody_requirements(effective_epoch); } } @@ -604,11 +637,13 @@ mod tests { custody_context: &CustodyContext, start_epoch: Epoch, end_epoch: Epoch, + expected_cgc: u64, ) { assert!(start_epoch >= end_epoch); // Call from end_epoch down to start_epoch (inclusive), simulating backfill for epoch in (end_epoch.as_u64()..=start_epoch.as_u64()).rev() { - custody_context.update_and_backfill_custody_count_at_epoch(Epoch::new(epoch)); + custody_context + .update_and_backfill_custody_count_at_epoch(Epoch::new(epoch), expected_cgc); } } @@ -1368,7 +1403,7 @@ mod tests { ); // Backfill from epoch 20 down to 15 (simulating backfill) - complete_backfill_for_epochs(&custody_context, head_epoch, Epoch::new(15)); + complete_backfill_for_epochs(&custody_context, head_epoch, Epoch::new(15), final_cgc); // After backfilling to epoch 15, it should use latest CGC (32) assert_eq!( @@ -1406,7 +1441,7 @@ mod tests { let custody_context = setup_custody_context(&spec, head_epoch, epoch_and_cgc_tuples); // Backfill to epoch 15 (between the two CGC increases) - complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(15)); + complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(15), final_cgc); // Verify epochs 15 - 20 return latest CGC (32) for epoch in 15..=20 { @@ -1424,4 +1459,105 @@ mod tests { ); } } + + #[test] + fn attempt_backfill_with_invalid_cgc() { + let spec = E::default_spec(); + let initial_cgc = 8u64; + let mid_cgc = 16u64; + let final_cgc = 32u64; + + // Setup: Node restart after multiple validator registrations causing CGC increases + let head_epoch = Epoch::new(20); + let epoch_and_cgc_tuples = vec![ + (Epoch::new(0), initial_cgc), + (Epoch::new(10), mid_cgc), + (head_epoch, final_cgc), + ]; + let custody_context = setup_custody_context(&spec, head_epoch, epoch_and_cgc_tuples); + + // Backfill to 
epoch 15 (between the two CGC increases) + complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(15), final_cgc); + + // Verify epochs 15 - 20 return latest CGC (32) + for epoch in 15..=20 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + final_cgc, + ); + } + + // Attempt backfill with an incorrect cgc value + complete_backfill_for_epochs( + &custody_context, + Epoch::new(20), + Epoch::new(15), + initial_cgc, + ); + + // Verify epochs 15 - 20 still return latest CGC (32) + for epoch in 15..=20 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + final_cgc, + ); + } + + // Verify epochs 10-14 still return mid_cgc (16) + for epoch in 10..14 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + mid_cgc, + ); + } + } + + #[test] + fn reset_validator_custody_requirements() { + let spec = E::default_spec(); + let minimum_cgc = 4u64; + let initial_cgc = 8u64; + let mid_cgc = 16u64; + let final_cgc = 32u64; + + // Setup: Node restart after multiple validator registrations causing CGC increases + let head_epoch = Epoch::new(20); + let epoch_and_cgc_tuples = vec![ + (Epoch::new(0), initial_cgc), + (Epoch::new(10), mid_cgc), + (head_epoch, final_cgc), + ]; + let custody_context = setup_custody_context(&spec, head_epoch, epoch_and_cgc_tuples); + + // Backfill from epoch 20 to 9 + complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(9), final_cgc); + + // Reset validator custody requirements to the latest cgc requirements at `head_epoch` up to the boundary epoch + custody_context.reset_validator_custody_requirements(head_epoch); + + // Verify epochs 0 - 19 return the minimum cgc requirement because of the validator custody requirement reset + for epoch in 0..=19 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + minimum_cgc, + ); + } + + // Verify epoch 20 returns a CGC of 32 + 
assert_eq!( + custody_context.custody_group_count_at_epoch(head_epoch, &spec), + final_cgc + ); + + // Rerun Backfill to epoch 20 + complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(0), final_cgc); + + // Verify epochs 0 - 20 return the final cgc requirements + for epoch in 0..=20 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + final_cgc, + ); + } + } } diff --git a/beacon_node/beacon_chain/src/historical_data_columns.rs b/beacon_node/beacon_chain/src/historical_data_columns.rs index 7e196eb75e..6cf947adcb 100644 --- a/beacon_node/beacon_chain/src/historical_data_columns.rs +++ b/beacon_node/beacon_chain/src/historical_data_columns.rs @@ -54,6 +54,7 @@ impl BeaconChain { &self, epoch: Epoch, historical_data_column_sidecar_list: DataColumnSidecarList, + expected_cgc: u64, ) -> Result { let mut total_imported = 0; let mut ops = vec![]; @@ -88,11 +89,6 @@ impl BeaconChain { .get_data_column(&block_root, &data_column.index)? 
.is_some() { - debug!( - block_root = ?block_root, - column_index = data_column.index, - "Skipping data column import as identical data column exists" - ); continue; } if block_root != data_column.block_root() { @@ -136,7 +132,7 @@ impl BeaconChain { self.data_availability_checker .custody_context() - .update_and_backfill_custody_count_at_epoch(epoch); + .update_and_backfill_custody_count_at_epoch(epoch, expected_cgc); self.safely_backfill_data_column_custody_info(epoch) .map_err(|e| HistoricalDataColumnError::BeaconChainError(Box::new(e)))?; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 41c8f905be..25f824c19b 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1561,7 +1561,7 @@ async fn proposer_duties_from_head_fulu() { // Compute the proposer duties at the next epoch from the head let next_epoch = head_state.next_epoch().unwrap(); - let (_indices, dependent_root, _, fork) = + let (_indices, dependent_root, legacy_dependent_root, _, fork) = compute_proposer_duties_from_head(next_epoch, &harness.chain).unwrap(); assert_eq!( @@ -1570,6 +1570,8 @@ async fn proposer_duties_from_head_fulu() { .proposer_shuffling_decision_root_at_epoch(next_epoch, head_block_root.into(), spec) .unwrap() ); + assert_ne!(dependent_root, legacy_dependent_root); + assert_eq!(legacy_dependent_root, Hash256::from(head_block_root)); assert_eq!(fork, head_state.fork()); } @@ -1617,7 +1619,7 @@ async fn proposer_lookahead_gloas_fork_epoch() { assert_eq!(head_state.current_epoch(), gloas_fork_epoch - 1); // Compute the proposer duties at the fork epoch from the head. 
- let (indices, dependent_root, _, fork) = + let (indices, dependent_root, legacy_dependent_root, _, fork) = compute_proposer_duties_from_head(gloas_fork_epoch, &harness.chain).unwrap(); assert_eq!( @@ -1630,6 +1632,7 @@ async fn proposer_lookahead_gloas_fork_epoch() { ) .unwrap() ); + assert_ne!(dependent_root, legacy_dependent_root); assert_ne!(fork, head_state.fork()); assert_eq!(fork, spec.fork_at_epoch(gloas_fork_epoch)); @@ -1639,7 +1642,7 @@ async fn proposer_lookahead_gloas_fork_epoch() { .add_attested_blocks_at_slots(head_state, head_state_root, &gloas_slots, &all_validators) .await; - let (no_lookahead_indices, no_lookahead_dependent_root, _, no_lookahead_fork) = + let (no_lookahead_indices, no_lookahead_dependent_root, _, _, no_lookahead_fork) = compute_proposer_duties_from_head(gloas_fork_epoch, &harness.chain).unwrap(); assert_eq!(no_lookahead_indices, indices); @@ -3182,6 +3185,8 @@ async fn weak_subjectivity_sync_test( assert_eq!(store.get_anchor_info().state_upper_limit, Slot::new(0)); } +// This test prunes data columns from epoch 0 and then tries to re-import them via +// the same code paths that custody backfill sync imports data columns #[tokio::test] async fn test_import_historical_data_columns_batch() { let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); @@ -3189,6 +3194,7 @@ async fn test_import_historical_data_columns_batch() { let store = get_store_generic(&db_path, StoreConfig::default(), spec); let start_slot = Epoch::new(0).start_slot(E::slots_per_epoch()) + 1; let end_slot = Epoch::new(0).end_slot(E::slots_per_epoch()); + let cgc = 128; let harness = get_harness_import_all_data_columns(store.clone(), LOW_VALIDATOR_COUNT); @@ -3208,6 +3214,7 @@ async fn test_import_historical_data_columns_batch() { let mut data_columns_list = vec![]; + // Get all data columns for epoch 0 for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); @@ 
-3228,6 +3235,7 @@ async fn test_import_historical_data_columns_batch() { harness.advance_slot(); + // Prune data columns harness .chain .store @@ -3239,21 +3247,25 @@ async fn test_import_historical_data_columns_batch() { .forwards_iter_block_roots_until(start_slot, end_slot) .unwrap(); + // Assert that data columns no longer exist for epoch 0 for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); assert!(data_columns.is_none()) } + // Re-import deleted data columns harness .chain - .import_historical_data_column_batch(Epoch::new(0), data_columns_list) + .import_historical_data_column_batch(Epoch::new(0), data_columns_list, cgc) .unwrap(); + let block_root_iter = harness .chain .forwards_iter_block_roots_until(start_slot, end_slot) .unwrap(); + // Assert that data columns now exist for epoch 0 for block in block_root_iter { let (block_root, _) = block.unwrap(); if !harness @@ -3272,6 +3284,7 @@ async fn test_import_historical_data_columns_batch() { } // This should verify that a data column sidecar containing mismatched block roots should fail to be imported. +// This also covers any test cases related to data columns with incorrect/invalid/mismatched block roots. 
#[tokio::test] async fn test_import_historical_data_columns_batch_mismatched_block_root() { let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); @@ -3279,6 +3292,7 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { let store = get_store_generic(&db_path, StoreConfig::default(), spec); let start_slot = Slot::new(1); let end_slot = Slot::new(E::slots_per_epoch() * 2 - 1); + let cgc = 128; let harness = get_harness_import_all_data_columns(store.clone(), LOW_VALIDATOR_COUNT); @@ -3298,6 +3312,8 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { let mut data_columns_list = vec![]; + // Get all data columns from start_slot to end_slot + // and mutate the data columns with an invalid block root for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); @@ -3323,6 +3339,7 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { harness.advance_slot(); + // Prune blobs harness .chain .store @@ -3334,17 +3351,20 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { .forwards_iter_block_roots_until(start_slot, end_slot) .unwrap(); + // Assert there are no columns between start_slot and end_slot for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); assert!(data_columns.is_none()) } + // Attempt to import data columns with invalid block roots and expect a failure let error = harness .chain .import_historical_data_column_batch( start_slot.epoch(E::slots_per_epoch()), data_columns_list, + cgc, ) .unwrap_err(); @@ -3367,6 +3387,7 @@ async fn test_import_historical_data_columns_batch_no_block_found() { let store = get_store_generic(&db_path, StoreConfig::default(), spec); let start_slot = Slot::new(1); let end_slot = Slot::new(E::slots_per_epoch() * 2 - 1); + let cgc = 128; let harness 
= get_harness_import_all_data_columns(store.clone(), LOW_VALIDATOR_COUNT); @@ -3428,7 +3449,7 @@ async fn test_import_historical_data_columns_batch_no_block_found() { let error = harness .chain - .import_historical_data_column_batch(Epoch::new(0), data_columns_list) + .import_historical_data_column_batch(Epoch::new(0), data_columns_list, cgc) .unwrap_err(); assert!(matches!( diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 41cd729a68..9026792b91 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -4604,6 +4604,37 @@ pub fn serve( }, ); + // POST lighthouse/custody/backfill + let post_lighthouse_custody_backfill = warp::path("lighthouse") + .and(warp::path("custody")) + .and(warp::path("backfill")) + .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + // Calling this endpoint will trigger custody backfill once `effective_epoch`` + // is finalized. + let effective_epoch = chain + .canonical_head + .cached_head() + .head_slot() + .epoch(T::EthSpec::slots_per_epoch()) + + 1; + let custody_context = chain.data_availability_checker.custody_context(); + // Reset validator custody requirements to `effective_epoch` with the latest + // cgc requiremnets. + custody_context.reset_validator_custody_requirements(effective_epoch); + // Update `DataColumnCustodyInfo` to reflect the custody change. 
+ chain.update_data_column_custody_info(Some( + effective_epoch.start_slot(T::EthSpec::slots_per_epoch()), + )); + Ok(()) + }) + }, + ); + // GET lighthouse/analysis/block_rewards let get_lighthouse_block_rewards = warp::path("lighthouse") .and(warp::path("analysis")) @@ -4963,6 +4994,7 @@ pub fn serve( .uor(post_lighthouse_compaction) .uor(post_lighthouse_add_peer) .uor(post_lighthouse_remove_peer) + .uor(post_lighthouse_custody_backfill) .recover(warp_utils::reject::handle_rejection), ), ) diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 78f99c475c..1ebb174785 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -60,13 +60,13 @@ pub fn proposer_duties( .safe_add(1) .map_err(warp_utils::reject::arith_error)? { - let (proposers, dependent_root, execution_status, _fork) = + let (proposers, _dependent_root, legacy_dependent_root, execution_status, _fork) = compute_proposer_duties_from_head(request_epoch, chain) .map_err(warp_utils::reject::unhandled_error)?; convert_to_api_response( chain, request_epoch, - dependent_root, + legacy_dependent_root, execution_status.is_optimistic_or_invalid(), proposers, ) @@ -116,6 +116,11 @@ fn try_proposer_duties_from_cache( .beacon_state .proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root, &chain.spec) .map_err(warp_utils::reject::beacon_state_error)?; + let legacy_dependent_root = head + .snapshot + .beacon_state + .legacy_proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root) + .map_err(warp_utils::reject::beacon_state_error)?; let execution_optimistic = chain .is_optimistic_or_invalid_head_block(head_block) .map_err(warp_utils::reject::unhandled_error)?; @@ -129,7 +134,7 @@ fn try_proposer_duties_from_cache( convert_to_api_response( chain, request_epoch, - head_decision_root, + legacy_dependent_root, execution_optimistic, indices.to_vec(), ) @@ -151,7 +156,7 @@ fn 
compute_and_cache_proposer_duties( current_epoch: Epoch, chain: &BeaconChain, ) -> Result { - let (indices, dependent_root, execution_status, fork) = + let (indices, dependent_root, legacy_dependent_root, execution_status, fork) = compute_proposer_duties_from_head(current_epoch, chain) .map_err(warp_utils::reject::unhandled_error)?; @@ -166,7 +171,7 @@ fn compute_and_cache_proposer_duties( convert_to_api_response( chain, current_epoch, - dependent_root, + legacy_dependent_root, execution_status.is_optimistic_or_invalid(), indices, ) @@ -229,12 +234,18 @@ fn compute_historic_proposer_duties( // We can supply the genesis block root as the block root since we know that the only block that // decides its own root is the genesis block. - let dependent_root = state - .proposer_shuffling_decision_root(chain.genesis_block_root, &chain.spec) + let legacy_dependent_root = state + .legacy_proposer_shuffling_decision_root_at_epoch(epoch, chain.genesis_block_root) .map_err(BeaconChainError::from) .map_err(warp_utils::reject::unhandled_error)?; - convert_to_api_response(chain, epoch, dependent_root, execution_optimistic, indices) + convert_to_api_response( + chain, + epoch, + legacy_dependent_root, + execution_optimistic, + indices, + ) } /// Converts the internal representation of proposer duties into one that is compatible with the diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index fe9e0dff70..27e2a27d35 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -1,6 +1,7 @@ use crate::{Config, Context}; use beacon_chain::{ BeaconChain, BeaconChainTypes, + custody_context::NodeCustodyType, test_utils::{BeaconChainHarness, BoxedMutator, Builder, EphemeralHarnessType}, }; use beacon_processor::{ @@ -67,6 +68,20 @@ impl InteractiveTester { None, Config::default(), true, + NodeCustodyType::Fullnode, + ) + .await + } + + pub async fn new_supernode(spec: Option, validator_count: usize) -> Self { 
+ Self::new_with_initializer_and_mutator( + spec, + validator_count, + None, + None, + Config::default(), + true, + NodeCustodyType::Supernode, ) .await } @@ -78,6 +93,7 @@ impl InteractiveTester { mutator: Option>, config: Config, use_mock_builder: bool, + node_custody_type: NodeCustodyType, ) -> Self { let mut harness_builder = BeaconChainHarness::builder(E::default()) .spec_or_default(spec.map(Arc::new)) @@ -93,6 +109,8 @@ impl InteractiveTester { .fresh_ephemeral_store() }; + harness_builder = harness_builder.node_custody_type(node_custody_type); + // Add a mutator for the beacon chain builder which will be called in // `HarnessBuilder::build`. if let Some(mutator) = mutator { diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 1b79c13d76..33f462fa5e 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -1,3 +1,4 @@ +use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::test_utils::test_spec; use beacon_chain::{ GossipVerifiedBlock, IntoGossipVerifiedBlock, WhenSlotSkipped, @@ -1967,6 +1968,7 @@ pub async fn duplicate_block_status_code() { ..Config::default() }, true, + NodeCustodyType::Fullnode, ) .await; diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 62a3461276..50cf866b6a 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -1,4 +1,5 @@ //! Tests for API behaviour across fork boundaries. 
+use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::{ StateSkipConfig, test_utils::{DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME, RelativeSyncCommittee}, @@ -426,6 +427,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { None, Default::default(), true, + NodeCustodyType::Fullnode, ) .await; let harness = &tester.harness; diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 5b016a7de4..83cb70a7a3 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1,4 +1,5 @@ //! Generic tests that make use of the (newer) `InteractiveApiTester` +use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::{ ChainConfig, chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, @@ -76,6 +77,7 @@ async fn state_by_root_pruned_from_fork_choice() { None, Default::default(), false, + NodeCustodyType::Fullnode, ) .await; @@ -433,6 +435,7 @@ pub async fn proposer_boost_re_org_test( })), Default::default(), false, + NodeCustodyType::Fullnode, ) .await; let harness = &tester.harness; @@ -1017,10 +1020,9 @@ async fn proposer_duties_with_gossip_tolerance() { assert_eq!( proposer_duties_tolerant_current_epoch.dependent_root, head_state - .proposer_shuffling_decision_root_at_epoch( + .legacy_proposer_shuffling_decision_root_at_epoch( tolerant_current_epoch, head_block_root, - spec ) .unwrap() ); @@ -1050,6 +1052,68 @@ async fn proposer_duties_with_gossip_tolerance() { ); } +// Test that a request to `lighthouse/custody/backfill` succeeds by verifying that `CustodyContext` and `DataColumnCustodyInfo` +// have been updated with the correct values. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn lighthouse_restart_custody_backfill() { + let spec = test_spec::(); + + // Skip pre-Fulu. 
+ if !spec.is_fulu_scheduled() { + return; + } + + let validator_count = 24; + + let tester = InteractiveTester::::new_supernode(Some(spec), validator_count).await; + let harness = &tester.harness; + let spec = &harness.spec; + let client = &tester.client; + let min_cgc = spec.custody_requirement; + let max_cgc = spec.number_of_custody_groups; + + let num_blocks = 2 * E::slots_per_epoch(); + + let custody_context = harness.chain.data_availability_checker.custody_context(); + + harness.advance_slot(); + harness + .extend_chain_with_sync( + num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + SyncCommitteeStrategy::NoValidators, + LightClientStrategy::Disabled, + ) + .await; + + let cgc_at_head = custody_context.custody_group_count_at_head(spec); + let earliest_data_column_epoch = harness.chain.earliest_custodied_data_column_epoch(); + + assert_eq!(cgc_at_head, max_cgc); + assert_eq!(earliest_data_column_epoch, None); + + custody_context + .update_and_backfill_custody_count_at_epoch(harness.chain.epoch().unwrap(), cgc_at_head); + client.post_lighthouse_custody_backfill().await.unwrap(); + + let cgc_at_head = custody_context.custody_group_count_at_head(spec); + let cgc_at_previous_epoch = + custody_context.custody_group_count_at_epoch(harness.chain.epoch().unwrap() - 1, spec); + let earliest_data_column_epoch = harness.chain.earliest_custodied_data_column_epoch(); + + // `DataColumnCustodyInfo` should have been updated to the head epoch + assert_eq!( + earliest_data_column_epoch, + Some(harness.chain.epoch().unwrap() + 1) + ); + // Cgc requirements should have stayed the same at head + assert_eq!(cgc_at_head, max_cgc); + // Cgc requirements at the previous epoch should be `min_cgc` + // This allows for custody backfill to re-fetch columns for this epoch. 
+ assert_eq!(cgc_at_previous_epoch, min_cgc); +} + // Test that a request for next epoch proposer duties suceeds when the current slot clock is within // gossip clock disparity (500ms) of the new epoch. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 7441e92871..5fa2361f28 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -497,9 +497,11 @@ impl NetworkBeaconProcessor { self: &Arc, batch_id: CustodyBackfillBatchId, data_columns: DataColumnSidecarList, + expected_cgc: u64, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = move || processor.process_historic_data_columns(batch_id, data_columns); + let process_fn = + move || processor.process_historic_data_columns(batch_id, data_columns, expected_cgc); let work = Work::ChainSegmentBackfill(Box::new(process_fn)); diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 41b12fa01b..41160fcfe4 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -426,6 +426,7 @@ impl NetworkBeaconProcessor { &self, batch_id: CustodyBackfillBatchId, downloaded_columns: DataColumnSidecarList, + expected_cgc: u64, ) { let _guard = debug_span!( SPAN_CUSTODY_BACKFILL_SYNC_IMPORT_COLUMNS, @@ -435,10 +436,11 @@ impl NetworkBeaconProcessor { .entered(); let sent_columns = downloaded_columns.len(); - let result = match self - .chain - .import_historical_data_column_batch(batch_id.epoch, downloaded_columns) - { + let result = match self.chain.import_historical_data_column_batch( + batch_id.epoch, + downloaded_columns, + expected_cgc, + ) { Ok(imported_columns) => { metrics::inc_counter_by( 
&metrics::BEACON_PROCESSOR_CUSTODY_BACKFILL_COLUMN_IMPORT_SUCCESS_TOTAL, diff --git a/beacon_node/network/src/sync/custody_backfill_sync/mod.rs b/beacon_node/network/src/sync/custody_backfill_sync/mod.rs index 69df3422e6..bb2c6799f1 100644 --- a/beacon_node/network/src/sync/custody_backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/custody_backfill_sync/mod.rs @@ -382,11 +382,9 @@ impl CustodyBackFillSync { return None; }; - let mut missing_columns = HashSet::new(); - // Skip all batches (Epochs) that don't have missing columns. for epoch in Epoch::range_inclusive_rev(self.to_be_downloaded, column_da_boundary) { - missing_columns = self.beacon_chain.get_missing_columns_for_epoch(epoch); + let missing_columns = self.beacon_chain.get_missing_columns_for_epoch(epoch); if !missing_columns.is_empty() { self.to_be_downloaded = epoch; @@ -445,6 +443,7 @@ impl CustodyBackFillSync { self.include_next_batch() } Entry::Vacant(entry) => { + let missing_columns = self.beacon_chain.get_missing_columns_for_epoch(batch_id); entry.insert(BatchInfo::new( &batch_id, CUSTODY_BACKFILL_EPOCHS_PER_BATCH, @@ -504,6 +503,7 @@ impl CustodyBackFillSync { run_id: self.run_id, }, data_columns, + self.cgc, ) { crit!( msg = "process_batch", diff --git a/beacon_node/network/src/sync/range_data_column_batch_request.rs b/beacon_node/network/src/sync/range_data_column_batch_request.rs index 542d99d97c..72e2fb2d5b 100644 --- a/beacon_node/network/src/sync/range_data_column_batch_request.rs +++ b/beacon_node/network/src/sync/range_data_column_batch_request.rs @@ -70,16 +70,17 @@ impl RangeDataColumnBatchRequest { HashMap::new(); let mut column_to_peer_id: HashMap = HashMap::new(); - for column in self - .requests - .values() - .filter_map(|req| req.to_finished()) - .flatten() - { - received_columns_for_slot - .entry(column.slot()) - .or_default() - .push(column.clone()); + for req in self.requests.values() { + let Some(columns) = req.to_finished() else { + return None; + }; + + for column in 
columns { + received_columns_for_slot + .entry(column.slot()) + .or_default() + .push(column.clone()); + } } // Note: this assumes that only 1 peer is responsible for a column diff --git a/book/src/api_lighthouse.md b/book/src/api_lighthouse.md index fa093be3f6..0442bf4ec0 100644 --- a/book/src/api_lighthouse.md +++ b/book/src/api_lighthouse.md @@ -468,6 +468,16 @@ curl "http://localhost:5052/lighthouse/custody/info" | jq } ``` +## `/lighthouse/custody/backfill` + +Starts a custody backfill sync from the next epoch with the node's latest custody requirements. The sync won't begin immediately; it waits until the next epoch is finalized before triggering. + +This endpoint should only be used to fix nodes that may have partial custody columns due to a prior backfill bug (present in v8.0.0-rc.2). Use with caution as it re-downloads all historic custody data columns and may consume significant bandwidth. + +```bash +curl -X POST "http://localhost:5052/lighthouse/custody/backfill" +``` + ## `/lighthouse/merge_readiness` Returns the current difficulty and terminal total difficulty of the network. Before [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, you will see that the current difficulty is less than the terminal total difficulty, An example is shown below: diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index f65b5a07b6..4f9a049e44 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -208,6 +208,19 @@ impl BeaconNodeHttpClient { self.get(path).await } + /// `POST lighthouse/custody/backfill` + pub async fn post_lighthouse_custody_backfill(&self) -> Result<(), Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("lighthouse") + .push("custody") + .push("backfill"); + + self.post(path, &()).await + } + /* * Note: * diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index ca7f85b512..49168018cb 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -6,7 +6,9 @@ PRESET_BASE: 'mainnet' # Free-form short name of the network that this configuration applies to - known # canonical network names include: # * 'mainnet' - there can be only one +# * 'sepolia' - testnet # * 'holesky' - testnet +# * 'hoodi' - testnet # Must match the regex: [a-z0-9\-] CONFIG_NAME: 'mainnet' @@ -52,23 +54,37 @@ ELECTRA_FORK_VERSION: 0x05000000 ELECTRA_FORK_EPOCH: 364032 # May 7, 2025, 10:05:11am UTC # Fulu FULU_FORK_VERSION: 0x06000000 -FULU_FORK_EPOCH: 18446744073709551615 +FULU_FORK_EPOCH: 411392 # December 3, 2025, 09:49:11pm UTC # Gloas GLOAS_FORK_VERSION: 0x07000000 GLOAS_FORK_EPOCH: 18446744073709551615 # Time parameters # --------------------------------------------------------------- -# 12 seconds +# 12 seconds (*deprecated*) SECONDS_PER_SLOT: 12 +# 12000 milliseconds +SLOT_DURATION_MS: 12000 # 14 (estimate from Eth1 mainnet) SECONDS_PER_ETH1_BLOCK: 14 -# 2**8 (= 256) epochs ~27 hours +# 2**8 (= 256) epochs MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -# 2**8 (= 256) epochs ~27 hours +# 2**8 (= 256) epochs SHARD_COMMITTEE_PERIOD: 256 -# 2**11 (= 2,048) Eth1 blocks ~8 hours +# 2**11 (= 2,048) Eth1 blocks ETH1_FOLLOW_DISTANCE: 2048 +# 1667 basis points, ~17% of SLOT_DURATION_MS +PROPOSER_REORG_CUTOFF_BPS: 1667 +# 3333 basis points, ~33% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS: 6667 + +# Altair +# 3333 basis points, ~33% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS: 3333 +# 6667 basis points, ~67% of 
SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS: 6667 # Validator cycle # --------------------------------------------------------------- @@ -78,13 +94,21 @@ INACTIVITY_SCORE_BIAS: 4 INACTIVITY_SCORE_RECOVERY_RATE: 16 # 2**4 * 10**9 (= 16,000,000,000) Gwei EJECTION_BALANCE: 16000000000 -# 2**2 (= 4) +# 2**2 (= 4) validators MIN_PER_EPOCH_CHURN_LIMIT: 4 # 2**16 (= 65,536) CHURN_LIMIT_QUOTIENT: 65536 -# [New in Deneb:EIP7514] 2**3 (= 8) + +# Deneb +# 2**3 (= 8) (*deprecated*) MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 +# Electra +# 2**7 * 10**9 (= 128,000,000,000) Gwei +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) Gwei +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 + # Fork choice # --------------------------------------------------------------- # 40% @@ -93,7 +117,7 @@ PROPOSER_SCORE_BOOST: 40 REORG_HEAD_WEIGHT_THRESHOLD: 20 # 160% REORG_PARENT_WEIGHT_THRESHOLD: 160 -# `2` epochs +# 2 epochs REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 # Deposit contract @@ -105,18 +129,19 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa # Networking # --------------------------------------------------------------- -# `10 * 2**20` (= 10485760, 10 MiB) +# 10 * 2**20 (= 10,485,760) bytes, 10 MiB MAX_PAYLOAD_SIZE: 10485760 -# `2**10` (= 1024) +# 2**10 (= 1,024) blocks MAX_REQUEST_BLOCKS: 1024 -# `2**8` (= 256) +# 2**8 (= 256) epochs EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 -# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) +# MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33,024) epochs MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 # 5s TTFB_TIMEOUT: 5 # 10s RESP_TIMEOUT: 10 +# 2**5 (= 32) slots ATTESTATION_PROPAGATION_SLOT_RANGE: 32 # 500ms MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 @@ -124,45 +149,59 @@ MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 # 2 subnets per node SUBNETS_PER_NODE: 2 -# 2**8 (= 64) +# 2**6 (= 64) subnets ATTESTATION_SUBNET_COUNT: 64 +# 
0 bits ATTESTATION_SUBNET_EXTRA_BITS: 0 -# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS +# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS (= 6 + 0) bits ATTESTATION_SUBNET_PREFIX_BITS: 6 ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3 # Deneb -# `2**7` (=128) +# 2**7 (= 128) blocks MAX_REQUEST_BLOCKS_DENEB: 128 -# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK -MAX_REQUEST_BLOB_SIDECARS: 768 -# `2**12` (= 4096 epochs, ~18 days) +# 2**12 (= 4,096) epochs MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 -# `6` +# 6 subnets BLOB_SIDECAR_SUBNET_COUNT: 6 -# `uint64(6)` +# 6 blobs MAX_BLOBS_PER_BLOCK: 6 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK (= 128 * 6) sidecars +MAX_REQUEST_BLOB_SIDECARS: 768 # Electra -# 2**7 * 10**9 (= 128,000,000,000) -MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 -# 2**8 * 10**9 (= 256,000,000,000) -MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 -# `9` +# 9 subnets BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 -# `uint64(9)` +# 9 blobs MAX_BLOBS_PER_BLOCK_ELECTRA: 9 -# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA (= 128 * 9) sidecars MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 # Fulu +# 2**7 (= 128) groups NUMBER_OF_CUSTODY_GROUPS: 128 +# 2**7 (= 128) subnets DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +# MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS (= 128 * 128) sidecars MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 +# 2**3 (= 8) samples SAMPLES_PER_SLOT: 8 +# 2**2 (= 4) sidecars CUSTODY_REQUIREMENT: 4 +# 2**3 (= 8) sidecars VALIDATOR_CUSTODY_REQUIREMENT: 8 +# 2**5 * 10**9 (= 32,000,000,000) Gwei BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 +# 2**12 (= 4,096) epochs MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 -# Gloas \ No newline at end of file +# Blob Scheduling +# --------------------------------------------------------------- + +BLOB_SCHEDULE: + - EPOCH: 412672 # December 9, 2025, 02:21:11pm UTC + MAX_BLOBS_PER_BLOCK: 15 + - EPOCH: 
419072 # January 7, 2026, 01:01:11am UTC + MAX_BLOBS_PER_BLOCK: 21 + +# Gloas diff --git a/consensus/types/presets/gnosis/electra.yaml b/consensus/types/presets/gnosis/electra.yaml index 42afbb233e..6885667c6e 100644 --- a/consensus/types/presets/gnosis/electra.yaml +++ b/consensus/types/presets/gnosis/electra.yaml @@ -41,8 +41,7 @@ MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 # Withdrawals processing # --------------------------------------------------------------- -# 2**3 ( = 8) pending withdrawals -MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 8 +MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 6 # Pending deposits processing # --------------------------------------------------------------- diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 2b03fda303..d1574be7cc 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -910,6 +910,22 @@ impl BeaconState { } } + /// Returns the block root at the last slot of `epoch - 1`. + /// + /// This can be deleted after Glamsterdam and the removal of the v1 proposer duties endpoint. + pub fn legacy_proposer_shuffling_decision_root_at_epoch( + &self, + epoch: Epoch, + head_block_root: Hash256, + ) -> Result { + let decision_slot = epoch.saturating_sub(1u64).end_slot(E::slots_per_epoch()); + if self.slot() <= decision_slot { + Ok(head_block_root) + } else { + self.get_block_root(decision_slot).copied() + } + } + /// Returns the block root which decided the proposer shuffling for the current epoch. This root /// can be used to key this proposer shuffling. 
/// diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index fd87c8c222..5dedd930c6 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -87,12 +87,18 @@ pub struct ChainSpec { */ pub genesis_delay: u64, pub seconds_per_slot: u64, + pub slot_duration_ms: u64, pub min_attestation_inclusion_delay: u64, pub min_seed_lookahead: Epoch, pub max_seed_lookahead: Epoch, pub min_epochs_to_inactivity_penalty: u64, pub min_validator_withdrawability_delay: Epoch, pub shard_committee_period: u64, + pub proposer_reorg_cutoff_bps: u64, + pub attestation_due_bps: u64, + pub aggregate_due_bps: u64, + pub sync_message_due_bps: u64, + pub contribution_due_bps: u64, /* * Reward and penalty quotients @@ -964,12 +970,18 @@ impl ChainSpec { */ genesis_delay: 604800, // 7 days seconds_per_slot: 12, + slot_duration_ms: 12000, min_attestation_inclusion_delay: 1, min_seed_lookahead: Epoch::new(1), max_seed_lookahead: Epoch::new(4), min_epochs_to_inactivity_penalty: 4, min_validator_withdrawability_delay: Epoch::new(256), shard_committee_period: 256, + proposer_reorg_cutoff_bps: 1667, + attestation_due_bps: 3333, + aggregate_due_bps: 6667, + sync_message_due_bps: 3333, + contribution_due_bps: 6667, /* * Reward and penalty quotients @@ -1098,7 +1110,7 @@ impl ChainSpec { * Fulu hard fork params */ fulu_fork_version: [0x06, 0x00, 0x00, 0x00], - fulu_fork_epoch: None, + fulu_fork_epoch: Some(Epoch::new(411392)), custody_requirement: 4, number_of_custody_groups: 128, data_column_sidecar_subnet_count: 128, @@ -1158,7 +1170,16 @@ impl ChainSpec { /* * Networking Fulu specific */ - blob_schedule: BlobSchedule::default(), + blob_schedule: BlobSchedule::new(vec![ + BlobParameters { + epoch: Epoch::new(412672), + max_blobs_per_block: 15, + }, + BlobParameters { + epoch: Epoch::new(419072), + max_blobs_per_block: 21, + }, + ]), min_epochs_for_data_column_sidecars_requests: default_min_epochs_for_data_column_sidecars_requests(), 
max_data_columns_by_root_request: default_data_columns_by_root_request(), @@ -1310,12 +1331,18 @@ impl ChainSpec { */ genesis_delay: 6000, // 100 minutes seconds_per_slot: 5, + slot_duration_ms: 5000, min_attestation_inclusion_delay: 1, min_seed_lookahead: Epoch::new(1), max_seed_lookahead: Epoch::new(4), min_epochs_to_inactivity_penalty: 4, min_validator_withdrawability_delay: Epoch::new(256), shard_committee_period: 256, + proposer_reorg_cutoff_bps: 1667, + attestation_due_bps: 3333, + aggregate_due_bps: 6667, + sync_message_due_bps: 3333, + contribution_due_bps: 6667, /* * Reward and penalty quotients @@ -1429,8 +1456,7 @@ impl ChainSpec { .expect("pow does not overflow"), whistleblower_reward_quotient_electra: u64::checked_pow(2, 12) .expect("pow does not overflow"), - max_pending_partials_per_withdrawals_sweep: u64::checked_pow(2, 3) - .expect("pow does not overflow"), + max_pending_partials_per_withdrawals_sweep: 6, min_per_epoch_churn_limit_electra: option_wrapper(|| { u64::checked_pow(2, 7)?.checked_mul(u64::checked_pow(10, 9)?) }) diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 47b9902345..8e9d438a24 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -920,7 +920,7 @@ impl Tester { let cached_head = self.harness.chain.canonical_head.cached_head(); let next_slot = cached_head.snapshot.beacon_block.slot() + 1; let next_slot_epoch = next_slot.epoch(E::slots_per_epoch()); - let (proposer_indices, decision_root, _, fork) = + let (proposer_indices, decision_root, _, _, fork) = compute_proposer_duties_from_head(next_slot_epoch, &self.harness.chain).unwrap(); let proposer_index = proposer_indices[next_slot.as_usize() % E::slots_per_epoch() as usize];