Merge remote-tracking branch 'origin/stable' into unstable-merge-v8

This commit is contained in:
Michael Sproul
2025-11-04 16:08:34 +11:00
23 changed files with 505 additions and 96 deletions

View File

@@ -166,10 +166,17 @@ impl BeaconProposerCache {
}
/// Compute the proposer duties using the head state without cache.
///
/// Return:
/// - Proposer indices.
/// - True dependent root.
/// - Legacy dependent root (last block of epoch `N - 1`).
/// - Head execution status.
/// - Fork at `request_epoch`.
pub fn compute_proposer_duties_from_head<T: BeaconChainTypes>(
request_epoch: Epoch,
chain: &BeaconChain<T>,
) -> Result<(Vec<usize>, Hash256, ExecutionStatus, Fork), BeaconChainError> {
) -> Result<(Vec<usize>, Hash256, Hash256, ExecutionStatus, Fork), BeaconChainError> {
// Atomically collect information about the head whilst holding the canonical head `Arc` as
// short as possible.
let (mut state, head_state_root, head_block_root) = {
@@ -203,11 +210,23 @@ pub fn compute_proposer_duties_from_head<T: BeaconChainTypes>(
.proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root, &chain.spec)
.map_err(BeaconChainError::from)?;
// This is only required because the V1 proposer duties endpoint spec wasn't updated for Fulu. We
// can delete this once the V1 endpoint is deprecated at the Glamsterdam fork.
let legacy_dependent_root = state
.legacy_proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root)
.map_err(BeaconChainError::from)?;
// Use fork_at_epoch rather than the state's fork, because post-Fulu we may not have advanced
// the state completely into the new epoch.
let fork = chain.spec.fork_at_epoch(request_epoch);
Ok((indices, dependent_root, execution_status, fork))
Ok((
indices,
dependent_root,
legacy_dependent_root,
execution_status,
fork,
))
}
/// If required, advance `state` to the epoch required to determine proposer indices in `target_epoch`.

View File

@@ -120,9 +120,7 @@ impl ValidatorRegistrations {
let effective_epoch =
(current_slot + effective_delay_slots).epoch(E::slots_per_epoch()) + 1;
self.epoch_validator_custody_requirements
.entry(effective_epoch)
.and_modify(|old_custody| *old_custody = validator_custody_requirement)
.or_insert(validator_custody_requirement);
.insert(effective_epoch, validator_custody_requirement);
Some((effective_epoch, validator_custody_requirement))
} else {
None
@@ -134,8 +132,17 @@ impl ValidatorRegistrations {
///
/// This is done by pruning all values on/after `effective_epoch` and updating the map to store
/// the latest validator custody requirements for the `effective_epoch`.
pub fn backfill_validator_custody_requirements(&mut self, effective_epoch: Epoch) {
pub fn backfill_validator_custody_requirements(
&mut self,
effective_epoch: Epoch,
expected_cgc: u64,
) {
if let Some(latest_validator_custody) = self.latest_validator_custody_requirement() {
// If the expected cgc isn't equal to the latest validator custody, a very recent cgc change may have occurred.
// We should not update the mapping.
if expected_cgc != latest_validator_custody {
return;
}
// Delete records if
// 1. The epoch is greater than or equal to `effective_epoch`
// 2. The cgc requirements match the latest validator custody requirements
@@ -145,11 +152,25 @@ impl ValidatorRegistrations {
});
self.epoch_validator_custody_requirements
.entry(effective_epoch)
.and_modify(|old_custody| *old_custody = latest_validator_custody)
.or_insert(latest_validator_custody);
.insert(effective_epoch, latest_validator_custody);
}
}
/// Prune all `epoch -> cgc` records prior to `effective_epoch` and pin the entry at
/// `effective_epoch` to the latest validator custody requirement.
///
/// This is used to restart custody backfill sync at `effective_epoch`.
pub fn reset_validator_custody_requirements(&mut self, effective_epoch: Epoch) {
    // Nothing to reset if no validator custody requirement has ever been recorded.
    let Some(latest_requirement) = self.latest_validator_custody_requirement() else {
        return;
    };
    // Drop every record that pre-dates `effective_epoch`...
    self.epoch_validator_custody_requirements
        .retain(|&epoch, _| epoch >= effective_epoch);
    // ...and anchor `effective_epoch` itself at the most recent requirement.
    self.epoch_validator_custody_requirements
        .insert(effective_epoch, latest_requirement);
}
}
/// Given the `validator_custody_units`, return the custody requirement based on
@@ -517,10 +538,22 @@ impl<E: EthSpec> CustodyContext<E> {
/// The node has completed backfill for this epoch. Update the internal records so the function
/// [`Self::custody_columns_for_epoch()`] returns up-to-date results.
pub fn update_and_backfill_custody_count_at_epoch(&self, effective_epoch: Epoch) {
pub fn update_and_backfill_custody_count_at_epoch(
&self,
effective_epoch: Epoch,
expected_cgc: u64,
) {
self.validator_registrations
.write()
.backfill_validator_custody_requirements(effective_epoch);
.backfill_validator_custody_requirements(effective_epoch, expected_cgc);
}
/// The node is attempting to restart custody backfill. Update the internal records so that
/// custody backfill can start backfilling at `effective_epoch`.
pub fn reset_validator_custody_requirements(&self, effective_epoch: Epoch) {
    // Take the write lock once and delegate to the registrations-level reset.
    let mut registrations = self.validator_registrations.write();
    registrations.reset_validator_custody_requirements(effective_epoch);
}
}
@@ -604,11 +637,13 @@ mod tests {
custody_context: &CustodyContext<E>,
start_epoch: Epoch,
end_epoch: Epoch,
expected_cgc: u64,
) {
assert!(start_epoch >= end_epoch);
// Call from start_epoch down to end_epoch (inclusive), simulating backfill
for epoch in (end_epoch.as_u64()..=start_epoch.as_u64()).rev() {
custody_context.update_and_backfill_custody_count_at_epoch(Epoch::new(epoch));
custody_context
.update_and_backfill_custody_count_at_epoch(Epoch::new(epoch), expected_cgc);
}
}
@@ -1368,7 +1403,7 @@ mod tests {
);
// Backfill from epoch 20 down to 15 (simulating backfill)
complete_backfill_for_epochs(&custody_context, head_epoch, Epoch::new(15));
complete_backfill_for_epochs(&custody_context, head_epoch, Epoch::new(15), final_cgc);
// After backfilling to epoch 15, it should use latest CGC (32)
assert_eq!(
@@ -1406,7 +1441,7 @@ mod tests {
let custody_context = setup_custody_context(&spec, head_epoch, epoch_and_cgc_tuples);
// Backfill to epoch 15 (between the two CGC increases)
complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(15));
complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(15), final_cgc);
// Verify epochs 15 - 20 return latest CGC (32)
for epoch in 15..=20 {
@@ -1424,4 +1459,105 @@ mod tests {
);
}
}
#[test]
fn attempt_backfill_with_invalid_cgc() {
    let spec = E::default_spec();
    let initial_cgc = 8u64;
    let mid_cgc = 16u64;
    let final_cgc = 32u64;

    // Setup: Node restart after multiple validator registrations causing CGC increases.
    let head_epoch = Epoch::new(20);
    let epoch_and_cgc_tuples = vec![
        (Epoch::new(0), initial_cgc),
        (Epoch::new(10), mid_cgc),
        (head_epoch, final_cgc),
    ];

    let custody_context = setup_custody_context(&spec, head_epoch, epoch_and_cgc_tuples);

    // Backfill to epoch 15 (between the two CGC increases).
    complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(15), final_cgc);

    // Verify epochs 15 - 20 return latest CGC (32).
    for epoch in 15..=20 {
        assert_eq!(
            custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec),
            final_cgc,
        );
    }

    // Attempt backfill with an incorrect (stale) cgc value. This must be a no-op: the
    // backfill path rejects updates whose expected cgc doesn't match the latest requirement.
    complete_backfill_for_epochs(
        &custody_context,
        Epoch::new(20),
        Epoch::new(15),
        initial_cgc,
    );

    // Verify epochs 15 - 20 still return latest CGC (32).
    for epoch in 15..=20 {
        assert_eq!(
            custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec),
            final_cgc,
        );
    }

    // Verify epochs 10 - 14 still return mid_cgc (16).
    // Fixed: was `10..14`, which silently skipped epoch 14 despite the comment.
    for epoch in 10..=14 {
        assert_eq!(
            custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec),
            mid_cgc,
        );
    }
}
#[test]
fn reset_validator_custody_requirements() {
    let spec = E::default_spec();
    let minimum_cgc = 4u64;
    let initial_cgc = 8u64;
    let mid_cgc = 16u64;
    let final_cgc = 32u64;

    // Setup: Node restart after multiple validator registrations causing CGC increases.
    let head_epoch = Epoch::new(20);
    let custody_context = setup_custody_context(
        &spec,
        head_epoch,
        vec![
            (Epoch::new(0), initial_cgc),
            (Epoch::new(10), mid_cgc),
            (head_epoch, final_cgc),
        ],
    );

    // Backfill from epoch 20 all the way down to epoch 9.
    complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(9), final_cgc);

    // Reset the validator custody requirements so that custody backfill restarts at
    // `head_epoch` with the latest cgc requirement.
    custody_context.reset_validator_custody_requirements(head_epoch);

    // Every epoch before the reset point now falls back to the minimum cgc requirement.
    for epoch in (0..=19).map(Epoch::new) {
        assert_eq!(
            custody_context.custody_group_count_at_epoch(epoch, &spec),
            minimum_cgc,
        );
    }

    // The reset point itself (epoch 20) retains the latest CGC of 32.
    assert_eq!(
        custody_context.custody_group_count_at_epoch(head_epoch, &spec),
        final_cgc
    );

    // Re-run backfill all the way down to epoch 0.
    complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(0), final_cgc);

    // All epochs 0 - 20 now report the final cgc requirement.
    for epoch in (0..=20).map(Epoch::new) {
        assert_eq!(
            custody_context.custody_group_count_at_epoch(epoch, &spec),
            final_cgc,
        );
    }
}
}

View File

@@ -54,6 +54,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
&self,
epoch: Epoch,
historical_data_column_sidecar_list: DataColumnSidecarList<T::EthSpec>,
expected_cgc: u64,
) -> Result<usize, HistoricalDataColumnError> {
let mut total_imported = 0;
let mut ops = vec![];
@@ -88,11 +89,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.get_data_column(&block_root, &data_column.index)?
.is_some()
{
debug!(
block_root = ?block_root,
column_index = data_column.index,
"Skipping data column import as identical data column exists"
);
continue;
}
if block_root != data_column.block_root() {
@@ -136,7 +132,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self.data_availability_checker
.custody_context()
.update_and_backfill_custody_count_at_epoch(epoch);
.update_and_backfill_custody_count_at_epoch(epoch, expected_cgc);
self.safely_backfill_data_column_custody_info(epoch)
.map_err(|e| HistoricalDataColumnError::BeaconChainError(Box::new(e)))?;

View File

@@ -1561,7 +1561,7 @@ async fn proposer_duties_from_head_fulu() {
// Compute the proposer duties at the next epoch from the head
let next_epoch = head_state.next_epoch().unwrap();
let (_indices, dependent_root, _, fork) =
let (_indices, dependent_root, legacy_dependent_root, _, fork) =
compute_proposer_duties_from_head(next_epoch, &harness.chain).unwrap();
assert_eq!(
@@ -1570,6 +1570,8 @@ async fn proposer_duties_from_head_fulu() {
.proposer_shuffling_decision_root_at_epoch(next_epoch, head_block_root.into(), spec)
.unwrap()
);
assert_ne!(dependent_root, legacy_dependent_root);
assert_eq!(legacy_dependent_root, Hash256::from(head_block_root));
assert_eq!(fork, head_state.fork());
}
@@ -1617,7 +1619,7 @@ async fn proposer_lookahead_gloas_fork_epoch() {
assert_eq!(head_state.current_epoch(), gloas_fork_epoch - 1);
// Compute the proposer duties at the fork epoch from the head.
let (indices, dependent_root, _, fork) =
let (indices, dependent_root, legacy_dependent_root, _, fork) =
compute_proposer_duties_from_head(gloas_fork_epoch, &harness.chain).unwrap();
assert_eq!(
@@ -1630,6 +1632,7 @@ async fn proposer_lookahead_gloas_fork_epoch() {
)
.unwrap()
);
assert_ne!(dependent_root, legacy_dependent_root);
assert_ne!(fork, head_state.fork());
assert_eq!(fork, spec.fork_at_epoch(gloas_fork_epoch));
@@ -1639,7 +1642,7 @@ async fn proposer_lookahead_gloas_fork_epoch() {
.add_attested_blocks_at_slots(head_state, head_state_root, &gloas_slots, &all_validators)
.await;
let (no_lookahead_indices, no_lookahead_dependent_root, _, no_lookahead_fork) =
let (no_lookahead_indices, no_lookahead_dependent_root, _, _, no_lookahead_fork) =
compute_proposer_duties_from_head(gloas_fork_epoch, &harness.chain).unwrap();
assert_eq!(no_lookahead_indices, indices);
@@ -3182,6 +3185,8 @@ async fn weak_subjectivity_sync_test(
assert_eq!(store.get_anchor_info().state_upper_limit, Slot::new(0));
}
// This test prunes data columns from epoch 0 and then tries to re-import them via
// the same code paths that custody backfill sync imports data columns
#[tokio::test]
async fn test_import_historical_data_columns_batch() {
let spec = ForkName::Fulu.make_genesis_spec(E::default_spec());
@@ -3189,6 +3194,7 @@ async fn test_import_historical_data_columns_batch() {
let store = get_store_generic(&db_path, StoreConfig::default(), spec);
let start_slot = Epoch::new(0).start_slot(E::slots_per_epoch()) + 1;
let end_slot = Epoch::new(0).end_slot(E::slots_per_epoch());
let cgc = 128;
let harness = get_harness_import_all_data_columns(store.clone(), LOW_VALIDATOR_COUNT);
@@ -3208,6 +3214,7 @@ async fn test_import_historical_data_columns_batch() {
let mut data_columns_list = vec![];
// Get all data columns for epoch 0
for block in block_root_iter {
let (block_root, _) = block.unwrap();
let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap();
@@ -3228,6 +3235,7 @@ async fn test_import_historical_data_columns_batch() {
harness.advance_slot();
// Prune data columns
harness
.chain
.store
@@ -3239,21 +3247,25 @@ async fn test_import_historical_data_columns_batch() {
.forwards_iter_block_roots_until(start_slot, end_slot)
.unwrap();
// Assert that data columns no longer exist for epoch 0
for block in block_root_iter {
let (block_root, _) = block.unwrap();
let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap();
assert!(data_columns.is_none())
}
// Re-import deleted data columns
harness
.chain
.import_historical_data_column_batch(Epoch::new(0), data_columns_list)
.import_historical_data_column_batch(Epoch::new(0), data_columns_list, cgc)
.unwrap();
let block_root_iter = harness
.chain
.forwards_iter_block_roots_until(start_slot, end_slot)
.unwrap();
// Assert that data columns now exist for epoch 0
for block in block_root_iter {
let (block_root, _) = block.unwrap();
if !harness
@@ -3272,6 +3284,7 @@ async fn test_import_historical_data_columns_batch() {
}
// This should verify that a data column sidecar containing mismatched block roots should fail to be imported.
// This also covers any test cases related to data columns with incorrect/invalid/mismatched block roots.
#[tokio::test]
async fn test_import_historical_data_columns_batch_mismatched_block_root() {
let spec = ForkName::Fulu.make_genesis_spec(E::default_spec());
@@ -3279,6 +3292,7 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() {
let store = get_store_generic(&db_path, StoreConfig::default(), spec);
let start_slot = Slot::new(1);
let end_slot = Slot::new(E::slots_per_epoch() * 2 - 1);
let cgc = 128;
let harness = get_harness_import_all_data_columns(store.clone(), LOW_VALIDATOR_COUNT);
@@ -3298,6 +3312,8 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() {
let mut data_columns_list = vec![];
// Get all data columns from start_slot to end_slot
// and mutate the data columns with an invalid block root
for block in block_root_iter {
let (block_root, _) = block.unwrap();
let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap();
@@ -3323,6 +3339,7 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() {
harness.advance_slot();
// Prune blobs
harness
.chain
.store
@@ -3334,17 +3351,20 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() {
.forwards_iter_block_roots_until(start_slot, end_slot)
.unwrap();
// Assert there are no columns between start_slot and end_slot
for block in block_root_iter {
let (block_root, _) = block.unwrap();
let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap();
assert!(data_columns.is_none())
}
// Attempt to import data columns with invalid block roots and expect a failure
let error = harness
.chain
.import_historical_data_column_batch(
start_slot.epoch(E::slots_per_epoch()),
data_columns_list,
cgc,
)
.unwrap_err();
@@ -3367,6 +3387,7 @@ async fn test_import_historical_data_columns_batch_no_block_found() {
let store = get_store_generic(&db_path, StoreConfig::default(), spec);
let start_slot = Slot::new(1);
let end_slot = Slot::new(E::slots_per_epoch() * 2 - 1);
let cgc = 128;
let harness = get_harness_import_all_data_columns(store.clone(), LOW_VALIDATOR_COUNT);
@@ -3428,7 +3449,7 @@ async fn test_import_historical_data_columns_batch_no_block_found() {
let error = harness
.chain
.import_historical_data_column_batch(Epoch::new(0), data_columns_list)
.import_historical_data_column_batch(Epoch::new(0), data_columns_list, cgc)
.unwrap_err();
assert!(matches!(