Fix wrong columns getting processed on a CGC change (#7792)

This PR fixes a bug where the wrong columns could get processed immediately after a CGC (custody group count) increase.

Scenario:
- The node's CGC increased due to additional validators being attached to it (let's say from 10 to 11)
- The new CGC is advertised and the new subnets are subscribed to immediately; however, the change won't take effect in the data availability check until the next epoch (see [this](ab0e8870b4/beacon_node/beacon_chain/src/validator_custody.rs (L93-L99))). The data availability checker still only requires 10 columns for the current epoch.
- During this time, a data column for the additional custody column (let's say column 11) may arrive via gossip, since we're already subscribed to the topic, and may be incorrectly used to satisfy the existing data availability requirement (10 columns). This results in the additional column (instead of a required one) getting persisted, causing database inconsistency.
This commit is contained in:
Jimmy Chen
2025-08-07 10:45:04 +10:00
committed by GitHub
parent 9c972201bc
commit 8bc6693dac
27 changed files with 577 additions and 277 deletions

View File

@@ -606,6 +606,15 @@ where
let chain = builder.build().expect("should build");
chain
.data_availability_checker
.custody_context()
.init_ordered_data_columns_from_custody_groups(
(0..spec.number_of_custody_groups).collect(),
&spec,
)
.expect("should initialise custody context");
BeaconChainHarness {
spec: chain.spec.clone(),
chain: Arc::new(chain),
@@ -773,13 +782,6 @@ where
(0..self.validator_keypairs.len()).collect()
}
/// Returns the number of data columns this node samples, as reported by the
/// data availability checker's custody context.
///
/// NOTE(review): removed by this commit — callers now use the epoch-aware
/// `sampling_columns_for_epoch` instead, because a count alone cannot say
/// *which* column indices are required for a given epoch (the bug this PR fixes).
/// NOTE(review): the `None` first argument to `num_of_data_columns_to_sample`
/// presumably means "no specific epoch / current value" — confirm against
/// `CustodyContext`, which is not visible here.
pub fn get_sampling_column_count(&self) -> usize {
self.chain
.data_availability_checker
.custody_context()
// Cast is safe in practice: column counts are small (bounded by the
// spec's number of custody groups), far below usize::MAX.
.num_of_data_columns_to_sample(None, &self.chain.spec) as usize
}
/// Convenience accessor: the number of slots per epoch for the configured
/// eth spec `E` (delegates to the associated `EthSpec` constant).
pub fn slots_per_epoch(&self) -> u64 {
E::slots_per_epoch()
}
@@ -2385,7 +2387,8 @@ where
blob_items: Option<(KzgProofs<E>, BlobsList<E>)>,
) -> Result<RpcBlock<E>, BlockError> {
Ok(if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) {
let sampling_column_count = self.get_sampling_column_count();
let epoch = block.slot().epoch(E::slots_per_epoch());
let sampling_columns = self.chain.sampling_columns_for_epoch(epoch);
if blob_items.is_some_and(|(_, blobs)| !blobs.is_empty()) {
// Note: this method ignores the actual custody columns and just take the first
@@ -2393,7 +2396,7 @@ where
// currently have any knowledge of the columns being custodied.
let columns = generate_data_column_sidecars_from_block(&block, &self.spec)
.into_iter()
.take(sampling_column_count)
.filter(|d| sampling_columns.contains(&d.index))
.map(CustodyDataColumn::from_asserted_custody)
.collect::<Vec<_>>();
RpcBlock::new_with_custody_columns(Some(block_root), block, columns, &self.spec)?
@@ -3123,17 +3126,22 @@ where
let is_peerdas_enabled = self.chain.spec.is_peer_das_enabled_for_epoch(block.epoch());
if is_peerdas_enabled {
let custody_columns = custody_columns_opt.unwrap_or_else(|| {
let sampling_column_count = self.get_sampling_column_count() as u64;
(0..sampling_column_count).collect()
let epoch = block.slot().epoch(E::slots_per_epoch());
self.chain
.sampling_columns_for_epoch(epoch)
.iter()
.copied()
.collect()
});
let verified_columns = generate_data_column_sidecars_from_block(block, &self.spec)
.into_iter()
.filter(|c| custody_columns.contains(&c.index))
.map(|sidecar| {
let column_index = sidecar.index;
let subnet_id =
DataColumnSubnetId::from_column_index(sidecar.index, &self.spec);
self.chain
.verify_data_column_sidecar_for_gossip(sidecar, column_index)
.verify_data_column_sidecar_for_gossip(sidecar, subnet_id)
})
.collect::<Result<Vec<_>, _>>()
.unwrap();