Draft backfill reset

This commit is contained in:
dapplion
2025-04-04 18:53:36 -03:00
parent 89b9a023af
commit a5cbab7f3a
4 changed files with 18 additions and 15 deletions

View File

@@ -199,10 +199,14 @@ impl<E: EthSpec> Network<E> {
// Load initial CGC updates from persisted source (DB) or default to minimum CGC
let cgc_updates = initial_cgc_updates.unwrap_or_else(|| {
CGCUpdates::new(
ctx.chain_spec
.custody_group_count(config.subscribe_all_data_column_subnets),
)
let initial_cgc = if config.subscribe_all_data_column_subnets {
ctx.chain_spec.number_of_custody_groups
} else {
ctx.chain_spec.custody_requirement
};
// TODO(das): Consider setting the initial step not to slot 0, but to the first block in
// the node's DB. Such that we can adjust it when backfilling.
CGCUpdates::new(initial_cgc)
});
// Construct the metadata

View File

@@ -887,8 +887,6 @@ impl<T: BeaconChainTypes> NetworkService<T> {
.spec
.custody_group_by_balance(known_validators_balance);
// TODO(das): check the backfilled CGC and potentially update the network globals state
if next_cgc != prev_cgc {
// TODO(das): Should we consider the case where the clock is almost at the end of the epoch?
// If I/O is slow we may update the in-memory map for an epoch that's already
@@ -938,6 +936,14 @@ impl<T: BeaconChainTypes> NetworkService<T> {
}
}
// TODO(das): check the backfilled CGC and potentially update the network globals state
// IDEA:
// When we forward sync and finalize a new block, we may restart backfill again from a later
// block (the new finalized block). We will reset oldest_block to that block and fail
// backfill sync to start over from it. Then make backfill sync use a higher CGC (say 128)
// and when oldest_block is less than the oldest step with a value < 128 we can delete that
// step such that `custody_group_count(clock - da_window)` returns 128.
// Schedule an advertise CGC update for later
// TODO(das): use min_epochs_for_data_columns
let last_pruned_epoch =

View File

@@ -399,6 +399,8 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
// Compute custody column peers before sending the blocks_by_range request. If we don't have
// enough peers, error here.
let data_column_requests = if matches!(batch_type, ByRangeRequestType::BlocksAndColumns) {
// TODO(das): for backfill sync we want to return a different value if we are trying to
// close the gap
let column_indexes = self.network_globals().sampling_columns(request_start_slot);
Some(self.make_columns_by_range_requests(request.clone(), column_indexes)?)
} else {

View File

@@ -712,15 +712,6 @@ impl ChainSpec {
Ok(std::cmp::max(custody_column_count, self.samples_per_slot))
}
// TODO(das): delete in favor of custody_group_by_balance
/// Number of custody groups this node is responsible for.
///
/// A supernode custodies every group defined by the spec; any other node
/// custodies only the protocol minimum (`custody_requirement`).
pub fn custody_group_count(&self, is_supernode: bool) -> u64 {
    match is_supernode {
        true => self.number_of_custody_groups,
        false => self.custody_requirement,
    }
}
pub fn custody_group_by_balance(&self, balance_gwei: u64) -> u64 {
if balance_gwei == 0 {
self.custody_requirement