Data column custody info (#7648)

#7647


  Introduces a new record in the blobs db `DataColumnCustodyInfo`

When `DataColumnCustodyInfo` exists in the db this indicates that a recent cgc change has occurred and/or that a custody backfill sync is currently in progress (custody backfill will be added as a separate PR). When a cgc change has occurred, `earliest_available_slot` will be equal to the slot at which the cgc change occurred. During custody backfill sync, `earliest_available_slot` should be updated incrementally as the sync progresses.

~~Note that if `advertise_false_custody_group_count` is enabled we do not add a `DataColumnCustodyInfo` record in the db as that would affect the status v2 response.~~
(See comment https://github.com/sigp/lighthouse/pull/7648#discussion_r2212403389)

~~If `DataColumnCustodyInfo` doesn't exist in the db this indicates that we have fulfilled our custody requirements up to the DA window.~~
(It now always exists, and the slot will be set to `None` once backfill is complete.)

StatusV2 now uses `DataColumnCustodyInfo` to calculate the `earliest_available_slot` when a `DataColumnCustodyInfo` record exists in the db; if the record's slot is `None`, then we return the `oldest_block_slot` instead.
This commit is contained in:
Eitan Seri-Levi
2025-07-22 15:30:30 +02:00
committed by GitHub
parent b48879a566
commit db8b6be9df
11 changed files with 185 additions and 13 deletions

View File

@@ -6807,6 +6807,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.map(|duration| (next_digest_epoch, duration))
}
/// Update data column custody info with the slot at which cgc was changed.
pub fn update_data_column_custody_info(&self, slot: Option<Slot>) {
self.store
.put_data_column_custody_info(slot)
.unwrap_or_else(
|e| tracing::error!(error = ?e, "Failed to update data column custody info"),
);
}
/// This method serves to get a sense of the current chain health. It is used in block proposal
/// to determine whether we should outsource payload production duties.
///

View File

@@ -3,6 +3,7 @@ mod migration_schema_v23;
mod migration_schema_v24;
mod migration_schema_v25;
mod migration_schema_v26;
mod migration_schema_v27;
use crate::beacon_chain::BeaconChainTypes;
use std::sync::Arc;
@@ -67,6 +68,17 @@ pub fn migrate_schema<T: BeaconChainTypes>(
let ops = migration_schema_v26::downgrade_from_v26::<T>(db.clone())?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(26), SchemaVersion(27)) => {
// This migration updates the blobs db. The schema version
// is bumped inside upgrade_to_v27.
migration_schema_v27::upgrade_to_v27::<T>(db.clone())
}
(SchemaVersion(27), SchemaVersion(26)) => {
// Downgrading is essentially a no-op and is only possible
// if peer das isn't scheduled.
migration_schema_v27::downgrade_from_v27::<T>(db.clone())?;
db.store_schema_version_atomically(to, vec![])
}
// Anything else is an error.
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion {
target_version: to,

View File

@@ -0,0 +1,26 @@
use crate::BeaconChainTypes;
use std::sync::Arc;
use store::{metadata::SchemaVersion, Error, HotColdDB};
/// Add `DataColumnCustodyInfo` entry to v27.
///
/// The schema version is bumped here (rather than by the caller), so this
/// function must always advance the version on success.
pub fn upgrade_to_v27<T: BeaconChainTypes>(
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
) -> Result<(), Error> {
    // Only peerDAS-enabled networks need the custody info record.
    if db.spec.is_peer_das_scheduled() {
        db.put_data_column_custody_info(None)?;
    }
    // Bump the schema version unconditionally. Leaving non-peerDAS databases
    // at v26 would re-trigger this migration on every startup and make the
    // v27 -> v26 downgrade path (which is only supported when peerDAS is NOT
    // scheduled) unreachable.
    db.store_schema_version_atomically(SchemaVersion(27), vec![])?;
    Ok(())
}
pub fn downgrade_from_v27<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
) -> Result<(), Error> {
if db.spec.is_peer_das_scheduled() {
return Err(Error::MigrationError(
"Cannot downgrade from v27 if peerDAS is scheduled".to_string(),
));
}
Ok(())
}

View File

@@ -217,6 +217,7 @@ impl CustodyContext {
new_custody_group_count: updated_cgc,
sampling_count: self
.num_of_custody_groups_to_sample(Some(effective_epoch), spec),
effective_epoch,
});
}
}
@@ -287,6 +288,7 @@ impl CustodyContext {
pub struct CustodyCountChanged {
    /// The custody group count after the change.
    pub new_custody_group_count: u64,
    /// Number of custody groups to sample, derived from the updated cgc
    /// via `num_of_custody_groups_to_sample`.
    pub sampling_count: u64,
    /// The epoch at which the new custody group count takes effect.
    pub effective_epoch: Epoch,
}
/// The custody information that gets persisted across runs.

View File

@@ -9,12 +9,14 @@ use operation_pool::PersistedOperationPool;
use ssz::Encode;
use std::sync::{Arc, LazyLock};
use store::{
database::interface::BeaconNodeBackend, hot_cold_store::Split, metadata::DataColumnInfo,
database::interface::BeaconNodeBackend,
hot_cold_store::Split,
metadata::{DataColumnCustodyInfo, DataColumnInfo},
DBColumn, HotColdDB, StoreConfig, StoreItem,
};
use strum::IntoEnumIterator;
use tempfile::{tempdir, TempDir};
use types::{ChainSpec, Hash256, Keypair, MainnetEthSpec};
use types::{ChainSpec, Hash256, Keypair, MainnetEthSpec, Slot};
type E = MainnetEthSpec;
type Store<E> = Arc<HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>>>;
@@ -84,11 +86,13 @@ async fn schema_stability() {
chain.persist_op_pool().unwrap();
chain.persist_custody_context().unwrap();
insert_data_column_custody_info(&store, &harness.spec);
check_db_columns();
check_metadata_sizes(&store);
check_op_pool(&store);
check_custody_context(&store, &harness.spec);
check_custody_info(&store, &harness.spec);
check_persisted_chain(&store);
// Not covered here:
@@ -100,13 +104,21 @@ async fn schema_stability() {
/// Assert that the set of DB columns matches the expected stable schema.
///
/// The expected list includes "bdi" (data column custody info), added in v27.
/// Any change to `DBColumn` must be reflected here deliberately.
fn check_db_columns() {
    let current_columns: Vec<&'static str> = DBColumn::iter().map(|c| c.as_str()).collect();
    let expected_columns = vec![
        "bma", "blk", "blb", "bdc", "bdi", "ste", "hsd", "hsn", "bsn", "bsd", "bss", "bs3", "bcs",
        "bst", "exp", "bch", "opo", "etc", "frk", "pkc", "brp", "bsx", "bsr", "bbx", "bbr", "bhr",
        "brm", "dht", "cus", "otb", "bhs", "olc", "lcu", "scb", "scm", "dmy",
    ];
    assert_eq!(expected_columns, current_columns);
}
/// Seed a custody info record (slot 0) so the stability checks below can
/// verify its on-disk encoding. Only relevant once peerDAS is scheduled.
fn insert_data_column_custody_info(store: &Store<E>, spec: &ChainSpec) {
    if !spec.is_peer_das_scheduled() {
        return;
    }
    store
        .put_data_column_custody_info(Some(Slot::new(0)))
        .unwrap();
}
/// Check the SSZ sizes of known on-disk metadata.
///
/// New types can be added here as the schema evolves.
@@ -122,6 +134,7 @@ fn check_metadata_sizes(store: &Store<E>) {
}
);
assert_eq!(DataColumnInfo::default().ssz_bytes_len(), 5);
assert_eq!(DataColumnCustodyInfo::default().ssz_bytes_len(), 5);
}
fn check_op_pool(store: &Store<E>) {
@@ -143,6 +156,15 @@ fn check_custody_context(store: &Store<E>, spec: &ChainSpec) {
}
}
/// Check the persisted `DataColumnCustodyInfo` record.
///
/// When peerDAS is scheduled the record seeded earlier must exist and encode
/// to 13 SSZ bytes; otherwise no record should be present at all.
fn check_custody_info(store: &Store<E>, spec: &ChainSpec) {
    let custody_info = store.get_data_column_custody_info().unwrap();
    if !spec.is_peer_das_scheduled() {
        assert!(custody_info.is_none());
        return;
    }
    assert_eq!(custody_info.unwrap().as_ssz_bytes().len(), 13);
}
fn check_persisted_chain(store: &Store<E>) {
let chain = store
.get_item::<PersistedBeaconChain>(&Hash256::ZERO)

View File

@@ -3157,7 +3157,11 @@ async fn schema_downgrade_to_min_version(
)
.await;
let min_version = SchemaVersion(22);
let min_version = if spec.is_fulu_scheduled() {
SchemaVersion(27)
} else {
SchemaVersion(22)
};
// Save the slot clock so that the new harness doesn't revert in time.
let slot_clock = harness.chain.slot_clock.clone();