mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-09 11:41:51 +00:00
Data column custody info (#7648)
#7647 Introduces a new record in the blobs db, `DataColumnCustodyInfo`. When `DataColumnCustodyInfo` exists in the db this indicates that a recent cgc change has occurred and/or that a custody backfill sync is currently in progress (custody backfill will be added as a separate PR). When a cgc change has occurred, `earliest_available_slot` will be equal to the slot at which the cgc change occurred. During custody backfill sync, `earliest_available_slot` should be updated incrementally as it progresses. ~~Note that if `advertise_false_custody_group_count` is enabled we do not add a `DataColumnCustodyInfo` record in the db as that would affect the status v2 response.~~ (See comment https://github.com/sigp/lighthouse/pull/7648#discussion_r2212403389) ~~If `DataColumnCustodyInfo` doesn't exist in the db this indicates that we have fulfilled our custody requirements up to the DA window.~~ (It now always exists, and the slot will be set to `None` once backfill is complete) StatusV2 now uses `DataColumnCustodyInfo` to calculate the `earliest_available_slot` if a `DataColumnCustodyInfo` record exists in the db; if it's `None`, then we return the `oldest_block_slot`.
This commit is contained in:
@@ -9,12 +9,14 @@ use operation_pool::PersistedOperationPool;
|
||||
use ssz::Encode;
|
||||
use std::sync::{Arc, LazyLock};
|
||||
use store::{
|
||||
database::interface::BeaconNodeBackend, hot_cold_store::Split, metadata::DataColumnInfo,
|
||||
database::interface::BeaconNodeBackend,
|
||||
hot_cold_store::Split,
|
||||
metadata::{DataColumnCustodyInfo, DataColumnInfo},
|
||||
DBColumn, HotColdDB, StoreConfig, StoreItem,
|
||||
};
|
||||
use strum::IntoEnumIterator;
|
||||
use tempfile::{tempdir, TempDir};
|
||||
use types::{ChainSpec, Hash256, Keypair, MainnetEthSpec};
|
||||
use types::{ChainSpec, Hash256, Keypair, MainnetEthSpec, Slot};
|
||||
|
||||
type E = MainnetEthSpec;
|
||||
type Store<E> = Arc<HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>>>;
|
||||
@@ -84,11 +86,13 @@ async fn schema_stability() {
|
||||
|
||||
chain.persist_op_pool().unwrap();
|
||||
chain.persist_custody_context().unwrap();
|
||||
insert_data_column_custody_info(&store, &harness.spec);
|
||||
|
||||
check_db_columns();
|
||||
check_metadata_sizes(&store);
|
||||
check_op_pool(&store);
|
||||
check_custody_context(&store, &harness.spec);
|
||||
check_custody_info(&store, &harness.spec);
|
||||
check_persisted_chain(&store);
|
||||
|
||||
// Not covered here:
|
||||
@@ -100,13 +104,21 @@ async fn schema_stability() {
|
||||
fn check_db_columns() {
|
||||
let current_columns: Vec<&'static str> = DBColumn::iter().map(|c| c.as_str()).collect();
|
||||
let expected_columns = vec![
|
||||
"bma", "blk", "blb", "bdc", "ste", "hsd", "hsn", "bsn", "bsd", "bss", "bs3", "bcs", "bst",
|
||||
"exp", "bch", "opo", "etc", "frk", "pkc", "brp", "bsx", "bsr", "bbx", "bbr", "bhr", "brm",
|
||||
"dht", "cus", "otb", "bhs", "olc", "lcu", "scb", "scm", "dmy",
|
||||
"bma", "blk", "blb", "bdc", "bdi", "ste", "hsd", "hsn", "bsn", "bsd", "bss", "bs3", "bcs",
|
||||
"bst", "exp", "bch", "opo", "etc", "frk", "pkc", "brp", "bsx", "bsr", "bbx", "bbr", "bhr",
|
||||
"brm", "dht", "cus", "otb", "bhs", "olc", "lcu", "scb", "scm", "dmy",
|
||||
];
|
||||
assert_eq!(expected_columns, current_columns);
|
||||
}
|
||||
|
||||
fn insert_data_column_custody_info(store: &Store<E>, spec: &ChainSpec) {
|
||||
if spec.is_peer_das_scheduled() {
|
||||
store
|
||||
.put_data_column_custody_info(Some(Slot::new(0)))
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the SSZ sizes of known on-disk metadata.
|
||||
///
|
||||
/// New types can be added here as the schema evolves.
|
||||
@@ -122,6 +134,7 @@ fn check_metadata_sizes(store: &Store<E>) {
|
||||
}
|
||||
);
|
||||
assert_eq!(DataColumnInfo::default().ssz_bytes_len(), 5);
|
||||
assert_eq!(DataColumnCustodyInfo::default().ssz_bytes_len(), 5);
|
||||
}
|
||||
|
||||
fn check_op_pool(store: &Store<E>) {
|
||||
@@ -143,6 +156,15 @@ fn check_custody_context(store: &Store<E>, spec: &ChainSpec) {
|
||||
}
|
||||
}
|
||||
|
||||
fn check_custody_info(store: &Store<E>, spec: &ChainSpec) {
|
||||
let data_column_custody_info = store.get_data_column_custody_info().unwrap();
|
||||
if spec.is_peer_das_scheduled() {
|
||||
assert_eq!(data_column_custody_info.unwrap().as_ssz_bytes().len(), 13);
|
||||
} else {
|
||||
assert!(data_column_custody_info.is_none());
|
||||
}
|
||||
}
|
||||
|
||||
fn check_persisted_chain(store: &Store<E>) {
|
||||
let chain = store
|
||||
.get_item::<PersistedBeaconChain>(&Hash256::ZERO)
|
||||
|
||||
@@ -3157,7 +3157,11 @@ async fn schema_downgrade_to_min_version(
|
||||
)
|
||||
.await;
|
||||
|
||||
let min_version = SchemaVersion(22);
|
||||
let min_version = if spec.is_fulu_scheduled() {
|
||||
SchemaVersion(27)
|
||||
} else {
|
||||
SchemaVersion(22)
|
||||
};
|
||||
|
||||
// Save the slot clock so that the new harness doesn't revert in time.
|
||||
let slot_clock = harness.chain.slot_clock.clone();
|
||||
|
||||
Reference in New Issue
Block a user