mirror of https://github.com/sigp/lighthouse.git, synced 2026-05-07 16:55:46 +00:00
Remove CGC from data_availability checker (#7033)
Part of https://github.com/sigp/lighthouse/issues/6767

Validator custody makes the CGC and the set of sampling columns dynamic. Right now this information is stored twice:

- in the data availability checker
- in the network globals

If that state becomes dynamic, we must either keep both copies in sync or guard the state behind a mutex. However, we don't actually need to keep the CGC inside the data availability checker: all consumers can read it from the network globals, and `make_available` can be updated to read the expected count of data columns from the block itself.
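To illustrate the intended shape of the change, here is a minimal, self-contained sketch. The types, fields, and helper names below are hypothetical stand-ins, not Lighthouse's actual API: the availability checker holds no CGC of its own, it asks the shared network globals how many columns this node must custody, and it takes the expected column count from the block being checked.

```rust
use std::collections::HashSet;
use std::sync::Arc;

/// Hypothetical stand-in for the relevant chain spec values.
struct Spec {
    number_of_custody_groups: u64,
    number_of_columns: u64,
}

/// Hypothetical stand-in for `NetworkGlobals`: the single source of truth for the CGC.
struct NetworkGlobals {
    custody_group_count: u64,
    spec: Arc<Spec>,
}

impl NetworkGlobals {
    /// Number of columns this node must custody (one column per group when
    /// `number_of_columns == number_of_custody_groups`).
    fn custody_columns_count(&self) -> u64 {
        self.custody_group_count * (self.spec.number_of_columns / self.spec.number_of_custody_groups)
    }
}

/// Hypothetical availability checker that no longer stores its own CGC copy.
struct DataAvailabilityChecker {
    network_globals: Arc<NetworkGlobals>,
}

struct Block {
    /// Expected number of data columns, derivable from the block itself
    /// (e.g. zero when the block carries no blobs).
    expected_column_count: usize,
}

impl DataAvailabilityChecker {
    /// Sketch of a `make_available`-style check: the expected column count comes
    /// from the block, the custody requirement from the network globals.
    fn is_available(&self, block: &Block, received_columns: &HashSet<u64>) -> bool {
        if block.expected_column_count == 0 {
            return true; // nothing to sample for this block
        }
        received_columns.len() as u64 >= self.network_globals.custody_columns_count()
    }
}

fn main() {
    let spec = Arc::new(Spec { number_of_custody_groups: 128, number_of_columns: 128 });
    let globals = Arc::new(NetworkGlobals { custody_group_count: 4, spec });
    let checker = DataAvailabilityChecker { network_globals: globals };

    let block = Block { expected_column_count: 128 };
    let received: HashSet<u64> = (0..4).collect();
    assert!(checker.is_available(&block, &received));
}
```

With a single source of truth in the network globals, making the CGC dynamic later only requires keeping one location consistent.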
@@ -33,6 +33,8 @@ pub struct NetworkGlobals<E: EthSpec> {
     /// The computed sampling subnets and columns is stored to avoid re-computing.
     pub sampling_subnets: HashSet<DataColumnSubnetId>,
     pub sampling_columns: HashSet<ColumnIndex>,
+    /// Constant custody group count (CGC) set at startup
+    custody_group_count: u64,
     /// Network-related configuration. Immutable after initialization.
     pub config: Arc<NetworkConfig>,
     /// Ethereum chain configuration. Immutable after initialization.
@@ -48,47 +50,43 @@ impl<E: EthSpec> NetworkGlobals<E> {
         config: Arc<NetworkConfig>,
         spec: Arc<ChainSpec>,
     ) -> Self {
-        let (sampling_subnets, sampling_columns) = if spec.is_peer_das_scheduled() {
-            let node_id = enr.node_id().raw();
+        let node_id = enr.node_id().raw();
 
-            let custody_group_count = match local_metadata.custody_group_count() {
-                Ok(&cgc) if cgc <= spec.number_of_custody_groups => cgc,
-                _ => {
+        let custody_group_count = match local_metadata.custody_group_count() {
+            Ok(&cgc) if cgc <= spec.number_of_custody_groups => cgc,
+            _ => {
+                if spec.is_peer_das_scheduled() {
                     error!(
                         info = "falling back to default custody requirement",
                         "custody_group_count from metadata is either invalid or not set. This is a bug!"
                     );
-                    spec.custody_requirement
-                }
-            };
+                }
+                spec.custody_requirement
+            }
+        };
 
-            // The below `expect` calls will panic on start up if the chain spec config values used
-            // are invalid
-            let sampling_size = spec
-                .sampling_size(custody_group_count)
-                .expect("should compute node sampling size from valid chain spec");
-            let custody_groups = get_custody_groups(node_id, sampling_size, &spec)
-                .expect("should compute node custody groups");
+        // The below `expect` calls will panic on start up if the chain spec config values used
+        // are invalid
+        let sampling_size = spec
+            .sampling_size(custody_group_count)
+            .expect("should compute node sampling size from valid chain spec");
+        let custody_groups = get_custody_groups(node_id, sampling_size, &spec)
+            .expect("should compute node custody groups");
 
-            let mut sampling_subnets = HashSet::new();
-            for custody_index in &custody_groups {
-                let subnets = compute_subnets_from_custody_group(*custody_index, &spec)
-                    .expect("should compute custody subnets for node");
-                sampling_subnets.extend(subnets);
-            }
+        let mut sampling_subnets = HashSet::new();
+        for custody_index in &custody_groups {
+            let subnets = compute_subnets_from_custody_group(*custody_index, &spec)
+                .expect("should compute custody subnets for node");
+            sampling_subnets.extend(subnets);
+        }
 
-            let mut sampling_columns = HashSet::new();
-            for custody_index in &custody_groups {
-                let columns = compute_columns_for_custody_group(*custody_index, &spec)
-                    .expect("should compute custody columns for node");
-                sampling_columns.extend(columns);
-            }
-
-            (sampling_subnets, sampling_columns)
-        } else {
-            (HashSet::new(), HashSet::new())
-        };
+        let mut sampling_columns = HashSet::new();
+        for custody_index in &custody_groups {
+            let columns = compute_columns_for_custody_group(*custody_index, &spec)
+                .expect("should compute custody columns for node");
+            sampling_columns.extend(columns);
+        }
 
         NetworkGlobals {
             local_enr: RwLock::new(enr.clone()),
             peer_id: RwLock::new(enr.peer_id()),
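For background on what the constructor above computes, the sketch below illustrates the PeerDAS-style mapping from custody groups to column indices and subnets in simplified form. It is not the real `get_custody_groups` / `compute_columns_for_custody_group` / `compute_subnets_from_custody_group` code; the constants and the fixed group assignment are assumptions chosen to keep the example self-contained.

```rust
use std::collections::HashSet;

// Illustrative constants; the real values live in the chain spec.
const NUMBER_OF_CUSTODY_GROUPS: u64 = 128;
const NUMBER_OF_COLUMNS: u64 = 128;
const DATA_COLUMN_SIDECAR_SUBNET_COUNT: u64 = 128;

/// Simplified stand-in for `compute_columns_for_custody_group`: each custody group
/// owns the column indices congruent to the group index modulo the group count.
fn columns_for_custody_group(custody_group: u64) -> Vec<u64> {
    assert!(custody_group < NUMBER_OF_CUSTODY_GROUPS);
    let columns_per_group = NUMBER_OF_COLUMNS / NUMBER_OF_CUSTODY_GROUPS;
    (0..columns_per_group)
        .map(|i| i * NUMBER_OF_CUSTODY_GROUPS + custody_group)
        .collect()
}

/// Simplified stand-in for `compute_subnets_from_custody_group`: map each custody
/// column to the subnet that carries it.
fn subnets_for_custody_group(custody_group: u64) -> Vec<u64> {
    columns_for_custody_group(custody_group)
        .into_iter()
        .map(|column| column % DATA_COLUMN_SIDECAR_SUBNET_COUNT)
        .collect()
}

fn main() {
    // Pretend this node was assigned custody groups {3, 17, 64, 100}; the real
    // assignment hashes the node ID (omitted here).
    let custody_groups = [3u64, 17, 64, 100];

    let mut sampling_subnets = HashSet::new();
    let mut sampling_columns = HashSet::new();
    for group in custody_groups {
        sampling_subnets.extend(subnets_for_custody_group(group));
        sampling_columns.extend(columns_for_custody_group(group));
    }

    // With 128 groups and 128 columns, each custody group maps to exactly one column.
    assert_eq!(sampling_columns.len(), custody_groups.len());
    assert_eq!(sampling_subnets.len(), custody_groups.len());
    println!("columns to custody: {sampling_columns:?}");
}
```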
@@ -100,6 +98,7 @@ impl<E: EthSpec> NetworkGlobals<E> {
             backfill_state: RwLock::new(BackFillState::Paused),
             sampling_subnets,
             sampling_columns,
+            custody_group_count,
             config,
             spec,
         }
@@ -121,6 +120,19 @@ impl<E: EthSpec> NetworkGlobals<E> {
         self.listen_multiaddrs.read().clone()
     }
 
+    /// Returns true if this node is configured as a PeerDAS supernode
+    pub fn is_supernode(&self) -> bool {
+        self.custody_group_count == self.spec.number_of_custody_groups
+    }
+
+    /// Returns the count of custody columns this node must sample for block import
+    pub fn custody_columns_count(&self) -> u64 {
+        // This only panics if the chain spec contains invalid values
+        self.spec
+            .sampling_size(self.custody_group_count)
+            .expect("should compute node sampling size from valid chain spec")
+    }
+
     /// Returns the number of libp2p connected peers.
     pub fn connected_peers(&self) -> usize {
         self.peers.read().connected_peer_ids().count()
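A hypothetical caller-side sketch of the two new accessors, with minimal stand-ins for `ChainSpec` and `NetworkGlobals` so it compiles on its own. The real `custody_columns_count` goes through `ChainSpec::sampling_size`, which also enforces a minimum sample count; that detail is simplified away here.

```rust
use std::sync::Arc;

// Minimal stand-ins so the example is self-contained; the real types are
// `ChainSpec` and `NetworkGlobals` in lighthouse_network.
struct ChainSpec {
    number_of_custody_groups: u64,
}

struct NetworkGlobals {
    custody_group_count: u64,
    spec: Arc<ChainSpec>,
}

impl NetworkGlobals {
    fn is_supernode(&self) -> bool {
        self.custody_group_count == self.spec.number_of_custody_groups
    }

    // Simplified: assumes one column per custody group instead of calling
    // `ChainSpec::sampling_size`.
    fn custody_columns_count(&self) -> u64 {
        self.custody_group_count
    }
}

fn main() {
    let spec = Arc::new(ChainSpec { number_of_custody_groups: 128 });

    // A default node custodies only the minimum requirement...
    let fullnode = NetworkGlobals { custody_group_count: 4, spec: spec.clone() };
    assert!(!fullnode.is_supernode());

    // ...while a supernode custodies every custody group, and therefore every column.
    let supernode = NetworkGlobals { custody_group_count: 128, spec };
    assert!(supernode.is_supernode());
    assert_eq!(supernode.custody_columns_count(), 128);

    // Callers such as the data availability checker now read this value from the
    // network globals instead of caching their own copy of the CGC.
    println!("columns to sample: {}", fullnode.custody_columns_count());
}
```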