mirror of
https://github.com/sigp/lighthouse.git
synced 2026-04-29 02:33:48 +00:00
Implement PeerDAS subnet decoupling (aka custody groups) (#6736)
* Implement PeerDAS subnet decoupling (aka custody groups). * Merge branch 'unstable' into decouple-subnets * Refactor feature testing for spec tests (#6737) Squashed commit of the following: commit 898d05ee17 Merge: ffbd25e2b7e0cddef3 Author: Jimmy Chen <jchen.tc@gmail.com> Date: Tue Dec 24 14:41:19 2024 +1100 Merge branch 'unstable' into refactor-ef-tests-features commit ffbd25e2be Author: Jimmy Chen <jchen.tc@gmail.com> Date: Tue Dec 24 14:40:38 2024 +1100 Fix `SszStatic` tests for PeerDAS: exclude eip7594 test vectors when testing Electra types. commit aa593cf35c Author: Jimmy Chen <jchen.tc@gmail.com> Date: Fri Dec 20 12:08:54 2024 +1100 Refactor spec testing for features and simplify usage. * Fix build. * Add input validation and improve arithmetic handling when calculating custody groups. * Address review comments re code style consistency. * Merge branch 'unstable' into decouple-subnets # Conflicts: # beacon_node/beacon_chain/src/kzg_utils.rs # beacon_node/beacon_chain/src/observed_data_sidecars.rs # beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs # common/eth2_network_config/built_in_network_configs/chiado/config.yaml # common/eth2_network_config/built_in_network_configs/gnosis/config.yaml # common/eth2_network_config/built_in_network_configs/holesky/config.yaml # common/eth2_network_config/built_in_network_configs/mainnet/config.yaml # common/eth2_network_config/built_in_network_configs/sepolia/config.yaml # consensus/types/src/chain_spec.rs * Update consensus/types/src/chain_spec.rs Co-authored-by: Lion - dapplion <35266934+dapplion@users.noreply.github.com> * Merge remote-tracking branch 'origin/unstable' into decouple-subnets * Update error handling. * Address review comment. * Merge remote-tracking branch 'origin/unstable' into decouple-subnets # Conflicts: # consensus/types/src/chain_spec.rs * Update PeerDAS spec tests to `1.5.0-beta.0` and fix failing unit tests. 
* Merge remote-tracking branch 'origin/unstable' into decouple-subnets # Conflicts: # beacon_node/lighthouse_network/src/peer_manager/mod.rs
This commit is contained in:
@@ -165,7 +165,7 @@ impl<E: EthSpec> RpcBlock<E> {
|
||||
let inner = if !custody_columns.is_empty() {
|
||||
RpcBlockInner::BlockAndCustodyColumns(
|
||||
block,
|
||||
RuntimeVariableList::new(custody_columns, spec.number_of_columns)?,
|
||||
RuntimeVariableList::new(custody_columns, spec.number_of_columns as usize)?,
|
||||
)
|
||||
} else {
|
||||
RpcBlockInner::Block(block)
|
||||
|
||||
@@ -117,21 +117,16 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
|
||||
spec: Arc<ChainSpec>,
|
||||
log: Logger,
|
||||
) -> Result<Self, AvailabilityCheckError> {
|
||||
let custody_subnet_count = if import_all_data_columns {
|
||||
spec.data_column_sidecar_subnet_count as usize
|
||||
} else {
|
||||
spec.custody_requirement as usize
|
||||
};
|
||||
|
||||
let subnet_sampling_size =
|
||||
std::cmp::max(custody_subnet_count, spec.samples_per_slot as usize);
|
||||
let sampling_column_count =
|
||||
subnet_sampling_size.saturating_mul(spec.data_columns_per_subnet());
|
||||
let custody_group_count = spec.custody_group_count(import_all_data_columns);
|
||||
// This should only panic if the chain spec contains invalid values.
|
||||
let sampling_size = spec
|
||||
.sampling_size(custody_group_count)
|
||||
.expect("should compute node sampling size from valid chain spec");
|
||||
|
||||
let inner = DataAvailabilityCheckerInner::new(
|
||||
OVERFLOW_LRU_CAPACITY,
|
||||
store,
|
||||
sampling_column_count,
|
||||
sampling_size as usize,
|
||||
spec.clone(),
|
||||
)?;
|
||||
Ok(Self {
|
||||
@@ -148,7 +143,7 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
|
||||
}
|
||||
|
||||
pub(crate) fn is_supernode(&self) -> bool {
|
||||
self.get_sampling_column_count() == self.spec.number_of_columns
|
||||
self.get_sampling_column_count() == self.spec.number_of_columns as usize
|
||||
}
|
||||
|
||||
/// Checks if the block root is currently in the availability cache awaiting import because
|
||||
@@ -433,7 +428,7 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
|
||||
.map(CustodyDataColumn::into_inner)
|
||||
.collect::<Vec<_>>();
|
||||
let all_data_columns =
|
||||
RuntimeVariableList::from_vec(all_data_columns, self.spec.number_of_columns);
|
||||
RuntimeVariableList::from_vec(all_data_columns, self.spec.number_of_columns as usize);
|
||||
|
||||
// verify kzg for all data columns at once
|
||||
if !all_data_columns.is_empty() {
|
||||
|
||||
@@ -598,7 +598,7 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> {
|
||||
|
||||
// If we're sampling all columns, it means we must be custodying all columns.
|
||||
let custody_column_count = self.sampling_column_count();
|
||||
let total_column_count = self.spec.number_of_columns;
|
||||
let total_column_count = self.spec.number_of_columns as usize;
|
||||
let received_column_count = pending_components.verified_data_columns.len();
|
||||
|
||||
if pending_components.reconstruction_started {
|
||||
@@ -607,7 +607,7 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> {
|
||||
if custody_column_count != total_column_count {
|
||||
return ReconstructColumnsDecision::No("not required for full node");
|
||||
}
|
||||
if received_column_count == self.spec.number_of_columns {
|
||||
if received_column_count >= total_column_count {
|
||||
return ReconstructColumnsDecision::No("all columns received");
|
||||
}
|
||||
if received_column_count < total_column_count / 2 {
|
||||
|
||||
@@ -423,7 +423,7 @@ fn verify_data_column_sidecar<E: EthSpec>(
|
||||
data_column: &DataColumnSidecar<E>,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<(), GossipDataColumnError> {
|
||||
if data_column.index >= spec.number_of_columns as u64 {
|
||||
if data_column.index >= spec.number_of_columns {
|
||||
return Err(GossipDataColumnError::InvalidColumnIndex(data_column.index));
|
||||
}
|
||||
if data_column.kzg_commitments.is_empty() {
|
||||
@@ -611,7 +611,7 @@ fn verify_index_matches_subnet<E: EthSpec>(
|
||||
spec: &ChainSpec,
|
||||
) -> Result<(), GossipDataColumnError> {
|
||||
let expected_subnet: u64 =
|
||||
DataColumnSubnetId::from_column_index::<E>(data_column.index as usize, spec).into();
|
||||
DataColumnSubnetId::from_column_index(data_column.index, spec).into();
|
||||
if expected_subnet != subnet {
|
||||
return Err(GossipDataColumnError::InvalidSubnetId {
|
||||
received: subnet,
|
||||
|
||||
@@ -193,7 +193,7 @@ fn build_data_column_sidecars<E: EthSpec>(
|
||||
blob_cells_and_proofs_vec: Vec<CellsAndKzgProofs>,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<DataColumnSidecarList<E>, String> {
|
||||
let number_of_columns = spec.number_of_columns;
|
||||
let number_of_columns = spec.number_of_columns as usize;
|
||||
let max_blobs_per_block = spec
|
||||
.max_blobs_per_block(signed_block_header.message.slot.epoch(E::slots_per_epoch()))
|
||||
as usize;
|
||||
@@ -428,7 +428,7 @@ mod test {
|
||||
.kzg_commitments_merkle_proof()
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(column_sidecars.len(), spec.number_of_columns);
|
||||
assert_eq!(column_sidecars.len(), spec.number_of_columns as usize);
|
||||
for (idx, col_sidecar) in column_sidecars.iter().enumerate() {
|
||||
assert_eq!(col_sidecar.index, idx as u64);
|
||||
|
||||
@@ -461,7 +461,7 @@ mod test {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
for i in 0..spec.number_of_columns {
|
||||
for i in 0..spec.number_of_columns as usize {
|
||||
assert_eq!(reconstructed_columns.get(i), column_sidecars.get(i), "{i}");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -59,7 +59,7 @@ impl<E: EthSpec> ObservableDataSidecar for DataColumnSidecar<E> {
|
||||
}
|
||||
|
||||
fn max_num_of_items(spec: &ChainSpec, _slot: Slot) -> usize {
|
||||
spec.number_of_columns
|
||||
spec.number_of_columns as usize
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user