mirror of
https://github.com/sigp/lighthouse.git
synced 2026-04-16 20:39:10 +00:00
Block availability data enum (#6866)
PeerDAS has undergone multiple refactors + the blending with the get_blobs optimization has generated technical debt.
A function signature like this
f008b84079/beacon_node/beacon_chain/src/beacon_chain.rs (L7171-L7178)
Allows at least the following combination of states:
- blobs: Some / None
- data_columns: Some / None
- data_column_recv: Some / None
- Block has data? Yes / No
- Block post-PeerDAS? Yes / No
In reality, we don't have that many possible states, only:
- `NoData`: pre-Deneb, post-Deneb pre-PeerDAS with 0 blobs, or post-PeerDAS with 0 blobs
- `Blobs(BlobSidecarList<E>)`: post-Deneb pre-PeerDAS with > 0 blobs
- `DataColumns(DataColumnSidecarList<E>)`: post-PeerDAS with > 0 blobs
- `DataColumnsRecv(oneshot::Receiver<DataColumnSidecarList<E>>)`: post-PeerDAS with > 0 blobs, but we obtained the columns via reconstruction
^ these are the variants of the new `AvailableBlockData` enum
So we go from 2^5 possible states to 4 well-defined ones. Downstream code benefits nicely from this clarity and I think it makes the whole feature much more maintainable.
Currently `is_available` returns a bool, and then we construct the available block in `make_available`. In a way the availability condition is duplicated in both functions. Instead, this PR constructs `AvailableBlockData` in `is_available` so the availability conditions are written once
```rust
if let Some(block_data) = is_available(..) {
let available_block = make_available(block_data);
}
```
This commit is contained in:
@@ -2517,18 +2517,13 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
|
||||
|
||||
// Corrupt the signature on the 1st block to ensure that the backfill processor is checking
|
||||
// signatures correctly. Regression test for https://github.com/sigp/lighthouse/pull/5120.
|
||||
let mut batch_with_invalid_first_block = available_blocks.clone();
|
||||
let mut batch_with_invalid_first_block =
|
||||
available_blocks.iter().map(clone_block).collect::<Vec<_>>();
|
||||
batch_with_invalid_first_block[0] = {
|
||||
let (block_root, block, blobs, data_columns) = available_blocks[0].clone().deconstruct();
|
||||
let (block_root, block, data) = clone_block(&available_blocks[0]).deconstruct();
|
||||
let mut corrupt_block = (*block).clone();
|
||||
*corrupt_block.signature_mut() = Signature::empty();
|
||||
AvailableBlock::__new_for_testing(
|
||||
block_root,
|
||||
Arc::new(corrupt_block),
|
||||
blobs,
|
||||
data_columns,
|
||||
Arc::new(spec),
|
||||
)
|
||||
AvailableBlock::__new_for_testing(block_root, Arc::new(corrupt_block), data, Arc::new(spec))
|
||||
};
|
||||
|
||||
// Importing the invalid batch should error.
|
||||
@@ -2540,8 +2535,9 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
|
||||
));
|
||||
|
||||
// Importing the batch with valid signatures should succeed.
|
||||
let available_blocks_dup = available_blocks.iter().map(clone_block).collect::<Vec<_>>();
|
||||
beacon_chain
|
||||
.import_historical_block_batch(available_blocks.clone())
|
||||
.import_historical_block_batch(available_blocks_dup)
|
||||
.unwrap();
|
||||
assert_eq!(beacon_chain.store.get_oldest_block_slot(), 0);
|
||||
|
||||
@@ -3690,3 +3686,7 @@ fn get_blocks(
|
||||
.map(|checkpoint| checkpoint.beacon_block_root.into())
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Test helper: produces an independent copy of an `AvailableBlock`.
///
/// `AvailableBlock` cannot derive `Clone` because its `DataColumnsRecv`
/// variant holds a `oneshot::Receiver`, which is not cloneable — presumably
/// `__clone_without_recv` returns `None`/`Err` for that variant (TODO confirm
/// against its definition). The `unwrap` is acceptable here since this is
/// test-only code operating on blocks known not to carry a receiver.
fn clone_block<E: EthSpec>(block: &AvailableBlock<E>) -> AvailableBlock<E> {
    block.__clone_without_recv().unwrap()
}
|
||||
|
||||
Reference in New Issue
Block a user