add processing and processed caching to the DA checker (#4732)

* add processing and processed caching to the DA checker

* move processing cache out of critical cache

* get it compiling

* fix lints

* add docs to `AvailabilityView`

* some self review

* fix lints

* fix beacon chain tests

* cargo fmt

* make availability view easier to implement, start on testing

* move child component cache and finish test

* cargo fix

* cargo fix

* cargo fix

* fmt and lint

* make blob commitments not optional, rename some caches, add missing blobs struct

* Update beacon_node/beacon_chain/src/data_availability_checker/processing_cache.rs

Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com>

* marks review feedback and other general cleanup

* cargo fix

* improve availability view docs

* some renames

* some renames and docs

* fix should delay lookup logic

* get rid of some wrapper methods

* fix up single lookup changes

* add a couple docs

* add single blob merge method and improve process_... docs

* update some names

* lints

* fix merge

* remove blob indices from lookup creation log

* remove blob indices from lookup creation log

* delayed lookup logging improvement

* check fork choice before doing any blob processing

* remove unused dep

* Update beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs

Co-authored-by: Michael Sproul <micsproul@gmail.com>

* Update beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs

Co-authored-by: Michael Sproul <micsproul@gmail.com>

* Update beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs

Co-authored-by: Michael Sproul <micsproul@gmail.com>

* Update beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs

Co-authored-by: Michael Sproul <micsproul@gmail.com>

* Update beacon_node/network/src/sync/block_lookups/delayed_lookup.rs

Co-authored-by: Michael Sproul <micsproul@gmail.com>

* remove duplicate deps

* use gen_range in random blobs generator

* rename processing cache fields

* require block root in rpc block construction and check block root consistency

* send peers as vec in single message

* spawn delayed lookup service from network beacon processor

* fix tests

---------

Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com>
Co-authored-by: Michael Sproul <micsproul@gmail.com>
This commit is contained in:
realbigsean
2023-10-03 09:59:33 -04:00
committed by GitHub
parent 67aeb6bf6b
commit c7ddf1f0b1
38 changed files with 1894 additions and 1190 deletions

View File

@@ -161,7 +161,7 @@ fn chain_segment_blocks(
.iter()
.zip(blobs.into_iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(snapshot.beacon_block.clone(), blobs.clone()).unwrap()
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect()
}
@@ -204,17 +204,49 @@ fn update_proposal_signatures(
}
}
fn update_parent_roots(snapshots: &mut [BeaconSnapshot<E>]) {
fn update_parent_roots(
snapshots: &mut [BeaconSnapshot<E>],
blobs: &mut [Option<BlobSidecarList<E>>],
) {
for i in 0..snapshots.len() {
let root = snapshots[i].beacon_block.canonical_root();
if let Some(child) = snapshots.get_mut(i + 1) {
if let (Some(child), Some(child_blobs)) = (snapshots.get_mut(i + 1), blobs.get_mut(i + 1)) {
let (mut block, signature) = child.beacon_block.as_ref().clone().deconstruct();
*block.parent_root_mut() = root;
child.beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature))
let new_child = Arc::new(SignedBeaconBlock::from_block(block, signature));
let new_child_root = new_child.canonical_root();
child.beacon_block = new_child;
if let Some(blobs) = child_blobs {
update_blob_roots(new_child_root, blobs);
}
}
}
}
/// Rebuilds every blob sidecar in `blobs` so that its `block_root` field is the
/// supplied `block_root`, copying all other fields unchanged.
///
/// Called from `update_parent_roots` after a child block has been re-rooted
/// (re-signed with a new parent root), so that each sidecar's `block_root`
/// stays consistent with the canonical root of the block it belongs to.
fn update_blob_roots<E: EthSpec>(block_root: Hash256, blobs: &mut BlobSidecarList<E>) {
for old_blob_sidecar in blobs.iter_mut() {
// Copy every field except `block_root` out of the existing sidecar.
let index = old_blob_sidecar.index;
let slot = old_blob_sidecar.slot;
let block_parent_root = old_blob_sidecar.block_parent_root;
let proposer_index = old_blob_sidecar.proposer_index;
// `blob` is the large payload; it must be cloned because the sidecar is
// behind an `Arc` and cannot be mutated in place.
let blob = old_blob_sidecar.blob.clone();
let kzg_commitment = old_blob_sidecar.kzg_commitment;
let kzg_proof = old_blob_sidecar.kzg_proof;
// Construct a replacement sidecar carrying the new `block_root`.
let new_blob = Arc::new(BlobSidecar::<E> {
block_root,
index,
slot,
block_parent_root,
proposer_index,
blob,
kzg_commitment,
kzg_proof,
});
// Swap the old Arc'd sidecar for the rebuilt one in the list.
*old_blob_sidecar = new_blob;
}
}
#[tokio::test]
async fn chain_segment_full_segment() {
let harness = get_harness(VALIDATOR_COUNT);
@@ -328,7 +360,10 @@ async fn chain_segment_non_linear_parent_roots() {
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
*block.parent_root_mut() = Hash256::zero();
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)).into();
blocks[3] = RpcBlock::new_without_blobs(
None,
Arc::new(SignedBeaconBlock::from_block(block, signature)),
);
assert!(
matches!(
@@ -362,7 +397,10 @@ async fn chain_segment_non_linear_slots() {
.collect();
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
*block.slot_mut() = Slot::new(0);
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)).into();
blocks[3] = RpcBlock::new_without_blobs(
None,
Arc::new(SignedBeaconBlock::from_block(block, signature)),
);
assert!(
matches!(
@@ -386,7 +424,10 @@ async fn chain_segment_non_linear_slots() {
.collect();
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
*block.slot_mut() = blocks[2].slot();
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)).into();
blocks[3] = RpcBlock::new_without_blobs(
None,
Arc::new(SignedBeaconBlock::from_block(block, signature)),
);
assert!(
matches!(
@@ -413,7 +454,7 @@ async fn assert_invalid_signature(
.iter()
.zip(chain_segment_blobs.iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(snapshot.beacon_block.clone(), blobs.clone()).unwrap()
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect();
@@ -440,7 +481,7 @@ async fn assert_invalid_signature(
.take(block_index)
.zip(chain_segment_blobs.iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(snapshot.beacon_block.clone(), blobs.clone()).unwrap()
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect();
// We don't care if this fails, we just call this to ensure that all prior blocks have been
@@ -456,6 +497,7 @@ async fn assert_invalid_signature(
.process_block(
snapshots[block_index].beacon_block.canonical_root(),
RpcBlock::new(
None,
snapshots[block_index].beacon_block.clone(),
chain_segment_blobs[block_index].clone(),
)
@@ -511,7 +553,7 @@ async fn invalid_signature_gossip_block() {
.take(block_index)
.zip(chain_segment_blobs.iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(snapshot.beacon_block.clone(), blobs.clone()).unwrap()
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect();
harness
@@ -558,7 +600,7 @@ async fn invalid_signature_block_proposal() {
.iter()
.zip(chain_segment_blobs.iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(snapshot.beacon_block.clone(), blobs.clone()).unwrap()
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect::<Vec<_>>();
// Ensure the block will be rejected if imported in a chain segment.
@@ -578,7 +620,7 @@ async fn invalid_signature_block_proposal() {
#[tokio::test]
async fn invalid_signature_randao_reveal() {
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
for &block_index in BLOCK_INDICES {
let harness = get_invalid_sigs_harness(&chain_segment).await;
let mut snapshots = chain_segment.clone();
@@ -590,7 +632,7 @@ async fn invalid_signature_randao_reveal() {
*block.body_mut().randao_reveal_mut() = junk_signature();
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(
&chain_segment,
@@ -606,7 +648,7 @@ async fn invalid_signature_randao_reveal() {
#[tokio::test]
async fn invalid_signature_proposer_slashing() {
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
for &block_index in BLOCK_INDICES {
let harness = get_invalid_sigs_harness(&chain_segment).await;
let mut snapshots = chain_segment.clone();
@@ -632,7 +674,7 @@ async fn invalid_signature_proposer_slashing() {
.expect("should update proposer slashing");
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(
&chain_segment,
@@ -648,7 +690,7 @@ async fn invalid_signature_proposer_slashing() {
#[tokio::test]
async fn invalid_signature_attester_slashing() {
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
for &block_index in BLOCK_INDICES {
let harness = get_invalid_sigs_harness(&chain_segment).await;
let mut snapshots = chain_segment.clone();
@@ -685,7 +727,7 @@ async fn invalid_signature_attester_slashing() {
.expect("should update attester slashing");
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(
&chain_segment,
@@ -701,7 +743,7 @@ async fn invalid_signature_attester_slashing() {
#[tokio::test]
async fn invalid_signature_attestation() {
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
let mut checked_attestation = false;
for &block_index in BLOCK_INDICES {
@@ -716,7 +758,7 @@ async fn invalid_signature_attestation() {
attestation.signature = junk_aggregate_signature();
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(
&chain_segment,
@@ -739,7 +781,7 @@ async fn invalid_signature_attestation() {
#[tokio::test]
async fn invalid_signature_deposit() {
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
for &block_index in BLOCK_INDICES {
// Note: an invalid deposit signature is permitted!
let harness = get_invalid_sigs_harness(&chain_segment).await;
@@ -765,13 +807,13 @@ async fn invalid_signature_deposit() {
.expect("should update deposit");
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
let blocks: Vec<RpcBlock<E>> = snapshots
.iter()
.zip(chain_segment_blobs.iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(snapshot.beacon_block.clone(), blobs.clone()).unwrap()
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect();
assert!(
@@ -790,7 +832,7 @@ async fn invalid_signature_deposit() {
#[tokio::test]
async fn invalid_signature_exit() {
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
for &block_index in BLOCK_INDICES {
let harness = get_invalid_sigs_harness(&chain_segment).await;
let mut snapshots = chain_segment.clone();
@@ -813,7 +855,7 @@ async fn invalid_signature_exit() {
.expect("should update deposit");
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(
&chain_segment,
@@ -877,7 +919,7 @@ async fn block_gossip_verification() {
harness
.chain
.process_blob(gossip_verified)
.process_gossip_blob(gossip_verified)
.await
.expect("should import valid gossip verified blob");
}
@@ -1143,7 +1185,11 @@ async fn verify_block_for_gossip_slashing_detection() {
.chain
.verify_blob_sidecar_for_gossip(blob, blob_index)
.unwrap();
harness.chain.process_blob(verified_blob).await.unwrap();
harness
.chain
.process_gossip_blob(verified_blob)
.await
.unwrap();
}
}
harness
@@ -1355,7 +1401,10 @@ async fn add_base_block_to_altair_chain() {
assert!(matches!(
harness
.chain
.process_chain_segment(vec![Arc::new(base_block).into()], NotifyExecutionLayer::Yes,)
.process_chain_segment(
vec![RpcBlock::new_without_blobs(None, Arc::new(base_block))],
NotifyExecutionLayer::Yes,
)
.await,
ChainSegmentResult::Failed {
imported_blocks: 0,
@@ -1491,7 +1540,7 @@ async fn add_altair_block_to_base_chain() {
harness
.chain
.process_chain_segment(
vec![Arc::new(altair_block).into()],
vec![RpcBlock::new_without_blobs(None, Arc::new(altair_block))],
NotifyExecutionLayer::Yes
)
.await,