mirror of
https://github.com/sigp/lighthouse.git
synced 2026-04-17 21:08:32 +00:00
Devnet6 (#4404)
* some blob reprocessing work
* remove ForceBlockLookup
* reorder enum match arms in sync manager
* a lot more reprocessing work
* impl logic for triggering blob lookups along with block lookups
* deal with rpc blobs in groups per block in the da checker. don't cache missing blob ids in the da checker.
* make single block lookup generic
* more work
* add delayed processing logic and combine some requests
* start fixing some compile errors
* fix compilation in main block lookup mod
* much work
* get things compiling
* parent blob lookups
* fix compile
* revert red/stevie changes
* fix up sync manager delay message logic
* add peer usefulness enum
* should remove lookup refactor
* consolidate retry error handling
* improve peer scoring during certain failures in parent lookups
* improve retry code
* drop parent lookup if either req has a peer disconnect during download
* refactor single block processed method
* processing peer refactor
* smol bugfix
* fix some todos
* fix lints
* fix lints
* fix compile in lookup tests
* fix lints
* fix lints
* fix existing block lookup tests
* renamings
* fix after merge
* cargo fmt
* compilation fix in beacon chain tests
* fix
* refactor lookup tests to work with multiple forks and response types
* make tests into macros
* wrap availability check error
* fix compile after merge
* add random blobs
* start fixing up lookup verify error handling
* some bug fixes and the start of deneb only tests
* make tests work for all forks
* track information about peer source
* error refactoring
* improve peer scoring
* fix test compilation
* make sure blobs are sent for processing after stream termination, delete copied tests
* add some tests and fix a bug
* smol bugfixes and moar tests
* add tests and fix some things
* compile after merge
* lots of refactoring
* retry on invalid block/blob
* merge unknown parent messages before current slot lookup
* get tests compiling
* penalize blob peer on invalid blobs
* Check disk on in-memory cache miss
* Update beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs
* Update beacon_node/network/src/sync/network_context.rs
Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com>
* fix bug in matching blocks and blobs in range sync
* pr feedback
* fix conflicts
* upgrade logs from warn to crit when we receive incorrect response in range
* synced_and_connected_within_tolerance -> should_search_for_block
* remove todo
* add data gas used and update excess data gas to u64
* Fix Broken Overflow Tests
* payload verification with commitments
* fix merge conflicts
* restore payload file
* Restore payload file
* remove todo
* add max blob commitments per block
* c-kzg lib update
* Fix ef tests
* Abstract over minimal/mainnet spec in kzg crate
* Start integrating new KZG
* checkpoint sync without alignment
* checkpoint sync without alignment
* add import
* add import
* query for checkpoint state by slot rather than state root (teku doesn't serve by state root)
* query for checkpoint state by slot rather than state root (teku doesn't serve by state root)
* loosen check
* get state first and query by most recent block root
* Revert "loosen check"
This reverts commit 069d13dd63.
* get state first and query by most recent block root
* merge max blobs change
* simplify delay logic
* rename unknown parent sync message variants
* rename parameter, block_slot -> slot
* add some docs to the lookup module
* use interval instead of sleep
* drop request if blocks and blobs requests both return `None` for `Id`
* clean up `find_single_lookup` logic
* add lookup source enum
* clean up `find_single_lookup` logic
* add docs to find_single_lookup_request
* move LookupSource our of param where unnecessary
* remove unnecessary todo
* query for block by `state.latest_block_header.slot`
* fix lint
* fix merge transition ef tests
* fix test
* fix test
* fix observed blob sidecars test
* Add some metrics (#33)
* fix protocol limits for blobs by root
* Update Engine API for 1:1 Structure Method
* make beacon chain tests to fix devnet 6 changes
* get ckzg working and fix some tests
* fix remaining tests
* fix lints
* Fix KZG linking issues
* remove unused dep
* lockfile
* test fixes
* remove dbgs
* remove unwrap
* cleanup tx generator
* small fixes
* fixing fixes
* more self review
* more self review
* refactor genesis header initialization
* refactor mock el instantiations
* fix compile
* fix network test, make sure they run for each fork
* pr feedback
* fix last test (hopefully)
---------
Co-authored-by: Pawan Dhananjay <pawandhananjay@gmail.com>
Co-authored-by: Mark Mackey <mark@sigmaprime.io>
Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com>
Co-authored-by: Michael Sproul <michael@sigmaprime.io>
This commit is contained in:
@@ -1,6 +1,7 @@
|
||||
#![cfg(not(debug_assertions))]
|
||||
|
||||
use beacon_chain::blob_verification::BlockWrapper;
|
||||
use beacon_chain::test_utils::BlobSignatureKey;
|
||||
use beacon_chain::{
|
||||
blob_verification::AsBlock,
|
||||
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
|
||||
@@ -36,7 +37,7 @@ lazy_static! {
|
||||
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
|
||||
}
|
||||
|
||||
async fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
|
||||
async fn get_chain_segment() -> (Vec<BeaconSnapshot<E>>, Vec<Option<BlobSidecarList<E>>>) {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
harness
|
||||
@@ -48,6 +49,7 @@ async fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
|
||||
.await;
|
||||
|
||||
let mut segment = Vec::with_capacity(CHAIN_SEGMENT_LENGTH);
|
||||
let mut segment_blobs = Vec::with_capacity(CHAIN_SEGMENT_LENGTH);
|
||||
for snapshot in harness
|
||||
.chain
|
||||
.chain_dump()
|
||||
@@ -66,8 +68,76 @@ async fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
|
||||
beacon_block: Arc::new(full_block),
|
||||
beacon_state: snapshot.beacon_state,
|
||||
});
|
||||
segment_blobs.push(
|
||||
harness
|
||||
.chain
|
||||
.get_blobs(&snapshot.beacon_block_root)
|
||||
.unwrap(),
|
||||
)
|
||||
}
|
||||
segment
|
||||
(segment, segment_blobs)
|
||||
}
|
||||
|
||||
async fn get_chain_segment_with_signed_blobs() -> (
|
||||
Vec<BeaconSnapshot<E>>,
|
||||
Vec<Option<VariableList<SignedBlobSidecar<E>, <E as EthSpec>::MaxBlobsPerBlock>>>,
|
||||
) {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
harness
|
||||
.extend_chain(
|
||||
CHAIN_SEGMENT_LENGTH,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
)
|
||||
.await;
|
||||
|
||||
let mut segment = Vec::with_capacity(CHAIN_SEGMENT_LENGTH);
|
||||
let mut segment_blobs = Vec::with_capacity(CHAIN_SEGMENT_LENGTH);
|
||||
for snapshot in harness
|
||||
.chain
|
||||
.chain_dump()
|
||||
.expect("should dump chain")
|
||||
.into_iter()
|
||||
.skip(1)
|
||||
{
|
||||
let full_block = harness
|
||||
.chain
|
||||
.get_block(&snapshot.beacon_block_root)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
segment.push(BeaconSnapshot {
|
||||
beacon_block_root: snapshot.beacon_block_root,
|
||||
beacon_block: Arc::new(full_block),
|
||||
beacon_state: snapshot.beacon_state,
|
||||
});
|
||||
let signed_blobs = harness
|
||||
.chain
|
||||
.get_blobs(&snapshot.beacon_block_root)
|
||||
.unwrap()
|
||||
.map(|blobs| {
|
||||
let blobs = blobs
|
||||
.into_iter()
|
||||
.map(|blob| {
|
||||
let block_root = blob.block_root;
|
||||
let blob_index = blob.index;
|
||||
SignedBlobSidecar {
|
||||
message: blob,
|
||||
signature: harness
|
||||
.blob_signature_cache
|
||||
.read()
|
||||
.get(&BlobSignatureKey::new(block_root, blob_index))
|
||||
.unwrap()
|
||||
.clone(),
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
VariableList::from(blobs)
|
||||
});
|
||||
segment_blobs.push(signed_blobs)
|
||||
}
|
||||
(segment, segment_blobs)
|
||||
}
|
||||
|
||||
fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<E>> {
|
||||
@@ -83,10 +153,14 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessTyp
|
||||
harness
|
||||
}
|
||||
|
||||
fn chain_segment_blocks(chain_segment: &[BeaconSnapshot<E>]) -> Vec<Arc<SignedBeaconBlock<E>>> {
|
||||
fn chain_segment_blocks(
|
||||
chain_segment: &[BeaconSnapshot<E>],
|
||||
blobs: &[Option<BlobSidecarList<E>>],
|
||||
) -> Vec<BlockWrapper<E>> {
|
||||
chain_segment
|
||||
.iter()
|
||||
.map(|snapshot| snapshot.beacon_block.clone().into())
|
||||
.zip(blobs.into_iter())
|
||||
.map(|(snapshot, blobs)| BlockWrapper::new(snapshot.beacon_block.clone(), blobs.clone()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
@@ -142,8 +216,8 @@ fn update_parent_roots(snapshots: &mut [BeaconSnapshot<E>]) {
|
||||
#[tokio::test]
|
||||
async fn chain_segment_full_segment() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
let blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
|
||||
.into_iter()
|
||||
.map(|block| block.into())
|
||||
.collect();
|
||||
@@ -181,11 +255,12 @@ async fn chain_segment_full_segment() {
|
||||
async fn chain_segment_varying_chunk_size() {
|
||||
for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
|
||||
.into_iter()
|
||||
.map(|block| block.into())
|
||||
.collect();
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
let blocks: Vec<BlockWrapper<E>> =
|
||||
chain_segment_blocks(&chain_segment, &chain_segment_blobs)
|
||||
.into_iter()
|
||||
.map(|block| block.into())
|
||||
.collect();
|
||||
|
||||
harness
|
||||
.chain
|
||||
@@ -214,7 +289,7 @@ async fn chain_segment_varying_chunk_size() {
|
||||
#[tokio::test]
|
||||
async fn chain_segment_non_linear_parent_roots() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
|
||||
harness
|
||||
.chain
|
||||
@@ -224,10 +299,11 @@ async fn chain_segment_non_linear_parent_roots() {
|
||||
/*
|
||||
* Test with a block removed.
|
||||
*/
|
||||
let mut blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
|
||||
.into_iter()
|
||||
.map(|block| block.into())
|
||||
.collect();
|
||||
let mut blocks: Vec<BlockWrapper<E>> =
|
||||
chain_segment_blocks(&chain_segment, &chain_segment_blobs)
|
||||
.into_iter()
|
||||
.map(|block| block.into())
|
||||
.collect();
|
||||
blocks.remove(2);
|
||||
|
||||
assert!(
|
||||
@@ -245,10 +321,11 @@ async fn chain_segment_non_linear_parent_roots() {
|
||||
/*
|
||||
* Test with a modified parent root.
|
||||
*/
|
||||
let mut blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
|
||||
.into_iter()
|
||||
.map(|block| block.into())
|
||||
.collect();
|
||||
let mut blocks: Vec<BlockWrapper<E>> =
|
||||
chain_segment_blocks(&chain_segment, &chain_segment_blobs)
|
||||
.into_iter()
|
||||
.map(|block| block.into())
|
||||
.collect();
|
||||
|
||||
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
|
||||
*block.parent_root_mut() = Hash256::zero();
|
||||
@@ -270,7 +347,7 @@ async fn chain_segment_non_linear_parent_roots() {
|
||||
#[tokio::test]
|
||||
async fn chain_segment_non_linear_slots() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
harness
|
||||
.chain
|
||||
.slot_clock
|
||||
@@ -280,10 +357,11 @@ async fn chain_segment_non_linear_slots() {
|
||||
* Test where a child is lower than the parent.
|
||||
*/
|
||||
|
||||
let mut blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
|
||||
.into_iter()
|
||||
.map(|block| block.into())
|
||||
.collect();
|
||||
let mut blocks: Vec<BlockWrapper<E>> =
|
||||
chain_segment_blocks(&chain_segment, &chain_segment_blobs)
|
||||
.into_iter()
|
||||
.map(|block| block.into())
|
||||
.collect();
|
||||
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
|
||||
*block.slot_mut() = Slot::new(0);
|
||||
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)).into();
|
||||
@@ -304,10 +382,11 @@ async fn chain_segment_non_linear_slots() {
|
||||
* Test where a child is equal to the parent.
|
||||
*/
|
||||
|
||||
let mut blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
|
||||
.into_iter()
|
||||
.map(|block| block.into())
|
||||
.collect();
|
||||
let mut blocks: Vec<BlockWrapper<E>> =
|
||||
chain_segment_blocks(&chain_segment, &chain_segment_blobs)
|
||||
.into_iter()
|
||||
.map(|block| block.into())
|
||||
.collect();
|
||||
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
|
||||
*block.slot_mut() = blocks[2].slot();
|
||||
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)).into();
|
||||
@@ -327,6 +406,7 @@ async fn chain_segment_non_linear_slots() {
|
||||
|
||||
async fn assert_invalid_signature(
|
||||
chain_segment: &[BeaconSnapshot<E>],
|
||||
chain_segment_blobs: &[Option<BlobSidecarList<E>>],
|
||||
harness: &BeaconChainHarness<EphemeralHarnessType<E>>,
|
||||
block_index: usize,
|
||||
snapshots: &[BeaconSnapshot<E>],
|
||||
@@ -334,7 +414,8 @@ async fn assert_invalid_signature(
|
||||
) {
|
||||
let blocks: Vec<BlockWrapper<E>> = snapshots
|
||||
.iter()
|
||||
.map(|snapshot| snapshot.beacon_block.clone().into())
|
||||
.zip(chain_segment_blobs.iter())
|
||||
.map(|(snapshot, blobs)| BlockWrapper::new(snapshot.beacon_block.clone(), blobs.clone()))
|
||||
.collect();
|
||||
|
||||
// Ensure the block will be rejected if imported in a chain segment.
|
||||
@@ -358,7 +439,8 @@ async fn assert_invalid_signature(
|
||||
let ancestor_blocks = chain_segment
|
||||
.iter()
|
||||
.take(block_index)
|
||||
.map(|snapshot| snapshot.beacon_block.clone().into())
|
||||
.zip(chain_segment_blobs.iter())
|
||||
.map(|(snapshot, blobs)| BlockWrapper::new(snapshot.beacon_block.clone(), blobs.clone()))
|
||||
.collect();
|
||||
// We don't care if this fails, we just call this to ensure that all prior blocks have been
|
||||
// imported prior to this test.
|
||||
@@ -372,7 +454,10 @@ async fn assert_invalid_signature(
|
||||
.chain
|
||||
.process_block(
|
||||
snapshots[block_index].beacon_block.canonical_root(),
|
||||
snapshots[block_index].beacon_block.clone(),
|
||||
BlockWrapper::new(
|
||||
snapshots[block_index].beacon_block.clone(),
|
||||
chain_segment_blobs[block_index].clone(),
|
||||
),
|
||||
NotifyExecutionLayer::Yes,
|
||||
)
|
||||
.await;
|
||||
@@ -403,7 +488,7 @@ async fn get_invalid_sigs_harness(
|
||||
}
|
||||
#[tokio::test]
|
||||
async fn invalid_signature_gossip_block() {
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
for &block_index in BLOCK_INDICES {
|
||||
// Ensure the block will be rejected if imported on its own (without gossip checking).
|
||||
let harness = get_invalid_sigs_harness(&chain_segment).await;
|
||||
@@ -421,7 +506,10 @@ async fn invalid_signature_gossip_block() {
|
||||
let ancestor_blocks = chain_segment
|
||||
.iter()
|
||||
.take(block_index)
|
||||
.map(|snapshot| snapshot.beacon_block.clone().into())
|
||||
.zip(chain_segment_blobs.iter())
|
||||
.map(|(snapshot, blobs)| {
|
||||
BlockWrapper::new(snapshot.beacon_block.clone(), blobs.clone())
|
||||
})
|
||||
.collect();
|
||||
harness
|
||||
.chain
|
||||
@@ -449,7 +537,7 @@ async fn invalid_signature_gossip_block() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn invalid_signature_block_proposal() {
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
for &block_index in BLOCK_INDICES {
|
||||
let harness = get_invalid_sigs_harness(&chain_segment).await;
|
||||
let mut snapshots = chain_segment.clone();
|
||||
@@ -464,7 +552,10 @@ async fn invalid_signature_block_proposal() {
|
||||
));
|
||||
let blocks: Vec<BlockWrapper<E>> = snapshots
|
||||
.iter()
|
||||
.map(|snapshot| snapshot.beacon_block.clone().into())
|
||||
.zip(chain_segment_blobs.iter())
|
||||
.map(|(snapshot, blobs)| {
|
||||
BlockWrapper::new(snapshot.beacon_block.clone(), blobs.clone())
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
// Ensure the block will be rejected if imported in a chain segment.
|
||||
assert!(
|
||||
@@ -483,7 +574,7 @@ async fn invalid_signature_block_proposal() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn invalid_signature_randao_reveal() {
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
for &block_index in BLOCK_INDICES {
|
||||
let harness = get_invalid_sigs_harness(&chain_segment).await;
|
||||
let mut snapshots = chain_segment.clone();
|
||||
@@ -497,13 +588,21 @@ async fn invalid_signature_randao_reveal() {
|
||||
Arc::new(SignedBeaconBlock::from_block(block, signature));
|
||||
update_parent_roots(&mut snapshots);
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
assert_invalid_signature(&chain_segment, &harness, block_index, &snapshots, "randao").await;
|
||||
assert_invalid_signature(
|
||||
&chain_segment,
|
||||
&chain_segment_blobs,
|
||||
&harness,
|
||||
block_index,
|
||||
&snapshots,
|
||||
"randao",
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn invalid_signature_proposer_slashing() {
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
for &block_index in BLOCK_INDICES {
|
||||
let harness = get_invalid_sigs_harness(&chain_segment).await;
|
||||
let mut snapshots = chain_segment.clone();
|
||||
@@ -533,6 +632,7 @@ async fn invalid_signature_proposer_slashing() {
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
assert_invalid_signature(
|
||||
&chain_segment,
|
||||
&chain_segment_blobs,
|
||||
&harness,
|
||||
block_index,
|
||||
&snapshots,
|
||||
@@ -544,7 +644,7 @@ async fn invalid_signature_proposer_slashing() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn invalid_signature_attester_slashing() {
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
for &block_index in BLOCK_INDICES {
|
||||
let harness = get_invalid_sigs_harness(&chain_segment).await;
|
||||
let mut snapshots = chain_segment.clone();
|
||||
@@ -585,6 +685,7 @@ async fn invalid_signature_attester_slashing() {
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
assert_invalid_signature(
|
||||
&chain_segment,
|
||||
&chain_segment_blobs,
|
||||
&harness,
|
||||
block_index,
|
||||
&snapshots,
|
||||
@@ -596,7 +697,7 @@ async fn invalid_signature_attester_slashing() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn invalid_signature_attestation() {
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
let mut checked_attestation = false;
|
||||
|
||||
for &block_index in BLOCK_INDICES {
|
||||
@@ -615,6 +716,7 @@ async fn invalid_signature_attestation() {
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
assert_invalid_signature(
|
||||
&chain_segment,
|
||||
&chain_segment_blobs,
|
||||
&harness,
|
||||
block_index,
|
||||
&snapshots,
|
||||
@@ -633,7 +735,7 @@ async fn invalid_signature_attestation() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn invalid_signature_deposit() {
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
for &block_index in BLOCK_INDICES {
|
||||
// Note: an invalid deposit signature is permitted!
|
||||
let harness = get_invalid_sigs_harness(&chain_segment).await;
|
||||
@@ -663,7 +765,10 @@ async fn invalid_signature_deposit() {
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
let blocks: Vec<BlockWrapper<E>> = snapshots
|
||||
.iter()
|
||||
.map(|snapshot| snapshot.beacon_block.clone().into())
|
||||
.zip(chain_segment_blobs.iter())
|
||||
.map(|(snapshot, blobs)| {
|
||||
BlockWrapper::new(snapshot.beacon_block.clone(), blobs.clone())
|
||||
})
|
||||
.collect();
|
||||
assert!(
|
||||
!matches!(
|
||||
@@ -681,7 +786,7 @@ async fn invalid_signature_deposit() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn invalid_signature_exit() {
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
for &block_index in BLOCK_INDICES {
|
||||
let harness = get_invalid_sigs_harness(&chain_segment).await;
|
||||
let mut snapshots = chain_segment.clone();
|
||||
@@ -708,6 +813,7 @@ async fn invalid_signature_exit() {
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
assert_invalid_signature(
|
||||
&chain_segment,
|
||||
&chain_segment_blobs,
|
||||
&harness,
|
||||
block_index,
|
||||
&snapshots,
|
||||
@@ -727,7 +833,7 @@ fn unwrap_err<T, E>(result: Result<T, E>) -> E {
|
||||
#[tokio::test]
|
||||
async fn block_gossip_verification() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment_with_signed_blobs().await;
|
||||
|
||||
let block_index = CHAIN_SEGMENT_LENGTH - 2;
|
||||
|
||||
@@ -737,7 +843,10 @@ async fn block_gossip_verification() {
|
||||
.set_slot(chain_segment[block_index].beacon_block.slot().as_u64());
|
||||
|
||||
// Import the ancestors prior to the block we're testing.
|
||||
for snapshot in &chain_segment[0..block_index] {
|
||||
for (snapshot, blobs_opt) in chain_segment[0..block_index]
|
||||
.iter()
|
||||
.zip(chain_segment_blobs.iter())
|
||||
{
|
||||
let gossip_verified = harness
|
||||
.chain
|
||||
.verify_block_for_gossip(snapshot.beacon_block.clone().into())
|
||||
@@ -753,6 +862,21 @@ async fn block_gossip_verification() {
|
||||
)
|
||||
.await
|
||||
.expect("should import valid gossip verified block");
|
||||
if let Some(blobs) = blobs_opt {
|
||||
for blob in blobs {
|
||||
let blob_index = blob.message.index;
|
||||
let gossip_verified = harness
|
||||
.chain
|
||||
.verify_blob_sidecar_for_gossip(blob.clone(), blob_index)
|
||||
.expect("should obtain gossip verified blob");
|
||||
|
||||
harness
|
||||
.chain
|
||||
.process_blob(gossip_verified)
|
||||
.await
|
||||
.expect("should import valid gossip verified blob");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Recompute the head to ensure we cache the latest view of fork choice.
|
||||
@@ -1010,14 +1134,25 @@ async fn verify_block_for_gossip_slashing_detection() {
|
||||
harness.advance_slot();
|
||||
|
||||
let state = harness.get_current_state();
|
||||
let ((block1, _), _) = harness.make_block(state.clone(), Slot::new(1)).await;
|
||||
let ((block2, _), _) = harness.make_block(state, Slot::new(1)).await;
|
||||
let ((block1, blobs1), _) = harness.make_block(state.clone(), Slot::new(1)).await;
|
||||
let ((block2, _blobs2), _) = harness.make_block(state, Slot::new(1)).await;
|
||||
|
||||
let verified_block = harness
|
||||
.chain
|
||||
.verify_block_for_gossip(Arc::new(block1).into())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
if let Some(blobs) = blobs1 {
|
||||
for blob in blobs {
|
||||
let blob_index = blob.message.index;
|
||||
let verified_blob = harness
|
||||
.chain
|
||||
.verify_blob_sidecar_for_gossip(blob, blob_index)
|
||||
.unwrap();
|
||||
harness.chain.process_blob(verified_blob).await.unwrap();
|
||||
}
|
||||
}
|
||||
harness
|
||||
.chain
|
||||
.process_block(
|
||||
|
||||
Reference in New Issue
Block a user