Enable lints for tests only running optimized (#6664)

* enable linting of optimized-only tests

* fix automatically fixable or obvious lints

* fix suspicious_open_options by removing manual options

* fix `await_holding_lock`s

* avoid failing lint due to now-disabled `#[cfg(debug_assertions)]`

* reduce future sizes in tests

* fix accidentally flipped assert logic

* restore holding lock for web3signer download

* Merge branch 'unstable' into lint-opt-tests
Author: Daniel Knopik
Date: 2024-12-17 01:40:35 +01:00
Committed by: GitHub
parent 847c8019c7
commit 02cb2d68ff
34 changed files with 572 additions and 574 deletions
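
For context: the tests changed below are compiled only in optimized builds (debug assertions off), which is why clippy had never analyzed them before this commit. A minimal sketch of the gating pattern, with an illustrative module name (not the exact Lighthouse module):

// Under the default dev profile (debug_assertions on) this module is
// compiled out, so a plain `cargo clippy --tests` never saw its contents.
#[cfg(not(debug_assertions))]
#[cfg(test)]
mod optimized_only_tests {
    #[test]
    fn expensive_check() {
        // Only built, and therefore only linted, when debug_assertions are off.
        assert!(u64::MAX.checked_sub(1).is_some());
    }
}

Running clippy against a release-style profile (something like `cargo clippy --release --tests`; the exact CI invocation is not shown in this commit) makes these modules visible, which is what surfaced the warnings fixed below.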

View File

@@ -512,7 +512,7 @@ mod test {
}
assert!(
!cache.contains(&shuffling_id_and_committee_caches.get(0).unwrap().0),
!cache.contains(&shuffling_id_and_committee_caches.first().unwrap().0),
"should not contain oldest epoch shuffling id"
);
assert_eq!(
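
The hunk above is the standard fix for clippy's `get_first` lint: indexing the first element with `.get(0)` is replaced by the equivalent `.first()`. A minimal sketch:

fn main() {
    let xs = [10, 20, 30];
    // `.get(0)` and `.first()` are equivalent, but `.first()` states the
    // intent and avoids the magic index.
    assert_eq!(xs.get(0), Some(&10)); // flagged by clippy::get_first
    assert_eq!(xs.first(), Some(&10)); // preferred
}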

View File

@@ -70,12 +70,12 @@ async fn produces_attestations_from_attestation_simulator_service() {
}
// Compare the prometheus metrics that evaluate the performance of the unaggregated attestations
let hit_prometheus_metrics = vec![
let hit_prometheus_metrics = [
metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL,
metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL,
metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL,
];
let miss_prometheus_metrics = vec![
let miss_prometheus_metrics = [
metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL,
metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL,
metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL,
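
Replacing `vec![...]` with a plain array here follows clippy's `useless_vec` lint: when a collection is only iterated and never resized, a fixed-size array avoids the heap allocation. A sketch with illustrative metric names:

fn main() {
    // An array works wherever the Vec was only iterated (clippy::useless_vec).
    let hit_metrics = ["head_hit", "target_hit", "source_hit"]; // not vec![...]
    for metric in &hit_metrics {
        println!("{metric}");
    }
}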

View File

@@ -431,10 +431,12 @@ impl GossipTester {
.chain
.verify_aggregated_attestation_for_gossip(&aggregate)
.err()
.expect(&format!(
"{} should error during verify_aggregated_attestation_for_gossip",
desc
));
.unwrap_or_else(|| {
panic!(
"{} should error during verify_aggregated_attestation_for_gossip",
desc
)
});
inspect_err(&self, err);
/*
@@ -449,10 +451,12 @@ impl GossipTester {
.unwrap();
assert_eq!(results.len(), 2);
let batch_err = results.pop().unwrap().err().expect(&format!(
"{} should error during batch_verify_aggregated_attestations_for_gossip",
desc
));
let batch_err = results.pop().unwrap().err().unwrap_or_else(|| {
panic!(
"{} should error during batch_verify_aggregated_attestations_for_gossip",
desc
)
});
inspect_err(&self, batch_err);
self
@@ -475,10 +479,12 @@ impl GossipTester {
.chain
.verify_unaggregated_attestation_for_gossip(&attn, Some(subnet_id))
.err()
.expect(&format!(
"{} should error during verify_unaggregated_attestation_for_gossip",
desc
));
.unwrap_or_else(|| {
panic!(
"{} should error during verify_unaggregated_attestation_for_gossip",
desc
)
});
inspect_err(&self, err);
/*
@@ -496,10 +502,12 @@ impl GossipTester {
)
.unwrap();
assert_eq!(results.len(), 2);
let batch_err = results.pop().unwrap().err().expect(&format!(
"{} should error during batch_verify_unaggregated_attestations_for_gossip",
desc
));
let batch_err = results.pop().unwrap().err().unwrap_or_else(|| {
panic!(
"{} should error during batch_verify_unaggregated_attestations_for_gossip",
desc
)
});
inspect_err(&self, batch_err);
self
@@ -816,7 +824,7 @@ async fn aggregated_gossip_verification() {
let (index, sk) = tester.non_aggregator();
*a = SignedAggregateAndProof::from_aggregate(
index as u64,
tester.valid_aggregate.message().aggregate().clone(),
tester.valid_aggregate.message().aggregate(),
None,
&sk,
&chain.canonical_head.cached_head().head_fork(),
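
The `expect(&format!(...))` rewrites above address clippy's `expect_fun_call` lint: the format string is allocated eagerly even on the success path, whereas `unwrap_or_else(|| panic!(...))` builds the message only on failure. A minimal sketch, assuming a simple map lookup:

use std::collections::HashMap;

fn lookup(map: &HashMap<&str, u64>, key: &str) -> u64 {
    // Eager: allocates the message String even when the key exists
    // (flagged by clippy::expect_fun_call):
    //   *map.get(key).expect(&format!("missing key {key}"))
    // Lazy: the message is only built on the failure path:
    *map.get(key)
        .unwrap_or_else(|| panic!("missing key {key}"))
}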

View File

@@ -82,7 +82,7 @@ async fn merge_with_terminal_block_hash_override() {
let block = &harness.chain.head_snapshot().beacon_block;
let execution_payload = block.message().body().execution_payload().unwrap().clone();
let execution_payload = block.message().body().execution_payload().unwrap();
if i == 0 {
assert_eq!(execution_payload.block_hash(), genesis_pow_block_hash);
}
@@ -133,7 +133,7 @@ async fn base_altair_bellatrix_with_terminal_block_after_fork() {
* Do the Bellatrix fork, without a terminal PoW block.
*/
harness.extend_to_slot(bellatrix_fork_slot).await;
Box::pin(harness.extend_to_slot(bellatrix_fork_slot)).await;
let bellatrix_head = &harness.chain.head_snapshot().beacon_block;
assert!(bellatrix_head.as_bellatrix().is_ok());
@@ -207,15 +207,7 @@ async fn base_altair_bellatrix_with_terminal_block_after_fork() {
harness.extend_slots(1).await;
let block = &harness.chain.head_snapshot().beacon_block;
execution_payloads.push(
block
.message()
.body()
.execution_payload()
.unwrap()
.clone()
.into(),
);
execution_payloads.push(block.message().body().execution_payload().unwrap().into());
}
verify_execution_payload_chain(execution_payloads.as_slice());

View File

@@ -54,7 +54,7 @@ async fn base_altair_bellatrix_capella() {
/*
* Do the Altair fork.
*/
harness.extend_to_slot(altair_fork_slot).await;
Box::pin(harness.extend_to_slot(altair_fork_slot)).await;
let altair_head = &harness.chain.head_snapshot().beacon_block;
assert!(altair_head.as_altair().is_ok());
@@ -63,7 +63,7 @@ async fn base_altair_bellatrix_capella() {
/*
* Do the Bellatrix fork, without a terminal PoW block.
*/
harness.extend_to_slot(bellatrix_fork_slot).await;
Box::pin(harness.extend_to_slot(bellatrix_fork_slot)).await;
let bellatrix_head = &harness.chain.head_snapshot().beacon_block;
assert!(bellatrix_head.as_bellatrix().is_ok());
@@ -81,7 +81,7 @@ async fn base_altair_bellatrix_capella() {
/*
* Next Bellatrix block shouldn't include an exec payload.
*/
harness.extend_slots(1).await;
Box::pin(harness.extend_slots(1)).await;
let one_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block;
assert!(
@@ -112,7 +112,7 @@ async fn base_altair_bellatrix_capella() {
terminal_block.timestamp = timestamp;
}
});
harness.extend_slots(1).await;
Box::pin(harness.extend_slots(1)).await;
let two_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block;
assert!(
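
The `Box::pin(...)` wrappers in this hunk and the previous file implement the "reduce future sizes in tests" item from the commit message: an async fn's locals that live across an await point become part of the future's state machine, and awaiting such a future inline embeds that whole layout in the caller's future (presumably flagged here by clippy's `large_futures` lint). Boxing keeps only a pointer in the outer future. A self-contained sketch:

async fn build_chain() {
    // A large local held across an await point inflates the future's size.
    let big = [0u8; 16 * 1024];
    std::future::ready(()).await;
    assert_eq!(big.len(), 16 * 1024);
}

async fn test_body() {
    // Inline, build_chain's ~16 KiB state machine would be embedded in
    // test_body's future:
    //   build_chain().await;
    // Boxed, only a pointer lives in the outer future:
    Box::pin(build_chain()).await;
}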

View File

@@ -413,7 +413,7 @@ async fn invalid_payload_invalidates_parent() {
rig.import_block(Payload::Valid).await; // Import a valid transition block.
rig.move_to_first_justification(Payload::Syncing).await;
let roots = vec![
let roots = [
rig.import_block(Payload::Syncing).await,
rig.import_block(Payload::Syncing).await,
rig.import_block(Payload::Syncing).await,
@@ -1052,7 +1052,7 @@ async fn invalid_parent() {
// Ensure the block built atop an invalid payload is invalid for gossip.
assert!(matches!(
rig.harness.chain.clone().verify_block_for_gossip(block.clone().into()).await,
rig.harness.chain.clone().verify_block_for_gossip(block.clone()).await,
Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root })
if invalid_root == parent_root
));

View File

@@ -330,7 +330,7 @@ async fn long_skip() {
final_blocks as usize,
BlockStrategy::ForkCanonicalChainAt {
previous_slot: Slot::new(initial_blocks),
first_slot: Slot::new(initial_blocks + skip_slots as u64 + 1),
first_slot: Slot::new(initial_blocks + skip_slots + 1),
},
AttestationStrategy::AllValidators,
)
@@ -381,8 +381,7 @@ async fn randao_genesis_storage() {
.beacon_state
.randao_mixes()
.iter()
.find(|x| **x == genesis_value)
.is_some());
.any(|x| *x == genesis_value));
// Then upon adding one more block, it isn't
harness.advance_slot();
@@ -393,14 +392,13 @@ async fn randao_genesis_storage() {
AttestationStrategy::AllValidators,
)
.await;
assert!(harness
assert!(!harness
.chain
.head_snapshot()
.beacon_state
.randao_mixes()
.iter()
.find(|x| **x == genesis_value)
.is_none());
.any(|x| *x == genesis_value));
check_finalization(&harness, num_slots);
check_split_slot(&harness, store);
@@ -1062,7 +1060,7 @@ fn check_shuffling_compatible(
let current_epoch_shuffling_is_compatible = harness.chain.shuffling_is_compatible(
&block_root,
head_state.current_epoch(),
&head_state,
head_state,
);
// Check for consistency with the more expensive shuffling lookup.
@@ -1102,7 +1100,7 @@ fn check_shuffling_compatible(
let previous_epoch_shuffling_is_compatible = harness.chain.shuffling_is_compatible(
&block_root,
head_state.previous_epoch(),
&head_state,
head_state,
);
harness
.chain
@@ -1130,14 +1128,11 @@ fn check_shuffling_compatible(
// Targeting two epochs before the current epoch should always return false
if head_state.current_epoch() >= 2 {
assert_eq!(
harness.chain.shuffling_is_compatible(
&block_root,
head_state.current_epoch() - 2,
&head_state
),
false
);
assert!(!harness.chain.shuffling_is_compatible(
&block_root,
head_state.current_epoch() - 2,
head_state
));
}
}
}
@@ -1559,14 +1554,13 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() {
.map(Into::into)
.collect();
let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap();
let (canonical_blocks, _, _, _) = rig
.add_attested_blocks_at_slots(
canonical_state,
canonical_state_root,
&canonical_slots,
&honest_validators,
)
.await;
let (canonical_blocks, _, _, _) = Box::pin(rig.add_attested_blocks_at_slots(
canonical_state,
canonical_state_root,
&canonical_slots,
&honest_validators,
))
.await;
// Postconditions
let canonical_blocks: HashMap<Slot, SignedBeaconBlockHash> = canonical_blocks_zeroth_epoch
@@ -1939,7 +1933,7 @@ async fn prune_single_block_long_skip() {
2 * slots_per_epoch,
1,
2 * slots_per_epoch,
2 * slots_per_epoch as u64,
2 * slots_per_epoch,
1,
)
.await;
@@ -1961,31 +1955,45 @@ async fn prune_shared_skip_states_mid_epoch() {
#[tokio::test]
async fn prune_shared_skip_states_epoch_boundaries() {
let slots_per_epoch = E::slots_per_epoch();
pruning_test(slots_per_epoch - 1, 1, slots_per_epoch, 2, slots_per_epoch).await;
pruning_test(slots_per_epoch - 1, 2, slots_per_epoch, 1, slots_per_epoch).await;
pruning_test(
2 * slots_per_epoch + slots_per_epoch / 2,
slots_per_epoch as u64 / 2,
Box::pin(pruning_test(
slots_per_epoch - 1,
1,
slots_per_epoch,
slots_per_epoch as u64 / 2 + 1,
2,
slots_per_epoch,
)
))
.await;
pruning_test(
2 * slots_per_epoch + slots_per_epoch / 2,
slots_per_epoch as u64 / 2,
Box::pin(pruning_test(
slots_per_epoch - 1,
2,
slots_per_epoch,
slots_per_epoch as u64 / 2 + 1,
1,
slots_per_epoch,
)
))
.await;
pruning_test(
Box::pin(pruning_test(
2 * slots_per_epoch + slots_per_epoch / 2,
slots_per_epoch / 2,
slots_per_epoch,
slots_per_epoch / 2 + 1,
slots_per_epoch,
))
.await;
Box::pin(pruning_test(
2 * slots_per_epoch + slots_per_epoch / 2,
slots_per_epoch / 2,
slots_per_epoch,
slots_per_epoch / 2 + 1,
slots_per_epoch,
))
.await;
Box::pin(pruning_test(
2 * slots_per_epoch - 1,
slots_per_epoch as u64,
slots_per_epoch,
1,
0,
2 * slots_per_epoch,
)
))
.await;
}
@@ -2094,7 +2102,7 @@ async fn pruning_test(
);
check_chain_dump(
&harness,
(num_initial_blocks + num_canonical_middle_blocks + num_finalization_blocks + 1) as u64,
num_initial_blocks + num_canonical_middle_blocks + num_finalization_blocks + 1,
);
let all_canonical_states = harness
@@ -2613,8 +2621,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() {
harness.advance_slot();
}
harness.extend_to_slot(finalizing_slot - 1).await;
harness
.add_block_at_slot(finalizing_slot, harness.get_current_state())
Box::pin(harness.add_block_at_slot(finalizing_slot, harness.get_current_state()))
.await
.unwrap();

View File

@@ -73,7 +73,7 @@ fn get_valid_sync_committee_message_for_block(
let head_state = harness.chain.head_beacon_state_cloned();
let (signature, _) = harness
.make_sync_committee_messages(&head_state, block_root, slot, relative_sync_committee)
.get(0)
.first()
.expect("sync messages should exist")
.get(message_index)
.expect("first sync message should exist")
@@ -104,7 +104,7 @@ fn get_valid_sync_contribution(
);
let (_, contribution_opt) = sync_contributions
.get(0)
.first()
.expect("sync contributions should exist");
let contribution = contribution_opt
.as_ref()

View File

@@ -170,7 +170,7 @@ async fn find_reorgs() {
harness
.extend_chain(
num_blocks_produced as usize,
num_blocks_produced,
BlockStrategy::OnCanonicalHead,
// No need to produce attestations for this test.
AttestationStrategy::SomeValidators(vec![]),
@@ -203,7 +203,7 @@ async fn find_reorgs() {
assert_eq!(
find_reorg_slot(
&harness.chain,
&head_state,
head_state,
harness.chain.head_beacon_block().canonical_root()
),
head_slot
@@ -503,7 +503,6 @@ async fn unaggregated_attestations_added_to_fork_choice_some_none() {
.unwrap();
let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT)
.into_iter()
.map(|validator_index| {
let slot = state
.get_attestation_duties(validator_index, RelativeEpoch::Current)
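
Dropping the redundant `.into_iter()` on the range above follows clippy's `useless_conversion` lint (ranges already implement Iterator); the related `.into_iter()`-on-a-reference cleanups elsewhere in this commit fall under `into_iter_on_ref`, where `.iter()` is the explicit equivalent. A sketch of both:

fn main() {
    // Ranges are already iterators, so `.into_iter()` is redundant
    // (clippy::useless_conversion):
    let squares: Vec<u64> = (0..4).map(|i| i * i).collect();
    // On a shared reference, `into_iter()` yields references just like
    // `iter()`; clippy::into_iter_on_ref prefers the explicit form:
    let v = vec![1, 2, 3];
    let doubled: Vec<i32> = v.iter().map(|x| x * 2).collect();
    assert_eq!(squares, vec![0, 1, 4, 9]);
    assert_eq!(doubled, vec![2, 4, 6]);
}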

View File

@@ -322,7 +322,7 @@ pub async fn consensus_gossip() {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn consensus_partial_pass_only_consensus() {
/* this test targets gossip-level validation */
let validation_level: Option<BroadcastValidation> = Some(BroadcastValidation::Consensus);
let validation_level = BroadcastValidation::Consensus;
// Validator count needs to be at least 32 or proposer boost gets set to 0 when computing
// `validator_count // 32`.
@@ -378,7 +378,7 @@ pub async fn consensus_partial_pass_only_consensus() {
tester.harness.chain.clone(),
&channel.0,
test_logger,
validation_level.unwrap(),
validation_level,
StatusCode::ACCEPTED,
network_globals,
)
@@ -615,8 +615,7 @@ pub async fn equivocation_gossip() {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn equivocation_consensus_late_equivocation() {
/* this test targets gossip-level validation */
let validation_level: Option<BroadcastValidation> =
Some(BroadcastValidation::ConsensusAndEquivocation);
let validation_level = BroadcastValidation::ConsensusAndEquivocation;
// Validator count needs to be at least 32 or proposer boost gets set to 0 when computing
// `validator_count // 32`.
@@ -671,7 +670,7 @@ pub async fn equivocation_consensus_late_equivocation() {
tester.harness.chain,
&channel.0,
test_logger,
validation_level.unwrap(),
validation_level,
StatusCode::ACCEPTED,
network_globals,
)
@@ -1228,8 +1227,7 @@ pub async fn blinded_equivocation_gossip() {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn blinded_equivocation_consensus_late_equivocation() {
/* this test targets gossip-level validation */
let validation_level: Option<BroadcastValidation> =
Some(BroadcastValidation::ConsensusAndEquivocation);
let validation_level = BroadcastValidation::ConsensusAndEquivocation;
// Validator count needs to be at least 32 or proposer boost gets set to 0 when computing
// `validator_count // 32`.
@@ -1311,7 +1309,7 @@ pub async fn blinded_equivocation_consensus_late_equivocation() {
tester.harness.chain,
&channel.0,
test_logger,
validation_level.unwrap(),
validation_level,
StatusCode::ACCEPTED,
network_globals,
)
@@ -1465,8 +1463,8 @@ pub async fn block_seen_on_gossip_with_some_blobs() {
"need at least 2 blobs for partial reveal"
);
let partial_kzg_proofs = vec![blobs.0.get(0).unwrap().clone()];
let partial_blobs = vec![blobs.1.get(0).unwrap().clone()];
let partial_kzg_proofs = vec![*blobs.0.first().unwrap()];
let partial_blobs = vec![blobs.1.first().unwrap().clone()];
// Simulate the block being seen on gossip.
block

View File

@@ -139,7 +139,7 @@ impl ForkChoiceUpdates {
fn insert(&mut self, update: ForkChoiceUpdateMetadata) {
self.updates
.entry(update.state.head_block_hash)
.or_insert_with(Vec::new)
.or_default()
.push(update);
}
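
`or_insert_with(Vec::new)` and `or_default()` are equivalent on the entry API, since `Vec::new()` is `Vec::default()`; clippy suggests the shorter form (likely via `unwrap_or_default` or a sibling lint; the exact lint name is not recorded in the commit). A minimal sketch:

use std::collections::HashMap;

fn main() {
    let mut updates: HashMap<&str, Vec<u32>> = HashMap::new();
    // Equivalent to .or_insert_with(Vec::new), but shorter:
    updates.entry("head").or_default().push(1);
    assert_eq!(updates["head"], vec![1]);
}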

View File

@@ -57,18 +57,18 @@ async fn el_syncing_then_synced() {
mock_el.el.upcheck().await;
let api_response = tester.client.get_node_syncing().await.unwrap().data;
assert_eq!(api_response.el_offline, false);
assert_eq!(api_response.is_optimistic, false);
assert_eq!(api_response.is_syncing, false);
assert!(!api_response.el_offline);
assert!(!api_response.is_optimistic);
assert!(!api_response.is_syncing);
// EL synced
mock_el.server.set_syncing_response(Ok(false));
mock_el.el.upcheck().await;
let api_response = tester.client.get_node_syncing().await.unwrap().data;
assert_eq!(api_response.el_offline, false);
assert_eq!(api_response.is_optimistic, false);
assert_eq!(api_response.is_syncing, false);
assert!(!api_response.el_offline);
assert!(!api_response.is_optimistic);
assert!(!api_response.is_syncing);
}
/// Check `syncing` endpoint when the EL is offline (errors on upcheck).
@@ -85,9 +85,9 @@ async fn el_offline() {
mock_el.el.upcheck().await;
let api_response = tester.client.get_node_syncing().await.unwrap().data;
assert_eq!(api_response.el_offline, true);
assert_eq!(api_response.is_optimistic, false);
assert_eq!(api_response.is_syncing, false);
assert!(api_response.el_offline);
assert!(!api_response.is_optimistic);
assert!(!api_response.is_syncing);
}
/// Check `syncing` endpoint when the EL errors on newPayload but is not fully offline.
@@ -128,9 +128,9 @@ async fn el_error_on_new_payload() {
// The EL should now be *offline* according to the API.
let api_response = tester.client.get_node_syncing().await.unwrap().data;
assert_eq!(api_response.el_offline, true);
assert_eq!(api_response.is_optimistic, false);
assert_eq!(api_response.is_syncing, false);
assert!(api_response.el_offline);
assert!(!api_response.is_optimistic);
assert!(!api_response.is_syncing);
// Processing a block successfully should remove the status.
mock_el.server.set_new_payload_status(
@@ -144,9 +144,9 @@ async fn el_error_on_new_payload() {
harness.process_block_result((block, blobs)).await.unwrap();
let api_response = tester.client.get_node_syncing().await.unwrap().data;
assert_eq!(api_response.el_offline, false);
assert_eq!(api_response.is_optimistic, false);
assert_eq!(api_response.is_syncing, false);
assert!(!api_response.el_offline);
assert!(!api_response.is_optimistic);
assert!(!api_response.is_syncing);
}
/// Check `node health` endpoint when the EL is offline.
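
The assert rewrites above follow clippy's `bool_assert_comparison` lint: comparing a bool against a literal with `assert_eq!` obscures the predicate, and `assert!` / `assert!(!...)` says the same thing directly. A minimal sketch:

fn main() {
    let el_offline = false;
    // assert_eq!(el_offline, false); // flagged by clippy::bool_assert_comparison
    assert!(!el_offline); // preferred
}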

View File

@@ -274,10 +274,10 @@ impl ApiTester {
let mock_builder_server = harness.set_mock_builder(beacon_url.clone());
// Start the mock builder service prior to building the chain out.
harness.runtime.task_executor.spawn(
async move { mock_builder_server.await },
"mock_builder_server",
);
harness
.runtime
.task_executor
.spawn(mock_builder_server, "mock_builder_server");
let mock_builder = harness.mock_builder.clone();
@@ -641,7 +641,7 @@ impl ApiTester {
self
}
pub async fn test_beacon_blocks_finalized<E: EthSpec>(self) -> Self {
pub async fn test_beacon_blocks_finalized(self) -> Self {
for block_id in self.interesting_block_ids() {
let block_root = block_id.root(&self.chain);
let block = block_id.full_block(&self.chain).await;
@@ -678,7 +678,7 @@ impl ApiTester {
self
}
pub async fn test_beacon_blinded_blocks_finalized<E: EthSpec>(self) -> Self {
pub async fn test_beacon_blinded_blocks_finalized(self) -> Self {
for block_id in self.interesting_block_ids() {
let block_root = block_id.root(&self.chain);
let block = block_id.full_block(&self.chain).await;
@@ -819,7 +819,7 @@ impl ApiTester {
let validator_index_ids = validator_indices
.iter()
.cloned()
.map(|i| ValidatorId::Index(i))
.map(ValidatorId::Index)
.collect::<Vec<ValidatorId>>();
let unsupported_media_response = self
@@ -859,7 +859,7 @@ impl ApiTester {
let validator_index_ids = validator_indices
.iter()
.cloned()
.map(|i| ValidatorId::Index(i))
.map(ValidatorId::Index)
.collect::<Vec<ValidatorId>>();
let validator_pubkey_ids = validator_indices
.iter()
@@ -910,7 +910,7 @@ impl ApiTester {
for i in validator_indices {
if i < state.balances().len() as u64 {
validators.push(ValidatorBalanceData {
index: i as u64,
index: i,
balance: *state.balances().get(i as usize).unwrap(),
});
}
@@ -944,7 +944,7 @@ impl ApiTester {
let validator_index_ids = validator_indices
.iter()
.cloned()
.map(|i| ValidatorId::Index(i))
.map(ValidatorId::Index)
.collect::<Vec<ValidatorId>>();
let validator_pubkey_ids = validator_indices
.iter()
@@ -1012,7 +1012,7 @@ impl ApiTester {
|| statuses.contains(&status.superstatus())
{
validators.push(ValidatorData {
index: i as u64,
index: i,
balance: *state.balances().get(i as usize).unwrap(),
status,
validator,
@@ -1641,11 +1641,7 @@ impl ApiTester {
let (block, _, _) = block_id.full_block(&self.chain).await.unwrap();
let num_blobs = block.num_expected_blobs();
let blob_indices = if use_indices {
Some(
(0..num_blobs.saturating_sub(1) as u64)
.into_iter()
.collect::<Vec<_>>(),
)
Some((0..num_blobs.saturating_sub(1) as u64).collect::<Vec<_>>())
} else {
None
};
@@ -1663,7 +1659,7 @@ impl ApiTester {
blob_indices.map_or(num_blobs, |indices| indices.len())
);
let expected = block.slot();
assert_eq!(result.get(0).unwrap().slot(), expected);
assert_eq!(result.first().unwrap().slot(), expected);
self
}
@@ -1701,9 +1697,9 @@ impl ApiTester {
break;
}
}
let test_slot = test_slot.expect(&format!(
"should be able to find a block matching zero_blobs={zero_blobs}"
));
let test_slot = test_slot.unwrap_or_else(|| {
panic!("should be able to find a block matching zero_blobs={zero_blobs}")
});
match self
.client
@@ -1772,7 +1768,6 @@ impl ApiTester {
.attestations()
.map(|att| att.clone_as_attestation())
.collect::<Vec<_>>()
.into()
},
);
@@ -1909,7 +1904,7 @@ impl ApiTester {
let result = match self
.client
.get_beacon_light_client_updates::<E>(current_sync_committee_period as u64, 1)
.get_beacon_light_client_updates::<E>(current_sync_committee_period, 1)
.await
{
Ok(result) => result,
@@ -1921,7 +1916,7 @@ impl ApiTester {
.light_client_server_cache
.get_light_client_updates(
&self.chain.store,
current_sync_committee_period as u64,
current_sync_committee_period,
1,
&self.chain.spec,
)
@@ -2314,7 +2309,7 @@ impl ApiTester {
.unwrap()
.data
.is_syncing;
assert_eq!(is_syncing, true);
assert!(is_syncing);
// Reset sync state.
*self
@@ -2364,7 +2359,7 @@ impl ApiTester {
pub async fn test_get_node_peers_by_id(self) -> Self {
let result = self
.client
.get_node_peers_by_id(self.external_peer_id.clone())
.get_node_peers_by_id(self.external_peer_id)
.await
.unwrap()
.data;
@@ -3514,6 +3509,7 @@ impl ApiTester {
self
}
#[allow(clippy::await_holding_lock)] // This is a test, so it should be fine.
pub async fn test_get_validator_aggregate_attestation(self) -> Self {
if self
.chain
@@ -4058,7 +4054,7 @@ impl ApiTester {
ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"),
};
let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64);
let expected_fee_recipient = Address::from_low_u64_be(proposer_index);
assert_eq!(payload.fee_recipient(), expected_fee_recipient);
assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT);
@@ -4085,7 +4081,7 @@ impl ApiTester {
ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"),
};
let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64);
let expected_fee_recipient = Address::from_low_u64_be(proposer_index);
assert_eq!(payload.fee_recipient(), expected_fee_recipient);
// This is the graffiti of the mock execution layer, not the builder.
assert_eq!(payload.extra_data(), mock_el_extra_data::<E>());
@@ -4113,7 +4109,7 @@ impl ApiTester {
ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"),
};
let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64);
let expected_fee_recipient = Address::from_low_u64_be(proposer_index);
assert_eq!(payload.fee_recipient(), expected_fee_recipient);
assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT);
@@ -4137,7 +4133,7 @@ impl ApiTester {
.unwrap()
.into();
let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64);
let expected_fee_recipient = Address::from_low_u64_be(proposer_index);
assert_eq!(payload.fee_recipient(), expected_fee_recipient);
assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT);
@@ -4183,7 +4179,7 @@ impl ApiTester {
.unwrap()
.into();
let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64);
let expected_fee_recipient = Address::from_low_u64_be(proposer_index);
assert_eq!(payload.fee_recipient(), expected_fee_recipient);
assert_eq!(payload.gas_limit(), builder_limit);
@@ -4267,7 +4263,7 @@ impl ApiTester {
ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"),
};
let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64);
let expected_fee_recipient = Address::from_low_u64_be(proposer_index);
assert_eq!(payload.fee_recipient(), expected_fee_recipient);
assert_eq!(payload.gas_limit(), 30_000_000);
@@ -5140,9 +5136,8 @@ impl ApiTester {
pub async fn test_builder_chain_health_optimistic_head(self) -> Self {
// Make sure the next payload verification will return optimistic before advancing the chain.
self.harness.mock_execution_layer.as_ref().map(|el| {
self.harness.mock_execution_layer.as_ref().inspect(|el| {
el.server.all_payloads_syncing(true);
el
});
self.harness
.extend_chain(
@@ -5169,7 +5164,7 @@ impl ApiTester {
.unwrap()
.into();
let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64);
let expected_fee_recipient = Address::from_low_u64_be(proposer_index);
assert_eq!(payload.fee_recipient(), expected_fee_recipient);
// If this cache is populated, it indicates fallback to the local EE was correctly used.
@@ -5188,9 +5183,8 @@ impl ApiTester {
pub async fn test_builder_v3_chain_health_optimistic_head(self) -> Self {
// Make sure the next payload verification will return optimistic before advancing the chain.
self.harness.mock_execution_layer.as_ref().map(|el| {
self.harness.mock_execution_layer.as_ref().inspect(|el| {
el.server.all_payloads_syncing(true);
el
});
self.harness
.extend_chain(
@@ -5220,7 +5214,7 @@ impl ApiTester {
ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"),
};
let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64);
let expected_fee_recipient = Address::from_low_u64_be(proposer_index);
assert_eq!(payload.fee_recipient(), expected_fee_recipient);
self
@@ -6101,16 +6095,17 @@ impl ApiTester {
assert_eq!(result.execution_optimistic, Some(false));
// Change head to be optimistic.
self.chain
if let Some(head_node) = self
.chain
.canonical_head
.fork_choice_write_lock()
.proto_array_mut()
.core_proto_array_mut()
.nodes
.last_mut()
.map(|head_node| {
head_node.execution_status = ExecutionStatus::Optimistic(ExecutionBlockHash::zero())
});
{
head_node.execution_status = ExecutionStatus::Optimistic(ExecutionBlockHash::zero())
}
// Check responses are now optimistic.
let result = self
@@ -6143,8 +6138,8 @@ async fn poll_events<S: Stream<Item = Result<EventKind<E>, eth2::Error>> + Unpin
};
tokio::select! {
_ = collect_stream_fut => {events}
_ = tokio::time::sleep(timeout) => { return events; }
_ = collect_stream_fut => { events }
_ = tokio::time::sleep(timeout) => { events }
}
}
@@ -6180,31 +6175,31 @@ async fn test_unsupported_media_response() {
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_get() {
async fn beacon_get_state_hashes() {
ApiTester::new()
.await
.test_beacon_states_root_finalized()
.await
.test_beacon_states_finality_checkpoints_finalized()
.await
.test_beacon_states_root()
.await
.test_beacon_states_finality_checkpoints()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_get_state_info() {
ApiTester::new()
.await
.test_beacon_genesis()
.await
.test_beacon_states_root_finalized()
.await
.test_beacon_states_fork_finalized()
.await
.test_beacon_states_finality_checkpoints_finalized()
.await
.test_beacon_headers_block_id_finalized()
.await
.test_beacon_blocks_finalized::<MainnetEthSpec>()
.await
.test_beacon_blinded_blocks_finalized::<MainnetEthSpec>()
.await
.test_debug_beacon_states_finalized()
.await
.test_beacon_states_root()
.await
.test_beacon_states_fork()
.await
.test_beacon_states_finality_checkpoints()
.await
.test_beacon_states_validators()
.await
.test_beacon_states_validator_balances()
@@ -6214,6 +6209,18 @@ async fn beacon_get() {
.test_beacon_states_validator_id()
.await
.test_beacon_states_randao()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_get_blocks() {
ApiTester::new()
.await
.test_beacon_headers_block_id_finalized()
.await
.test_beacon_blocks_finalized()
.await
.test_beacon_blinded_blocks_finalized()
.await
.test_beacon_headers_all_slots()
.await
@@ -6228,6 +6235,12 @@ async fn beacon_get() {
.test_beacon_blocks_attestations()
.await
.test_beacon_blocks_root()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_get_pools() {
ApiTester::new()
.await
.test_get_beacon_pool_attestations()
.await
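
This file also carries the one deliberate escape hatch in the commit, `#[allow(clippy::await_holding_lock)]` (see the hunk at 3514 above), matching the "fix `await_holding_lock`s" and "restore holding lock for web3signer download" items in the message. The lint fires when a synchronous lock guard is held across an `.await`, since the future may be parked or moved to another thread while the lock is still held. A minimal sketch of the hazard and the usual fix, using a std `Mutex`:

use std::sync::Mutex;

async fn update(counter: &Mutex<u64>) {
    // Hazard (flagged by clippy::await_holding_lock):
    //   let mut guard = counter.lock().unwrap();
    //   some_io().await; // guard still alive across the await
    //   *guard += 1;
    //
    // Fix: confine the guard to a synchronous scope so it drops first.
    {
        let mut guard = counter.lock().unwrap();
        *guard += 1;
    } // guard dropped here
    some_io().await;
}

async fn some_io() {
    std::future::ready(()).await;
}

Where a test intentionally holds the lock across the await (as in the web3signer download case), the targeted `#[allow(...)]` is the pragmatic alternative.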

View File

@@ -250,18 +250,17 @@ impl futures::stream::Stream for GossipCache {
Poll::Ready(Some(expired)) => {
let expected_key = expired.key();
let (topic, data) = expired.into_inner();
match self.topic_msgs.get_mut(&topic) {
Some(msgs) => {
let key = msgs.remove(&data);
debug_assert_eq!(key, Some(expected_key));
if msgs.is_empty() {
// no more messages for this topic.
self.topic_msgs.remove(&topic);
}
}
None => {
#[cfg(debug_assertions)]
panic!("Topic for registered message is not present.")
let topic_msg = self.topic_msgs.get_mut(&topic);
debug_assert!(
topic_msg.is_some(),
"Topic for registered message is not present."
);
if let Some(msgs) = topic_msg {
let key = msgs.remove(&data);
debug_assert_eq!(key, Some(expected_key));
if msgs.is_empty() {
// no more messages for this topic.
self.topic_msgs.remove(&topic);
}
}
Poll::Ready(Some(Ok(topic)))
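
This rewrite corresponds to the "avoid failing lint due to now-disabled `#[cfg(debug_assertions)]`" item: in optimized test builds the `#[cfg(debug_assertions)] panic!(...)` arm compiles to an empty block, which the newly enabled lints flagged. `debug_assert!` keeps the debug-only check without leaving a dead arm in release builds. A minimal sketch of the pattern:

fn handle(entry: Option<&str>) {
    // Before: the panic existed only in debug builds, leaving an empty
    // match arm in optimized builds:
    //   None => {
    //       #[cfg(debug_assertions)]
    //       panic!("entry missing")
    //   }
    // After: debug_assert! is a no-op in release, with no dead arm.
    debug_assert!(entry.is_some(), "entry missing");
    if let Some(v) = entry {
        println!("{v}");
    }
}

fn main() {
    handle(Some("ok"));
}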

View File

@@ -527,7 +527,7 @@ impl TestRig {
self.assert_event_journal(
&expected
.iter()
.map(|ev| Into::<&'static str>::into(ev))
.map(Into::<&'static str>::into)
.chain(std::iter::once(WORKER_FREED))
.chain(std::iter::once(NOTHING_TO_DO))
.collect::<Vec<_>>(),

View File

@@ -1,235 +1,229 @@
#[cfg(not(debug_assertions))]
#[cfg(test)]
mod tests {
use crate::persisted_dht::load_dht;
use crate::{NetworkConfig, NetworkService};
use beacon_chain::test_utils::BeaconChainHarness;
use beacon_chain::BeaconChainTypes;
use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig};
use futures::StreamExt;
use lighthouse_network::types::{GossipEncoding, GossipKind};
use lighthouse_network::{Enr, GossipTopic};
use slog::{o, Drain, Level, Logger};
use sloggers::{null::NullLoggerBuilder, Build};
use std::str::FromStr;
use std::sync::Arc;
use tokio::runtime::Runtime;
use types::{Epoch, EthSpec, ForkName, MinimalEthSpec, SubnetId};
#![cfg(not(debug_assertions))]
#![cfg(test)]
use crate::persisted_dht::load_dht;
use crate::{NetworkConfig, NetworkService};
use beacon_chain::test_utils::BeaconChainHarness;
use beacon_chain::BeaconChainTypes;
use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig};
use futures::StreamExt;
use lighthouse_network::types::{GossipEncoding, GossipKind};
use lighthouse_network::{Enr, GossipTopic};
use slog::{o, Drain, Level, Logger};
use sloggers::{null::NullLoggerBuilder, Build};
use std::str::FromStr;
use std::sync::Arc;
use tokio::runtime::Runtime;
use types::{Epoch, EthSpec, ForkName, MinimalEthSpec, SubnetId};
impl<T: BeaconChainTypes> NetworkService<T> {
fn get_topic_params(&self, topic: GossipTopic) -> Option<&gossipsub::TopicScoreParams> {
self.libp2p.get_topic_params(topic)
}
impl<T: BeaconChainTypes> NetworkService<T> {
fn get_topic_params(&self, topic: GossipTopic) -> Option<&gossipsub::TopicScoreParams> {
self.libp2p.get_topic_params(topic)
}
}
fn get_logger(actual_log: bool) -> Logger {
if actual_log {
let drain = {
let decorator = slog_term::TermDecorator::new().build();
let decorator =
logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH);
let drain = slog_term::FullFormat::new(decorator).build().fuse();
let drain = slog_async::Async::new(drain).chan_size(2048).build();
drain.filter_level(Level::Debug)
};
fn get_logger(actual_log: bool) -> Logger {
if actual_log {
let drain = {
let decorator = slog_term::TermDecorator::new().build();
let decorator =
logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH);
let drain = slog_term::FullFormat::new(decorator).build().fuse();
let drain = slog_async::Async::new(drain).chan_size(2048).build();
drain.filter_level(Level::Debug)
};
Logger::root(drain.fuse(), o!())
} else {
let builder = NullLoggerBuilder;
builder.build().expect("should build logger")
}
Logger::root(drain.fuse(), o!())
} else {
let builder = NullLoggerBuilder;
builder.build().expect("should build logger")
}
}
#[test]
fn test_dht_persistence() {
let log = get_logger(false);
#[test]
fn test_dht_persistence() {
let log = get_logger(false);
let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec)
.default_spec()
.deterministic_keypairs(8)
.fresh_ephemeral_store()
.build()
.chain;
let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec)
.default_spec()
.deterministic_keypairs(8)
.fresh_ephemeral_store()
.build()
.chain;
let store = beacon_chain.store.clone();
let store = beacon_chain.store.clone();
let enr1 = Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap();
let enr2 = Enr::from_str("enr:-IS4QJ2d11eu6dC7E7LoXeLMgMP3kom1u3SE8esFSWvaHoo0dP1jg8O3-nx9ht-EO3CmG7L6OkHcMmoIh00IYWB92QABgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQIB_c-jQMOXsbjWkbN-Oj99H57gfId5pfb4wa1qxwV4CIN1ZHCCIyk").unwrap();
let enrs = vec![enr1, enr2];
let enr1 = Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap();
let enr2 = Enr::from_str("enr:-IS4QJ2d11eu6dC7E7LoXeLMgMP3kom1u3SE8esFSWvaHoo0dP1jg8O3-nx9ht-EO3CmG7L6OkHcMmoIh00IYWB92QABgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQIB_c-jQMOXsbjWkbN-Oj99H57gfId5pfb4wa1qxwV4CIN1ZHCCIyk").unwrap();
let enrs = vec![enr1, enr2];
let runtime = Arc::new(Runtime::new().unwrap());
let runtime = Arc::new(Runtime::new().unwrap());
let (signal, exit) = async_channel::bounded(1);
let (signal, exit) = async_channel::bounded(1);
let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
let executor =
task_executor::TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx);
let mut config = NetworkConfig::default();
config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21212, 21212, 21213);
config.discv5_config.table_filter = |_| true; // Do not ignore local IPs
config.upnp_enabled = false;
config.boot_nodes_enr = enrs.clone();
let config = Arc::new(config);
runtime.block_on(async move {
// Create a new network service which implicitly gets dropped at the
// end of the block.
let BeaconProcessorChannels {
beacon_processor_tx,
beacon_processor_rx: _beacon_processor_rx,
work_reprocessing_tx,
work_reprocessing_rx: _work_reprocessing_rx,
} = <_>::default();
let _network_service = NetworkService::start(
beacon_chain.clone(),
config,
executor,
None,
beacon_processor_tx,
work_reprocessing_tx,
)
.await
.unwrap();
drop(signal);
});
let raw_runtime = Arc::try_unwrap(runtime).unwrap();
raw_runtime.shutdown_timeout(tokio::time::Duration::from_secs(300));
// Load the persisted dht from the store
let persisted_enrs = load_dht(store);
assert!(
persisted_enrs.contains(&enrs[0]),
"should have persisted the first ENR to store"
);
assert!(
persisted_enrs.contains(&enrs[1]),
"should have persisted the second ENR to store"
);
}
// Test removing topic weight on old topics when a fork happens.
#[test]
fn test_removing_topic_weight_on_old_topics() {
let runtime = Arc::new(Runtime::new().unwrap());
// Capella spec
let mut spec = MinimalEthSpec::default_spec();
spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = Some(Epoch::new(0));
spec.capella_fork_epoch = Some(Epoch::new(1));
// Build beacon chain.
let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec)
.spec(spec.clone().into())
.deterministic_keypairs(8)
.fresh_ephemeral_store()
.mock_execution_layer()
.build()
.chain;
let (next_fork_name, _) = beacon_chain.duration_to_next_fork().expect("next fork");
assert_eq!(next_fork_name, ForkName::Capella);
// Build network service.
let (mut network_service, network_globals, _network_senders) = runtime.block_on(async {
let (_, exit) = async_channel::bounded(1);
let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
let executor = task_executor::TaskExecutor::new(
Arc::downgrade(&runtime),
exit,
log.clone(),
get_logger(false),
shutdown_tx,
);
let mut config = NetworkConfig::default();
config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21212, 21212, 21213);
config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21214, 21214, 21215);
config.discv5_config.table_filter = |_| true; // Do not ignore local IPs
config.upnp_enabled = false;
config.boot_nodes_enr = enrs.clone();
let config = Arc::new(config);
runtime.block_on(async move {
// Create a new network service which implicitly gets dropped at the
// end of the block.
let BeaconProcessorChannels {
beacon_processor_tx,
beacon_processor_rx: _beacon_processor_rx,
work_reprocessing_tx,
work_reprocessing_rx: _work_reprocessing_rx,
} = <_>::default();
let beacon_processor_channels =
BeaconProcessorChannels::new(&BeaconProcessorConfig::default());
NetworkService::build(
beacon_chain.clone(),
config,
executor.clone(),
None,
beacon_processor_channels.beacon_processor_tx,
beacon_processor_channels.work_reprocessing_tx,
)
.await
.unwrap()
});
let _network_service = NetworkService::start(
beacon_chain.clone(),
config,
executor,
None,
beacon_processor_tx,
work_reprocessing_tx,
)
.await
.unwrap();
drop(signal);
});
let raw_runtime = Arc::try_unwrap(runtime).unwrap();
raw_runtime.shutdown_timeout(tokio::time::Duration::from_secs(300));
// Load the persisted dht from the store
let persisted_enrs = load_dht(store);
assert!(
persisted_enrs.contains(&enrs[0]),
"should have persisted the first ENR to store"
);
assert!(
persisted_enrs.contains(&enrs[1]),
"should have persisted the second ENR to store"
);
}
// Test removing topic weight on old topics when a fork happens.
#[test]
fn test_removing_topic_weight_on_old_topics() {
let runtime = Arc::new(Runtime::new().unwrap());
// Capella spec
let mut spec = MinimalEthSpec::default_spec();
spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = Some(Epoch::new(0));
spec.capella_fork_epoch = Some(Epoch::new(1));
// Build beacon chain.
let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec)
.spec(spec.clone().into())
.deterministic_keypairs(8)
.fresh_ephemeral_store()
.mock_execution_layer()
.build()
.chain;
let (next_fork_name, _) = beacon_chain.duration_to_next_fork().expect("next fork");
assert_eq!(next_fork_name, ForkName::Capella);
// Build network service.
let (mut network_service, network_globals, _network_senders) = runtime.block_on(async {
let (_, exit) = async_channel::bounded(1);
let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
let executor = task_executor::TaskExecutor::new(
Arc::downgrade(&runtime),
exit,
get_logger(false),
shutdown_tx,
);
let mut config = NetworkConfig::default();
config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21214, 21214, 21215);
config.discv5_config.table_filter = |_| true; // Do not ignore local IPs
config.upnp_enabled = false;
let config = Arc::new(config);
let beacon_processor_channels =
BeaconProcessorChannels::new(&BeaconProcessorConfig::default());
NetworkService::build(
beacon_chain.clone(),
config,
executor.clone(),
None,
beacon_processor_channels.beacon_processor_tx,
beacon_processor_channels.work_reprocessing_tx,
)
.await
.unwrap()
});
// Subscribe to the topics.
runtime.block_on(async {
while network_globals.gossipsub_subscriptions.read().len() < 2 {
if let Some(msg) = network_service.subnet_service.next().await {
network_service.on_subnet_service_msg(msg);
}
// Subscribe to the topics.
runtime.block_on(async {
while network_globals.gossipsub_subscriptions.read().len() < 2 {
if let Some(msg) = network_service.subnet_service.next().await {
network_service.on_subnet_service_msg(msg);
}
});
// Make sure the service is subscribed to the topics.
let (old_topic1, old_topic2) = {
let mut subnets = SubnetId::compute_attestation_subnets(
network_globals.local_enr().node_id().raw(),
&spec,
)
.collect::<Vec<_>>();
assert_eq!(2, subnets.len());
let old_fork_digest = beacon_chain.enr_fork_id().fork_digest;
let old_topic1 = GossipTopic::new(
GossipKind::Attestation(subnets.pop().unwrap()),
GossipEncoding::SSZSnappy,
old_fork_digest,
);
let old_topic2 = GossipTopic::new(
GossipKind::Attestation(subnets.pop().unwrap()),
GossipEncoding::SSZSnappy,
old_fork_digest,
);
(old_topic1, old_topic2)
};
let subscriptions = network_globals.gossipsub_subscriptions.read().clone();
assert_eq!(2, subscriptions.len());
assert!(subscriptions.contains(&old_topic1));
assert!(subscriptions.contains(&old_topic2));
let old_topic_params1 = network_service
.get_topic_params(old_topic1.clone())
.expect("topic score params");
assert!(old_topic_params1.topic_weight > 0.0);
let old_topic_params2 = network_service
.get_topic_params(old_topic2.clone())
.expect("topic score params");
assert!(old_topic_params2.topic_weight > 0.0);
// Advance slot to the next fork
for _ in 0..MinimalEthSpec::slots_per_epoch() {
beacon_chain.slot_clock.advance_slot();
}
});
// Run `NetworkService::update_next_fork()`.
runtime.block_on(async {
network_service.update_next_fork();
});
// Make sure the service is subscribed to the topics.
let (old_topic1, old_topic2) = {
let mut subnets = SubnetId::compute_attestation_subnets(
network_globals.local_enr().node_id().raw(),
&spec,
)
.collect::<Vec<_>>();
assert_eq!(2, subnets.len());
// Check that topic_weight on the old topics has been zeroed.
let old_topic_params1 = network_service
.get_topic_params(old_topic1)
.expect("topic score params");
assert_eq!(0.0, old_topic_params1.topic_weight);
let old_fork_digest = beacon_chain.enr_fork_id().fork_digest;
let old_topic1 = GossipTopic::new(
GossipKind::Attestation(subnets.pop().unwrap()),
GossipEncoding::SSZSnappy,
old_fork_digest,
);
let old_topic2 = GossipTopic::new(
GossipKind::Attestation(subnets.pop().unwrap()),
GossipEncoding::SSZSnappy,
old_fork_digest,
);
let old_topic_params2 = network_service
.get_topic_params(old_topic2)
.expect("topic score params");
assert_eq!(0.0, old_topic_params2.topic_weight);
(old_topic1, old_topic2)
};
let subscriptions = network_globals.gossipsub_subscriptions.read().clone();
assert_eq!(2, subscriptions.len());
assert!(subscriptions.contains(&old_topic1));
assert!(subscriptions.contains(&old_topic2));
let old_topic_params1 = network_service
.get_topic_params(old_topic1.clone())
.expect("topic score params");
assert!(old_topic_params1.topic_weight > 0.0);
let old_topic_params2 = network_service
.get_topic_params(old_topic2.clone())
.expect("topic score params");
assert!(old_topic_params2.topic_weight > 0.0);
// Advance slot to the next fork
for _ in 0..MinimalEthSpec::slots_per_epoch() {
beacon_chain.slot_clock.advance_slot();
}
// Run `NetworkService::update_next_fork()`.
runtime.block_on(async {
network_service.update_next_fork();
});
// Check that topic_weight on the old topics has been zeroed.
let old_topic_params1 = network_service
.get_topic_params(old_topic1)
.expect("topic score params");
assert_eq!(0.0, old_topic_params1.topic_weight);
let old_topic_params2 = network_service
.get_topic_params(old_topic2)
.expect("topic score params");
assert_eq!(0.0, old_topic_params2.topic_weight);
}

View File

@@ -877,11 +877,11 @@ mod release_tests {
let (harness, ref spec) = attestation_test_state::<MainnetEthSpec>(1);
// Only run this test on the phase0 hard-fork.
if spec.altair_fork_epoch != None {
if spec.altair_fork_epoch.is_some() {
return;
}
let mut state = get_current_state_initialize_epoch_cache(&harness, &spec);
let mut state = get_current_state_initialize_epoch_cache(&harness, spec);
let slot = state.slot();
let committees = state
.get_beacon_committees_at_slot(slot)
@@ -902,10 +902,10 @@ mod release_tests {
);
for (atts, aggregate) in &attestations {
let att2 = aggregate.as_ref().unwrap().message().aggregate().clone();
let att2 = aggregate.as_ref().unwrap().message().aggregate();
let att1 = atts
.into_iter()
.iter()
.map(|(att, _)| att)
.take(2)
.fold::<Option<Attestation<MainnetEthSpec>>, _>(None, |att, new_att| {
@@ -946,7 +946,7 @@ mod release_tests {
.unwrap();
assert_eq!(
committees.get(0).unwrap().committee.len() - 2,
committees.first().unwrap().committee.len() - 2,
earliest_attestation_validators(
&att2_split.as_ref(),
&state,
@@ -963,7 +963,7 @@ mod release_tests {
let (harness, ref spec) = attestation_test_state::<MainnetEthSpec>(1);
let op_pool = OperationPool::<MainnetEthSpec>::new();
let mut state = get_current_state_initialize_epoch_cache(&harness, &spec);
let mut state = get_current_state_initialize_epoch_cache(&harness, spec);
let slot = state.slot();
let committees = state
@@ -1020,7 +1020,7 @@ mod release_tests {
let agg_att = &block_attestations[0];
assert_eq!(
agg_att.num_set_aggregation_bits(),
spec.target_committee_size as usize
spec.target_committee_size
);
// Prune attestations shouldn't do anything at this point.
@@ -1039,7 +1039,7 @@ mod release_tests {
fn attestation_duplicate() {
let (harness, ref spec) = attestation_test_state::<MainnetEthSpec>(1);
let state = get_current_state_initialize_epoch_cache(&harness, &spec);
let state = get_current_state_initialize_epoch_cache(&harness, spec);
let op_pool = OperationPool::<MainnetEthSpec>::new();
@@ -1082,7 +1082,7 @@ mod release_tests {
fn attestation_pairwise_overlapping() {
let (harness, ref spec) = attestation_test_state::<MainnetEthSpec>(1);
let state = get_current_state_initialize_epoch_cache(&harness, &spec);
let state = get_current_state_initialize_epoch_cache(&harness, spec);
let op_pool = OperationPool::<MainnetEthSpec>::new();
@@ -1113,19 +1113,17 @@ mod release_tests {
let aggs1 = atts1
.chunks_exact(step_size * 2)
.map(|chunk| {
let agg = chunk.into_iter().map(|(att, _)| att).fold::<Option<
Attestation<MainnetEthSpec>,
>, _>(
None,
|att, new_att| {
let agg = chunk
.iter()
.map(|(att, _)| att)
.fold::<Option<Attestation<MainnetEthSpec>>, _>(None, |att, new_att| {
if let Some(mut a) = att {
a.aggregate(new_att.to_ref());
Some(a)
} else {
Some(new_att.clone())
}
},
);
});
agg.unwrap()
})
.collect::<Vec<_>>();
@@ -1136,19 +1134,17 @@ mod release_tests {
.as_slice()
.chunks_exact(step_size * 2)
.map(|chunk| {
let agg = chunk.into_iter().map(|(att, _)| att).fold::<Option<
Attestation<MainnetEthSpec>,
>, _>(
None,
|att, new_att| {
let agg = chunk
.iter()
.map(|(att, _)| att)
.fold::<Option<Attestation<MainnetEthSpec>>, _>(None, |att, new_att| {
if let Some(mut a) = att {
a.aggregate(new_att.to_ref());
Some(a)
} else {
Some(new_att.clone())
}
},
);
});
agg.unwrap()
})
.collect::<Vec<_>>();
@@ -1181,7 +1177,7 @@ mod release_tests {
let (harness, ref spec) = attestation_test_state::<MainnetEthSpec>(num_committees);
let mut state = get_current_state_initialize_epoch_cache(&harness, &spec);
let mut state = get_current_state_initialize_epoch_cache(&harness, spec);
let op_pool = OperationPool::<MainnetEthSpec>::new();
@@ -1194,7 +1190,7 @@ mod release_tests {
.collect::<Vec<_>>();
let max_attestations = <MainnetEthSpec as EthSpec>::MaxAttestations::to_usize();
let target_committee_size = spec.target_committee_size as usize;
let target_committee_size = spec.target_committee_size;
let num_validators = num_committees
* MainnetEthSpec::slots_per_epoch() as usize
* spec.target_committee_size;
@@ -1209,12 +1205,12 @@ mod release_tests {
let insert_attestations = |attestations: Vec<(Attestation<MainnetEthSpec>, SubnetId)>,
step_size| {
let att_0 = attestations.get(0).unwrap().0.clone();
let att_0 = attestations.first().unwrap().0.clone();
let aggs = attestations
.chunks_exact(step_size)
.map(|chunk| {
chunk
.into_iter()
.iter()
.map(|(att, _)| att)
.fold::<Attestation<MainnetEthSpec>, _>(
att_0.clone(),
@@ -1296,7 +1292,7 @@ mod release_tests {
let (harness, ref spec) = attestation_test_state::<MainnetEthSpec>(num_committees);
let mut state = get_current_state_initialize_epoch_cache(&harness, &spec);
let mut state = get_current_state_initialize_epoch_cache(&harness, spec);
let op_pool = OperationPool::<MainnetEthSpec>::new();
let slot = state.slot();
@@ -1308,7 +1304,7 @@ mod release_tests {
.collect::<Vec<_>>();
let max_attestations = <MainnetEthSpec as EthSpec>::MaxAttestations::to_usize();
let target_committee_size = spec.target_committee_size as usize;
let target_committee_size = spec.target_committee_size;
// Each validator will have a multiple of 1_000_000_000 wei.
// Safe from overflow unless there are about 18B validators (2^64 / 1_000_000_000).
@@ -1329,12 +1325,12 @@ mod release_tests {
let insert_attestations = |attestations: Vec<(Attestation<MainnetEthSpec>, SubnetId)>,
step_size| {
let att_0 = attestations.get(0).unwrap().0.clone();
let att_0 = attestations.first().unwrap().0.clone();
let aggs = attestations
.chunks_exact(step_size)
.map(|chunk| {
chunk
.into_iter()
.iter()
.map(|(att, _)| att)
.fold::<Attestation<MainnetEthSpec>, _>(
att_0.clone(),
@@ -1615,7 +1611,6 @@ mod release_tests {
let block_root = *state
.get_block_root(state.slot() - Slot::new(1))
.ok()
.expect("block root should exist at slot");
let contributions = harness.make_sync_contributions(
&state,
@@ -1674,7 +1669,6 @@ mod release_tests {
let state = harness.get_current_state();
let block_root = *state
.get_block_root(state.slot() - Slot::new(1))
.ok()
.expect("block root should exist at slot");
let contributions = harness.make_sync_contributions(
&state,
@@ -1711,7 +1705,6 @@ mod release_tests {
let state = harness.get_current_state();
let block_root = *state
.get_block_root(state.slot() - Slot::new(1))
.ok()
.expect("block root should exist at slot");
let contributions = harness.make_sync_contributions(
&state,
@@ -1791,7 +1784,6 @@ mod release_tests {
let state = harness.get_current_state();
let block_root = *state
.get_block_root(state.slot() - Slot::new(1))
.ok()
.expect("block root should exist at slot");
let contributions = harness.make_sync_contributions(
&state,
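
The repeated removal of `.ok()` before `.expect(...)` in this file addresses clippy's `ok_expect` lint: converting a Result to an Option first discards the error value, so the eventual panic message loses it, while `.expect(...)` directly on the Result includes the `Err` in the output. A minimal sketch:

fn main() {
    // "42".parse::<u64>().ok().expect("should parse"); // flagged by clippy::ok_expect
    let n: u64 = "42".parse().expect("should parse"); // keeps the Err in the panic message
    assert_eq!(n, 42);
}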