Merge remote-tracking branch 'origin/unstable' into tree-states

Michael Sproul
2024-04-05 15:14:04 +11:00
455 changed files with 7906 additions and 5229 deletions

View File

@@ -342,7 +342,7 @@ impl GossipTester {
E::slots_per_epoch() + 1
}
// EIP-7045
- ForkName::Deneb => {
+ ForkName::Deneb | ForkName::Electra => {
let epoch_slot_offset = (self.slot() % E::slots_per_epoch()).as_u64();
if epoch_slot_offset != 0 {
E::slots_per_epoch() + epoch_slot_offset
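
The Deneb/Electra arm above encodes EIP-7045: instead of the fixed 32-slot attestation propagation window used before Deneb, an attestation may still be gossiped while its slot falls in the current or previous epoch, which lines up with the `E::slots_per_epoch() + epoch_slot_offset` value computed above (the distance back to the first slot of the previous epoch). A minimal sketch of that acceptance rule, using hypothetical names and ignoring clock-disparity allowances, not the harness's actual helper:

/// Rough EIP-7045 gossip window: an attestation remains propagatable while its
/// slot's epoch is the current epoch or the one immediately before it.
fn within_post_deneb_window(current_slot: u64, attestation_slot: u64, slots_per_epoch: u64) -> bool {
    let current_epoch = current_slot / slots_per_epoch;
    let attestation_epoch = attestation_slot / slots_per_epoch;
    attestation_epoch + 1 >= current_epoch
}

fn main() {
    let slots_per_epoch = 32;
    // Current slot 70 = epoch 2, offset 6; the oldest acceptable slot is the first
    // slot of epoch 1, i.e. 32 + 6 = 38 slots back, matching the expression above.
    assert!(within_post_deneb_window(70, 70 - 38, slots_per_epoch));
    assert!(!within_post_deneb_window(70, 70 - 39, slots_per_epoch));
}
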
@@ -1235,7 +1235,7 @@ async fn attestation_to_finalized_block() {
.chain
.verify_unaggregated_attestation_for_gossip(&attestation, Some(subnet_id));
assert!(
- matches!(res, Err(AttnError:: HeadBlockFinalized { beacon_block_root })
+ matches!(res, Err(AttnError::HeadBlockFinalized { beacon_block_root })
if beacon_block_root == earlier_block_root
)
);

View File

@@ -840,7 +840,7 @@ async fn invalid_signature_exit() {
}
}
- fn unwrap_err<T, E>(result: Result<T, E>) -> E {
+ fn unwrap_err<T, U>(result: Result<T, U>) -> U {
match result {
Ok(_) => panic!("called unwrap_err on Ok"),
Err(e) => e,
@@ -1087,7 +1087,7 @@ async fn block_gossip_verification() {
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await),
- BlockError::BlockIsAlreadyKnown,
+ BlockError::BlockIsAlreadyKnown(_),
),
"should register any valid signature against the proposer, even if the block failed later verification"
);
@@ -1115,7 +1115,7 @@ async fn block_gossip_verification() {
.verify_block_for_gossip(block.clone())
.await
.expect_err("should error when processing known block"),
- BlockError::BlockIsAlreadyKnown
+ BlockError::BlockIsAlreadyKnown(_)
),
"the second proposal by this validator should be rejected"
);

View File

@@ -7,8 +7,8 @@ use types::*;
const VALIDATOR_COUNT: usize = 32;
type E = MainnetEthSpec;
- fn verify_execution_payload_chain<T: EthSpec>(chain: &[FullPayload<T>]) {
- let mut prev_ep: Option<FullPayload<T>> = None;
+ fn verify_execution_payload_chain<E: EthSpec>(chain: &[FullPayload<E>]) {
+ let mut prev_ep: Option<FullPayload<E>> = None;
for ep in chain {
assert!(!ep.is_default_with_empty_roots());

View File

@@ -8,8 +8,8 @@ const VALIDATOR_COUNT: usize = 32;
type E = MainnetEthSpec;
- fn verify_execution_payload_chain<T: EthSpec>(chain: &[FullPayload<T>]) {
- let mut prev_ep: Option<FullPayload<T>> = None;
+ fn verify_execution_payload_chain<E: EthSpec>(chain: &[FullPayload<E>]) {
+ let mut prev_ep: Option<FullPayload<E>> = None;
for ep in chain {
assert!(!ep.is_default_with_empty_roots());

View File

@@ -2,12 +2,18 @@
#![cfg(not(debug_assertions))]
- use beacon_chain::observed_operations::ObservationOutcome;
- use beacon_chain::test_utils::{
- test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
- };
+ use beacon_chain::{
+ observed_operations::ObservationOutcome,
+ test_utils::{
+ test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
+ },
+ BeaconChainError,
+ };
use lazy_static::lazy_static;
use sloggers::{null::NullLoggerBuilder, Build};
use state_processing::per_block_processing::errors::{
AttesterSlashingInvalid, BlockOperationError, ExitInvalid, ProposerSlashingInvalid,
};
use std::sync::Arc;
use store::{LevelDB, StoreConfig};
use tempfile::{tempdir, TempDir};
@@ -119,6 +125,75 @@ async fn voluntary_exit() {
));
}
#[tokio::test]
async fn voluntary_exit_duplicate_in_state() {
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
let spec = &harness.chain.spec;
harness
.extend_chain(
(E::slots_per_epoch() * (spec.shard_committee_period + 1)) as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
harness.advance_slot();
// Exit a validator.
let exited_validator = 0;
let exit =
harness.make_voluntary_exit(exited_validator, Epoch::new(spec.shard_committee_period));
let ObservationOutcome::New(verified_exit) = harness
.chain
.verify_voluntary_exit_for_gossip(exit.clone())
.unwrap()
else {
panic!("exit should verify");
};
harness.chain.import_voluntary_exit(verified_exit);
// Make a new block to include the exit.
harness
.extend_chain(
1,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
// Verify validator is actually exited.
assert_ne!(
harness
.get_current_state()
.validators()
.get(exited_validator as usize)
.unwrap()
.exit_epoch,
spec.far_future_epoch
);
// Clear the in-memory gossip cache & try to verify the same exit on gossip.
// It should still fail because gossip verification should check the validator's `exit_epoch`
// field in the head state.
harness
.chain
.observed_voluntary_exits
.lock()
.__reset_for_testing_only();
assert!(matches!(
harness
.chain
.verify_voluntary_exit_for_gossip(exit)
.unwrap_err(),
BeaconChainError::ExitValidationError(BlockOperationError::Invalid(
ExitInvalid::AlreadyExited(index)
)) if index == exited_validator
));
}
#[test]
fn proposer_slashing() {
let db_path = tempdir().unwrap();
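
This test, and the two `*_duplicate_in_state` slashing tests added further down, all exercise the same layering: an in-memory "observed on gossip" cache sits in front of a state-based validity check, and clearing the cache must not let an operation through once the head state already reflects it. A purely illustrative sketch of that layering, with hypothetical names rather than Lighthouse's API:

/// Illustrative two-layer gossip check for a voluntary exit: the fast
/// duplicate cache is consulted first, but the head state remains the
/// source of truth even if that cache is cleared.
fn verify_exit_for_gossip(
    seen_before: bool,          // hypothetical stand-in for the observation cache
    exit_epoch_in_state: u64,   // validator's exit_epoch in the head state
    far_future_epoch: u64,
) -> Result<(), &'static str> {
    if seen_before {
        return Err("exit already observed on gossip");
    }
    if exit_epoch_in_state != far_future_epoch {
        return Err("validator already exited in the head state");
    }
    Ok(())
}

fn main() {
    let far_future = u64::MAX;
    // Cache cleared (seen_before = false), but the state already records the exit:
    // verification must still fail, which is exactly what the tests assert.
    assert!(verify_exit_for_gossip(false, 123, far_future).is_err());
    assert!(verify_exit_for_gossip(false, far_future, far_future).is_ok());
}
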
@@ -171,6 +246,63 @@ fn proposer_slashing() {
));
}
#[tokio::test]
async fn proposer_slashing_duplicate_in_state() {
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
// Slash a validator.
let slashed_validator = 0;
let slashing = harness.make_proposer_slashing(slashed_validator);
let ObservationOutcome::New(verified_slashing) = harness
.chain
.verify_proposer_slashing_for_gossip(slashing.clone())
.unwrap()
else {
panic!("slashing should verify");
};
harness.chain.import_proposer_slashing(verified_slashing);
// Make a new block to include the slashing.
harness
.extend_chain(
1,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
// Verify validator is actually slashed.
assert!(
harness
.get_current_state()
.validators()
.get(slashed_validator as usize)
.unwrap()
.slashed
);
// Clear the in-memory gossip cache & try to verify the same slashing on gossip.
// It should still fail because gossip verification should check the validator's `slashed` field
// in the head state.
harness
.chain
.observed_proposer_slashings
.lock()
.__reset_for_testing_only();
assert!(matches!(
harness
.chain
.verify_proposer_slashing_for_gossip(slashing)
.unwrap_err(),
BeaconChainError::ProposerSlashingValidationError(BlockOperationError::Invalid(
ProposerSlashingInvalid::ProposerNotSlashable(index)
)) if index == slashed_validator
));
}
#[test]
fn attester_slashing() {
let db_path = tempdir().unwrap();
@@ -241,3 +373,60 @@ fn attester_slashing() {
ObservationOutcome::AlreadyKnown
));
}
#[tokio::test]
async fn attester_slashing_duplicate_in_state() {
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
// Slash a validator.
let slashed_validator = 0;
let slashing = harness.make_attester_slashing(vec![slashed_validator]);
let ObservationOutcome::New(verified_slashing) = harness
.chain
.verify_attester_slashing_for_gossip(slashing.clone())
.unwrap()
else {
panic!("slashing should verify");
};
harness.chain.import_attester_slashing(verified_slashing);
// Make a new block to include the slashing.
harness
.extend_chain(
1,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
// Verify validator is actually slashed.
assert!(
harness
.get_current_state()
.validators()
.get(slashed_validator as usize)
.unwrap()
.slashed
);
// Clear the in-memory gossip cache & try to verify the same slashing on gossip.
// It should still fail because gossip verification should check the validator's `slashed` field
// in the head state.
harness
.chain
.observed_attester_slashings
.lock()
.__reset_for_testing_only();
assert!(matches!(
harness
.chain
.verify_attester_slashing_for_gossip(slashing)
.unwrap_err(),
BeaconChainError::AttesterSlashingValidationError(BlockOperationError::Invalid(
AttesterSlashingInvalid::NoSlashableIndices
))
));
}

View File

@@ -3339,13 +3339,12 @@ fn assert_chains_pretty_much_the_same<T: BeaconChainTypes>(a: &BeaconChain<T>, b
a_head.beacon_block, b_head.beacon_block,
"head blocks should be equal"
);
- // Clone with committee caches only to prevent other caches from messing with the equality
- // check.
- assert_eq!(
- a_head.beacon_state.clone(),
- b_head.beacon_state.clone(),
- "head states should be equal"
- );
+ // Drop all caches to prevent them messing with the equality check.
+ let mut a_head_state = a_head.beacon_state.clone();
+ a_head_state.drop_all_caches().unwrap();
+ let mut b_head_state = b_head.beacon_state.clone();
+ b_head_state.drop_all_caches().unwrap();
+ assert_eq!(a_head_state, b_head_state, "head states should be equal");
assert_eq!(a.heads(), b.heads(), "heads() should be equal");
assert_eq!(
a.genesis_block_root, b.genesis_block_root,

View File

@@ -203,8 +203,12 @@ async fn produces_missed_blocks() {
// making sure that the cache reloads when the epoch changes
// in that scenario the slot that missed a block is the first slot of the epoch
validator_index_to_monitor = 7;
- // We are adding other validators to monitor as thoses one will miss a block depending on
- // the fork name specified when running the test as the proposer cache differs depending on the fork name (cf. seed)
+ // We are adding other validators to monitor as these ones will miss a block depending on
+ // the fork name specified when running the test as the proposer cache differs depending on
+ // the fork name (cf. seed)
+ //
+ // If you are adding a new fork and seeing errors, print
+ // `validator_indexes[slot_in_epoch.as_usize()]` and add it below.
let validator_index_to_monitor_altair = 2;
// Same as above but for the merge upgrade
let validator_index_to_monitor_merge = 4;
@@ -212,6 +216,9 @@ async fn produces_missed_blocks() {
let validator_index_to_monitor_capella = 11;
// Same as above but for the deneb upgrade
let validator_index_to_monitor_deneb = 3;
// Same as above but for the electra upgrade
let validator_index_to_monitor_electra = 6;
let harness2 = get_harness(
validator_count,
vec![
@@ -220,6 +227,7 @@ async fn produces_missed_blocks() {
validator_index_to_monitor_merge,
validator_index_to_monitor_capella,
validator_index_to_monitor_deneb,
validator_index_to_monitor_electra,
],
);
let advance_slot_by = 9;
@@ -243,6 +251,10 @@ async fn produces_missed_blocks() {
duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap();
validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap();
validator_index = validator_indexes[slot_in_epoch.as_usize()];
// If you are adding a new fork and seeing errors, it means the fork seed has changed the
// validator_index. Uncomment this line, run the test again and add the resulting index to the
// list above.
//eprintln!("new index which needs to be added => {:?}", validator_index);
let beacon_proposer_cache = harness2
.chain