mirror of
https://github.com/sigp/lighthouse.git
synced 2026-04-21 14:58:31 +00:00
Closes: - https://github.com/sigp/lighthouse/issues/8869 - Update `BlockReplayer` to support replay of execution payload envelopes. - Update `HotColdDB` to load payload envelopes and feed them to the `BlockReplayer` for both hot + cold states. However the cold DB code is not fully working yet (see: https://github.com/sigp/lighthouse/issues/8958). - Add `StatePayloadStatus` to allow callers to specify whether they want a state with a payload applied, or not. - Fix the state cache to key by `StatePayloadStatus`. - Lots of fixes to block production and block processing regarding state management. - Initial test harness support for producing+processing Gloas blocks+envelopes - A few new tests to cover Gloas DB operations Co-Authored-By: Eitan Seri- Levi <eserilev@gmail.com> Co-Authored-By: Eitan Seri-Levi <eserilev@ucsc.edu> Co-Authored-By: Michael Sproul <michael@sigmaprime.io> Co-Authored-By: Michael Sproul <michaelsproul@users.noreply.github.com> Co-Authored-By: Jimmy Chen <jchen.tc@gmail.com>
1030 lines
33 KiB
Rust
1030 lines
33 KiB
Rust
#![cfg(all(test, not(feature = "fake_crypto"), not(debug_assertions)))]
|
|
|
|
use crate::per_block_processing::errors::{
|
|
AttestationInvalid, AttesterSlashingInvalid, BlockOperationError, BlockProcessingError,
|
|
DepositInvalid, HeaderInvalid, IndexedAttestationInvalid, IntoWithIndex,
|
|
ProposerSlashingInvalid,
|
|
};
|
|
use crate::{BlockReplayError, BlockReplayer, per_block_processing};
|
|
use crate::{
|
|
BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures,
|
|
per_block_processing::process_operations,
|
|
};
|
|
use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType};
|
|
use bls::{AggregateSignature, Keypair, PublicKeyBytes, Signature, SignatureBytes};
|
|
use fixed_bytes::FixedBytesExtended;
|
|
use ssz_types::Bitfield;
|
|
use ssz_types::VariableList;
|
|
use std::sync::{Arc, LazyLock};
|
|
use test_utils::generate_deterministic_keypairs;
|
|
use types::*;
|
|
|
|
// Size of the shared keypair cache. Some tests (e.g. the bad-attestation-signature
// test) need more validators than the default `VALIDATOR_COUNT`.
pub const MAX_VALIDATOR_COUNT: usize = 97;
// Number of deposits placed in the block body by the single-deposit tests.
pub const NUM_DEPOSITS: u64 = 1;
// Default validator count used by most tests.
pub const VALIDATOR_COUNT: usize = 64;
// The harness is advanced to the last slot of this epoch before each test runs.
pub const EPOCH_OFFSET: u64 = 4;
// NOTE(review): not referenced by any test visible in this file — presumably the
// number of attestations expected per block; confirm before relying on it.
pub const NUM_ATTESTATIONS: u64 = 1;

// When set to true, cache any states fetched from the db.
pub const CACHE_STATE_IN_TESTS: bool = true;
|
|
|
|
/// A cached set of keys.
///
/// Generated lazily, exactly once, and shared by all tests so each test avoids
/// the cost of re-deriving `MAX_VALIDATOR_COUNT` deterministic keypairs.
static KEYPAIRS: LazyLock<Vec<Keypair>> =
    LazyLock::new(|| generate_deterministic_keypairs(MAX_VALIDATOR_COUNT));
|
|
|
|
/// Build a `BeaconChainHarness` with `num_validators` validators, advanced to the
/// last slot of the `epoch_offset`th epoch by adding attested blocks at every slot.
///
/// The chain uses an Electra genesis spec so blocks produced by the tests are at
/// the same fork as the state.
async fn get_harness<E: EthSpec>(
    epoch_offset: u64,
    num_validators: usize,
) -> BeaconChainHarness<EphemeralHarnessType<E>> {
    // Set the state and block to be in the last slot of the `epoch_offset`th epoch.
    let last_slot_of_epoch =
        (MainnetEthSpec::genesis_epoch() + epoch_offset).end_slot(E::slots_per_epoch());
    // Use Electra spec to ensure blocks are created at the same fork as the state
    let spec = Arc::new(ForkName::Electra.make_genesis_spec(E::default_spec()));
    let harness = BeaconChainHarness::<EphemeralHarnessType<E>>::builder(E::default())
        .spec(spec.clone())
        .keypairs(KEYPAIRS[0..num_validators].to_vec())
        .fresh_ephemeral_store()
        .mock_execution_layer()
        .build();
    let state = harness.get_current_state();
    if last_slot_of_epoch > Slot::new(0) {
        // Extend the chain from slot 1 up to the target slot, with all
        // validators attesting.
        harness
            .add_attested_blocks_at_slots(
                state,
                Hash256::zero(),
                (1..last_slot_of_epoch.as_u64())
                    .map(Slot::new)
                    .collect::<Vec<_>>()
                    .as_slice(),
                (0..num_validators).collect::<Vec<_>>().as_slice(),
            )
            .await;
    }
    harness
}
|
|
|
|
#[tokio::test]
|
|
async fn valid_block_ok() {
|
|
let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
|
|
let spec = harness.spec.clone();
|
|
let state = harness.get_current_state();
|
|
|
|
let slot = state.slot();
|
|
let ((block, _), mut state) = harness
|
|
.make_block_return_pre_state(state, slot + Slot::new(1))
|
|
.await;
|
|
|
|
let mut ctxt = ConsensusContext::new(block.slot());
|
|
let result = per_block_processing(
|
|
&mut state,
|
|
&block,
|
|
BlockSignatureStrategy::VerifyIndividual,
|
|
VerifyBlockRoot::True,
|
|
&mut ctxt,
|
|
&spec,
|
|
);
|
|
|
|
assert!(result.is_ok());
|
|
}
|
|
|
|
/// A block whose slot is ahead of the state's slot is rejected with
/// `HeaderInvalid::StateSlotMismatch`.
#[tokio::test]
async fn invalid_block_header_state_slot() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();

    let state = harness.get_current_state();
    let slot = state.slot() + Slot::new(1);

    let ((signed_block, _), mut state) = harness.make_block_return_pre_state(state, slot).await;
    let (mut block, signature) = (*signed_block).clone().deconstruct();
    // Push the block one slot beyond the state it will be applied to.
    *block.slot_mut() = slot + Slot::new(1);

    let mut ctxt = ConsensusContext::new(block.slot());
    let result = per_block_processing(
        &mut state,
        &SignedBeaconBlock::from_block(block, signature),
        BlockSignatureStrategy::VerifyIndividual,
        VerifyBlockRoot::True,
        &mut ctxt,
        &spec,
    );

    assert!(matches!(
        result,
        Err(BlockProcessingError::HeaderInvalid {
            reason: HeaderInvalid::StateSlotMismatch,
        })
    ));
}
|
|
|
|
/// A block whose `parent_root` doesn't match the state's latest block header is
/// rejected with `HeaderInvalid::ParentBlockRootMismatch`.
#[tokio::test]
async fn invalid_parent_block_root() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();

    let state = harness.get_current_state();
    let slot = state.slot();

    let ((signed_block, _), mut state) = harness
        .make_block_return_pre_state(state, slot + Slot::new(1))
        .await;
    let (mut block, signature) = (*signed_block).clone().deconstruct();
    // Corrupt the parent root with an arbitrary value.
    *block.parent_root_mut() = Hash256::from([0xAA; 32]);

    let mut ctxt = ConsensusContext::new(block.slot());
    let result = per_block_processing(
        &mut state,
        &SignedBeaconBlock::from_block(block, signature),
        BlockSignatureStrategy::VerifyIndividual,
        VerifyBlockRoot::True,
        &mut ctxt,
        &spec,
    );

    assert!(matches!(
        result,
        Err(BlockProcessingError::HeaderInvalid {
            reason: HeaderInvalid::ParentBlockRootMismatch { .. },
        })
    ));
}
|
|
|
|
/// A block re-signed with an empty (invalid) signature is rejected with
/// `HeaderInvalid::ProposalSignatureInvalid`.
#[tokio::test]
async fn invalid_block_signature() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();

    let state = harness.get_current_state();
    let slot = state.slot();
    let ((signed_block, _), mut state) = harness
        .make_block_return_pre_state(state, slot + Slot::new(1))
        .await;
    // Discard the valid signature; re-wrap the block with an empty one below.
    let (block, _) = (*signed_block).clone().deconstruct();

    let mut ctxt = ConsensusContext::new(block.slot());
    let result = per_block_processing(
        &mut state,
        &SignedBeaconBlock::from_block(block, Signature::empty()),
        BlockSignatureStrategy::VerifyIndividual,
        VerifyBlockRoot::True,
        &mut ctxt,
        &spec,
    );

    assert!(matches!(
        result,
        Err(BlockProcessingError::HeaderInvalid {
            reason: HeaderInvalid::ProposalSignatureInvalid,
        })
    ));
}
|
|
|
|
/// A block whose RANDAO reveal is replaced with an empty signature is rejected
/// with `BlockProcessingError::RandaoSignatureInvalid`.
#[tokio::test]
async fn invalid_randao_reveal_signature() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();

    let state = harness.get_current_state();
    let slot = state.slot();

    // The modifier runs before the block is signed, so the outer block
    // signature remains valid — only the RANDAO reveal is bad.
    let ((signed_block, _), mut state) = harness
        .make_block_with_modifier(state, slot + 1, |block| {
            *block.body_mut().randao_reveal_mut() = Signature::empty();
        })
        .await;

    let mut ctxt = ConsensusContext::new(signed_block.slot());
    let result = per_block_processing(
        &mut state,
        &signed_block,
        BlockSignatureStrategy::VerifyIndividual,
        VerifyBlockRoot::True,
        &mut ctxt,
        &spec,
    );

    // should get a RandaoSignatureInvalid error
    assert_eq!(result, Err(BlockProcessingError::RandaoSignatureInvalid));
}
|
|
|
|
/// Four freshly-generated, well-formed deposits process successfully.
#[tokio::test]
async fn valid_4_deposits() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();
    let mut state = harness.get_current_state();

    let (deposits, state) = harness.make_deposits(&mut state, 4, None, None);
    let deposits = VariableList::try_from(deposits).unwrap();

    // Graft the deposits onto a copy of the head block's body.
    let mut head_block = harness
        .chain
        .head_beacon_block()
        .as_ref()
        .clone()
        .deconstruct()
        .0;
    *head_block.to_mut().body_mut().deposits_mut() = deposits;

    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);

    // Expecting Ok because these are valid deposits.
    assert_eq!(result, Ok(()));
}
|
|
|
|
/// If the state's `eth1_data.deposit_count` claims more deposits than the block
/// includes, processing fails with `DepositCountInvalid`.
#[tokio::test]
async fn invalid_deposit_deposit_count_too_big() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();
    let mut state = harness.get_current_state();

    let (deposits, state) = harness.make_deposits(&mut state, 1, None, None);
    let deposits = VariableList::try_from(deposits).unwrap();

    // Graft the single deposit onto a copy of the head block's body.
    let mut head_block = harness
        .chain
        .head_beacon_block()
        .as_ref()
        .clone()
        .deconstruct()
        .0;
    *head_block.to_mut().body_mut().deposits_mut() = deposits;

    // Inflate the state's expected deposit count past what the block carries.
    let big_deposit_count = NUM_DEPOSITS + 1;
    state.eth1_data_mut().deposit_count = big_deposit_count;
    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);

    // Expecting DepositCountInvalid because we incremented the deposit_count
    assert_eq!(
        result,
        Err(BlockProcessingError::DepositCountInvalid {
            expected: big_deposit_count as usize,
            found: 1
        })
    );
}
|
|
|
|
/// If the state's `eth1_data.deposit_count` claims fewer deposits than the block
/// includes, processing fails with `DepositCountInvalid`.
#[tokio::test]
async fn invalid_deposit_count_too_small() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();
    let mut state = harness.get_current_state();

    let (deposits, state) = harness.make_deposits(&mut state, 1, None, None);
    let deposits = VariableList::try_from(deposits).unwrap();

    // Graft the single deposit onto a copy of the head block's body.
    let mut head_block = harness
        .chain
        .head_beacon_block()
        .as_ref()
        .clone()
        .deconstruct()
        .0;
    *head_block.to_mut().body_mut().deposits_mut() = deposits;

    // Deflate the state's expected deposit count below what the block carries
    // (NUM_DEPOSITS is 1, so this is 0).
    let small_deposit_count = NUM_DEPOSITS - 1;
    state.eth1_data_mut().deposit_count = small_deposit_count;
    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);

    // Expecting DepositCountInvalid because we decremented the deposit_count
    assert_eq!(
        result,
        Err(BlockProcessingError::DepositCountInvalid {
            expected: small_deposit_count as usize,
            found: 1
        })
    );
}
|
|
|
|
/// A deposit whose Merkle proof was built against a different deposit index is
/// rejected with `DepositInvalid::BadMerkleProof`.
#[tokio::test]
async fn invalid_deposit_bad_merkle_proof() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();
    let mut state = harness.get_current_state();

    let (deposits, state) = harness.make_deposits(&mut state, 1, None, None);
    let deposits = VariableList::try_from(deposits).unwrap();

    // Graft the deposit onto a copy of the head block's body.
    let mut head_block = harness
        .chain
        .head_beacon_block()
        .as_ref()
        .clone()
        .deconstruct()
        .0;
    *head_block.to_mut().body_mut().deposits_mut() = deposits;
    // Record the index *before* bumping it; the error reports the original index.
    let bad_index = state.eth1_deposit_index() as usize;

    // Manually offsetting deposit count and index to trigger bad merkle proof
    state.eth1_data_mut().deposit_count += 1;
    *state.eth1_deposit_index_mut() += 1;
    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);

    // Expecting BadMerkleProof because the proofs were created with different indices
    assert_eq!(
        result,
        Err(BlockProcessingError::DepositInvalid {
            index: bad_index,
            reason: DepositInvalid::BadMerkleProof
        })
    );
}
|
|
|
|
/// A deposit carrying an invalid signature still processes with `Ok(())`:
/// per the spec, deposits with bad signatures are skipped, not rejected.
#[tokio::test]
async fn invalid_deposit_wrong_sig() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();
    let mut state = harness.get_current_state();

    // Build a deposit signed with an empty (invalid) signature.
    let (deposits, state) =
        harness.make_deposits(&mut state, 1, None, Some(SignatureBytes::empty()));
    let deposits = VariableList::try_from(deposits).unwrap();

    // Graft the deposit onto a copy of the head block's body.
    let mut head_block = harness
        .chain
        .head_beacon_block()
        .as_ref()
        .clone()
        .deconstruct()
        .0;
    *head_block.to_mut().body_mut().deposits_mut() = deposits;

    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);
    // Expecting Ok(()) even though the block signature does not correspond to the correct public key
    assert_eq!(result, Ok(()));
}
|
|
|
|
/// A deposit carrying undecodable public key bytes still processes with `Ok(())`:
/// per the spec, such deposits are skipped, not rejected.
#[tokio::test]
async fn invalid_deposit_invalid_pub_key() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();
    let mut state = harness.get_current_state();

    // Build a deposit whose pubkey field is empty (not a valid BLS point).
    let (deposits, state) =
        harness.make_deposits(&mut state, 1, Some(PublicKeyBytes::empty()), None);
    let deposits = VariableList::try_from(deposits).unwrap();

    // Graft the deposit onto a copy of the head block's body.
    let mut head_block = harness
        .chain
        .head_beacon_block()
        .as_ref()
        .clone()
        .deconstruct()
        .0;
    *head_block.to_mut().body_mut().deposits_mut() = deposits;

    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);

    // Expecting Ok(()) even though we passed in invalid publickeybytes in the public key field of the deposit data.
    assert_eq!(result, Ok(()));
}
|
|
|
|
/// An attestation whose committee index is bumped out of range is rejected with
/// `AttestationInvalid::BadCommitteeIndex`.
#[tokio::test]
async fn invalid_attestation_no_committee_for_index() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();

    let mut state = harness.get_current_state();
    let mut head_block = harness
        .chain
        .head_beacon_block()
        .as_ref()
        .clone()
        .deconstruct()
        .0;
    // Bump the first attestation's committee index past the committee count.
    head_block
        .to_mut()
        .body_mut()
        .attestations_mut()
        .next()
        .unwrap()
        .data_mut()
        .index += 1;
    let mut ctxt = ConsensusContext::new(state.slot());
    let result = process_operations::process_attestations(
        &mut state,
        head_block.body(),
        VerifySignatures::True,
        &mut ctxt,
        &spec,
    );

    // Expecting BadCommitteeIndex because we manually set the attestation's index to be invalid
    assert_eq!(
        result,
        Err(BlockProcessingError::AttestationInvalid {
            index: 0,
            reason: AttestationInvalid::BadCommitteeIndex
        })
    );
}
|
|
|
|
/// An attestation whose source checkpoint doesn't match the state's justified
/// checkpoint is rejected with `AttestationInvalid::WrongJustifiedCheckpoint`.
#[tokio::test]
async fn invalid_attestation_wrong_justified_checkpoint() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();

    let mut state = harness.get_current_state();
    let mut head_block = harness
        .chain
        .head_beacon_block()
        .as_ref()
        .clone()
        .deconstruct()
        .0;
    // Keep the original source checkpoint; it appears in the expected error.
    let old_justified_checkpoint = head_block
        .body()
        .attestations()
        .next()
        .unwrap()
        .data()
        .source;
    // Shift the source epoch forward by one so it no longer matches the state.
    let mut new_justified_checkpoint = old_justified_checkpoint;
    new_justified_checkpoint.epoch += Epoch::new(1);
    head_block
        .to_mut()
        .body_mut()
        .attestations_mut()
        .next()
        .unwrap()
        .data_mut()
        .source = new_justified_checkpoint;

    let mut ctxt = ConsensusContext::new(state.slot());
    let result = process_operations::process_attestations(
        &mut state,
        head_block.body(),
        VerifySignatures::True,
        &mut ctxt,
        &spec,
    );

    // Expecting WrongJustifiedCheckpoint because we manually set the
    // source field of the AttestationData object to be invalid
    assert_eq!(
        result,
        Err(BlockProcessingError::AttestationInvalid {
            index: 0,
            reason: AttestationInvalid::WrongJustifiedCheckpoint {
                state: Box::new(old_justified_checkpoint),
                attestation: Box::new(new_justified_checkpoint),
                is_current: true,
            }
        })
    );
}
|
|
|
|
/// An attestation whose aggregation bitfield has the wrong capacity fails.
/// Under Electra this surfaces as `BeaconStateError::EmptyCommittee` rather than
/// a bitfield-length error (see the inline comments for why).
#[tokio::test]
async fn invalid_attestation_bad_aggregation_bitfield_len() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();

    let mut state = harness.get_current_state();
    let mut head_block = harness
        .chain
        .head_beacon_block()
        .as_ref()
        .clone()
        .deconstruct()
        .0;
    // Use Electra method since harness runs at Electra fork
    *head_block
        .to_mut()
        .body_mut()
        .attestations_mut()
        .next()
        .unwrap()
        .aggregation_bits_electra_mut()
        .unwrap() = Bitfield::with_capacity(spec.target_committee_size).unwrap();

    let mut ctxt = ConsensusContext::new(state.slot());
    let result = process_operations::process_attestations(
        &mut state,
        head_block.body(),
        VerifySignatures::True,
        &mut ctxt,
        &spec,
    );

    // In Electra, setting wrong aggregation_bits capacity causes EmptyCommittee error
    // (validation order changed - committee check happens before bitfield check)
    assert_eq!(
        result,
        Err(BlockProcessingError::BeaconStateError(
            BeaconStateError::EmptyCommittee
        ))
    );
}
|
|
|
|
#[tokio::test]
|
|
async fn invalid_attestation_bad_signature() {
|
|
let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, 97).await; // minimal number of required validators for this test
|
|
let spec = harness.spec.clone();
|
|
|
|
let mut state = harness.get_current_state();
|
|
let mut head_block = harness
|
|
.chain
|
|
.head_beacon_block()
|
|
.as_ref()
|
|
.clone()
|
|
.deconstruct()
|
|
.0;
|
|
*head_block
|
|
.to_mut()
|
|
.body_mut()
|
|
.attestations_mut()
|
|
.next()
|
|
.unwrap()
|
|
.signature_mut() = AggregateSignature::empty();
|
|
|
|
let mut ctxt = ConsensusContext::new(state.slot());
|
|
let result = process_operations::process_attestations(
|
|
&mut state,
|
|
head_block.body(),
|
|
VerifySignatures::True,
|
|
&mut ctxt,
|
|
&spec,
|
|
);
|
|
// Expecting BadSignature because we're signing with invalid secret_keys
|
|
assert_eq!(
|
|
result,
|
|
Err(BlockProcessingError::AttestationInvalid {
|
|
index: 0,
|
|
reason: AttestationInvalid::BadIndexedAttestation(
|
|
IndexedAttestationInvalid::BadSignature
|
|
)
|
|
})
|
|
);
|
|
}
|
|
|
|
#[tokio::test]
|
|
async fn invalid_attestation_included_too_early() {
|
|
let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
|
|
let spec = harness.spec.clone();
|
|
|
|
let mut state = harness.get_current_state();
|
|
let mut head_block = harness
|
|
.chain
|
|
.head_beacon_block()
|
|
.as_ref()
|
|
.clone()
|
|
.deconstruct()
|
|
.0;
|
|
let new_attesation_slot = head_block.body().attestations().next().unwrap().data().slot
|
|
+ Slot::new(MainnetEthSpec::slots_per_epoch());
|
|
head_block
|
|
.to_mut()
|
|
.body_mut()
|
|
.attestations_mut()
|
|
.next()
|
|
.unwrap()
|
|
.data_mut()
|
|
.slot = new_attesation_slot;
|
|
|
|
let mut ctxt = ConsensusContext::new(state.slot());
|
|
let result = process_operations::process_attestations(
|
|
&mut state,
|
|
head_block.body(),
|
|
VerifySignatures::True,
|
|
&mut ctxt,
|
|
&spec,
|
|
);
|
|
|
|
// Expecting IncludedTooEarly because the shard included in the crosslink is bigger than expected
|
|
assert_eq!(
|
|
result,
|
|
Err(BlockProcessingError::AttestationInvalid {
|
|
index: 0,
|
|
reason: AttestationInvalid::IncludedTooEarly {
|
|
state: state.slot(),
|
|
delay: spec.min_attestation_inclusion_delay,
|
|
attestation: new_attesation_slot,
|
|
}
|
|
})
|
|
);
|
|
}
|
|
|
|
// Note: `invalid_attestation_included_too_late` test removed.
|
|
// The `IncludedTooLate` check was removed in Deneb (EIP7045), so this test is no longer
|
|
// applicable when running with Electra spec (which the harness uses by default).
|
|
|
|
/// An attestation whose target epoch disagrees with the epoch of its slot is
/// rejected with `AttestationInvalid::TargetEpochSlotMismatch`.
#[tokio::test]
async fn invalid_attestation_target_epoch_slot_mismatch() {
    // note to maintainer: might need to increase validator count if we get NoCommittee
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();

    let mut state = harness.get_current_state();
    let mut head_block = harness
        .chain
        .head_beacon_block()
        .as_ref()
        .clone()
        .deconstruct()
        .0;
    // Bump the target epoch without moving the slot, creating the mismatch.
    head_block
        .to_mut()
        .body_mut()
        .attestations_mut()
        .next()
        .unwrap()
        .data_mut()
        .target
        .epoch += Epoch::new(1);

    let mut ctxt = ConsensusContext::new(state.slot());
    let result = process_operations::process_attestations(
        &mut state,
        head_block.body(),
        VerifySignatures::True,
        &mut ctxt,
        &spec,
    );
    assert_eq!(
        result,
        Err(BlockProcessingError::AttestationInvalid {
            index: 0,
            reason: AttestationInvalid::TargetEpochSlotMismatch {
                target_epoch: Epoch::new(EPOCH_OFFSET + 1),
                slot_epoch: Epoch::new(EPOCH_OFFSET),
            }
        })
    );
}
|
|
|
|
/// A well-formed attester slashing (two conflicting attestations from the same
/// validators) processes successfully.
#[tokio::test]
async fn valid_insert_attester_slashing() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();

    let attester_slashing = harness.make_attester_slashing(vec![1, 2]);

    let mut state = harness.get_current_state();
    let mut ctxt = ConsensusContext::new(state.slot());
    let result = process_operations::process_attester_slashings(
        &mut state,
        [attester_slashing.to_ref()].into_iter(),
        VerifySignatures::True,
        &mut ctxt,
        &spec,
    );

    // Expecting Ok(()) because attester slashing is valid
    assert_eq!(result, Ok(()));
}
|
|
|
|
/// An attester slashing whose two attestations are identical is not slashable
/// and is rejected with `AttesterSlashingInvalid::NotSlashable`.
#[tokio::test]
async fn invalid_attester_slashing_not_slashable() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();

    let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]);
    // Make both attestations identical so they no longer conflict.
    match &mut attester_slashing {
        AttesterSlashing::Base(attester_slashing) => {
            attester_slashing.attestation_1 = attester_slashing.attestation_2.clone();
        }
        AttesterSlashing::Electra(attester_slashing) => {
            attester_slashing.attestation_1 = attester_slashing.attestation_2.clone();
        }
    }

    let mut state = harness.get_current_state();
    let mut ctxt = ConsensusContext::new(state.slot());
    let result = process_operations::process_attester_slashings(
        &mut state,
        [attester_slashing.to_ref()].into_iter(),
        VerifySignatures::True,
        &mut ctxt,
        &spec,
    );

    // Expecting NotSlashable because the two attestations are the same
    assert_eq!(
        result,
        Err(BlockProcessingError::AttesterSlashingInvalid {
            index: 0,
            reason: AttesterSlashingInvalid::NotSlashable
        })
    );
}
|
|
|
|
/// An attester slashing whose first indexed attestation has out-of-order
/// validator indices is rejected with `BadValidatorIndicesOrdering`.
#[tokio::test]
async fn invalid_attester_slashing_1_invalid() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();

    let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]);
    // Indexed attestations must list attesting indices in ascending order;
    // `[2, 1]` violates that for attestation_1.
    match &mut attester_slashing {
        AttesterSlashing::Base(attester_slashing) => {
            attester_slashing.attestation_1.attesting_indices =
                VariableList::try_from(vec![2, 1]).unwrap();
        }
        AttesterSlashing::Electra(attester_slashing) => {
            attester_slashing.attestation_1.attesting_indices =
                VariableList::try_from(vec![2, 1]).unwrap();
        }
    }

    let mut state = harness.get_current_state();
    let mut ctxt = ConsensusContext::new(state.slot());
    let result = process_operations::process_attester_slashings(
        &mut state,
        [attester_slashing.to_ref()].into_iter(),
        VerifySignatures::True,
        &mut ctxt,
        &spec,
    );

    assert_eq!(
        result,
        Err(
            BlockOperationError::Invalid(AttesterSlashingInvalid::IndexedAttestation1Invalid(
                BlockOperationError::Invalid(
                    IndexedAttestationInvalid::BadValidatorIndicesOrdering(0)
                )
            ))
            .into_with_index(0)
        )
    );
}
|
|
|
|
/// An attester slashing whose second indexed attestation has out-of-order
/// validator indices is rejected with `BadValidatorIndicesOrdering`.
#[tokio::test]
async fn invalid_attester_slashing_2_invalid() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();

    let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]);
    // Indexed attestations must list attesting indices in ascending order;
    // `[2, 1]` violates that for attestation_2.
    match &mut attester_slashing {
        AttesterSlashing::Base(attester_slashing) => {
            attester_slashing.attestation_2.attesting_indices =
                VariableList::try_from(vec![2, 1]).unwrap();
        }
        AttesterSlashing::Electra(attester_slashing) => {
            attester_slashing.attestation_2.attesting_indices =
                VariableList::try_from(vec![2, 1]).unwrap();
        }
    }

    let mut state = harness.get_current_state();
    let mut ctxt = ConsensusContext::new(state.slot());
    let result = process_operations::process_attester_slashings(
        &mut state,
        [attester_slashing.to_ref()].into_iter(),
        VerifySignatures::True,
        &mut ctxt,
        &spec,
    );

    assert_eq!(
        result,
        Err(
            BlockOperationError::Invalid(AttesterSlashingInvalid::IndexedAttestation2Invalid(
                BlockOperationError::Invalid(
                    IndexedAttestationInvalid::BadValidatorIndicesOrdering(0)
                )
            ))
            .into_with_index(0)
        )
    );
}
|
|
|
|
/// A well-formed proposer slashing (two conflicting signed headers from the
/// same proposer) processes successfully.
#[tokio::test]
async fn valid_insert_proposer_slashing() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();
    let proposer_slashing = harness.make_proposer_slashing(1);
    let mut state = harness.get_current_state();
    let mut ctxt = ConsensusContext::new(state.slot());
    let result = process_operations::process_proposer_slashings(
        &mut state,
        &[proposer_slashing],
        VerifySignatures::True,
        &mut ctxt,
        &spec,
    );
    // Expecting Ok(_) because we inserted a valid proposer slashing
    assert!(result.is_ok());
}
|
|
|
|
/// A proposer slashing whose two headers are identical is rejected with
/// `ProposerSlashingInvalid::ProposalsIdentical`.
#[tokio::test]
async fn invalid_proposer_slashing_proposals_identical() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();

    let mut proposer_slashing = harness.make_proposer_slashing(1);
    // Copy header 2's message into header 1 so there is no conflict to slash.
    proposer_slashing.signed_header_1.message = proposer_slashing.signed_header_2.message.clone();

    let mut state = harness.get_current_state();
    let mut ctxt = ConsensusContext::new(state.slot());
    let result = process_operations::process_proposer_slashings(
        &mut state,
        &[proposer_slashing],
        VerifySignatures::True,
        &mut ctxt,
        &spec,
    );

    // Expecting ProposalsIdentical because the two headers are identical
    assert_eq!(
        result,
        Err(BlockProcessingError::ProposerSlashingInvalid {
            index: 0,
            reason: ProposerSlashingInvalid::ProposalsIdentical
        })
    );
}
|
|
|
|
/// A proposer slashing naming a validator index that doesn't exist is rejected
/// with `ProposerSlashingInvalid::ProposerUnknown`.
#[tokio::test]
async fn invalid_proposer_slashing_proposer_unknown() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();

    let mut proposer_slashing = harness.make_proposer_slashing(1);
    // Point both headers at a validator index far beyond the registry.
    proposer_slashing.signed_header_1.message.proposer_index = 3_141_592;
    proposer_slashing.signed_header_2.message.proposer_index = 3_141_592;

    let mut state = harness.get_current_state();
    let mut ctxt = ConsensusContext::new(state.slot());
    let result = process_operations::process_proposer_slashings(
        &mut state,
        &[proposer_slashing],
        VerifySignatures::True,
        &mut ctxt,
        &spec,
    );

    // Expecting ProposerUnknown because validator_index is unknown
    assert_eq!(
        result,
        Err(BlockProcessingError::ProposerSlashingInvalid {
            index: 0,
            reason: ProposerSlashingInvalid::ProposerUnknown(3_141_592)
        })
    );
}
|
|
|
|
/// Processing the same proposer slashing twice fails the second time with
/// `ProposerSlashingInvalid::ProposerNotSlashable`, because the validator is
/// already slashed after the first application.
#[tokio::test]
async fn invalid_proposer_slashing_duplicate_slashing() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();

    let proposer_slashing = harness.make_proposer_slashing(1);
    let mut state = harness.get_current_state();
    let mut ctxt = ConsensusContext::new(state.slot());
    // First application succeeds and marks validator 1 as slashed.
    let result_1 = process_operations::process_proposer_slashings(
        &mut state,
        std::slice::from_ref(&proposer_slashing),
        VerifySignatures::False,
        &mut ctxt,
        &spec,
    );
    assert!(result_1.is_ok());

    let result_2 = process_operations::process_proposer_slashings(
        &mut state,
        std::slice::from_ref(&proposer_slashing),
        VerifySignatures::False,
        &mut ctxt,
        &spec,
    );
    // Expecting ProposerNotSlashable because we've already slashed the validator
    assert_eq!(
        result_2,
        Err(BlockProcessingError::ProposerSlashingInvalid {
            index: 0,
            reason: ProposerSlashingInvalid::ProposerNotSlashable(1)
        })
    );
}
|
|
|
|
/// A proposer slashing whose first header carries an invalid signature is
/// rejected with `ProposerSlashingInvalid::BadProposal1Signature`.
#[tokio::test]
async fn invalid_bad_proposal_1_signature() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();
    let mut proposer_slashing = harness.make_proposer_slashing(1);
    // Invalidate only the first header's signature.
    proposer_slashing.signed_header_1.signature = Signature::empty();
    let mut state = harness.get_current_state();
    let mut ctxt = ConsensusContext::new(state.slot());
    let result = process_operations::process_proposer_slashings(
        &mut state,
        &[proposer_slashing],
        VerifySignatures::True,
        &mut ctxt,
        &spec,
    );

    // Expecting BadProposal1Signature because signature of proposal 1 is invalid
    assert_eq!(
        result,
        Err(BlockProcessingError::ProposerSlashingInvalid {
            index: 0,
            reason: ProposerSlashingInvalid::BadProposal1Signature
        })
    );
}
|
|
|
|
/// A proposer slashing whose second header carries an invalid signature is
/// rejected with `ProposerSlashingInvalid::BadProposal2Signature`.
#[tokio::test]
async fn invalid_bad_proposal_2_signature() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();
    let mut proposer_slashing = harness.make_proposer_slashing(1);
    // Invalidate only the second header's signature.
    proposer_slashing.signed_header_2.signature = Signature::empty();
    let mut state = harness.get_current_state();
    let mut ctxt = ConsensusContext::new(state.slot());
    let result = process_operations::process_proposer_slashings(
        &mut state,
        &[proposer_slashing],
        VerifySignatures::True,
        &mut ctxt,
        &spec,
    );

    // Expecting BadProposal2Signature because signature of proposal 2 is invalid
    assert_eq!(
        result,
        Err(BlockProcessingError::ProposerSlashingInvalid {
            index: 0,
            reason: ProposerSlashingInvalid::BadProposal2Signature
        })
    );
}
|
|
|
|
/// A proposer slashing whose two headers are for different slots is rejected
/// with `ProposerSlashingInvalid::ProposalSlotMismatch`.
#[tokio::test]
async fn invalid_proposer_slashing_proposal_epoch_mismatch() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
    let spec = harness.spec.clone();
    let mut proposer_slashing = harness.make_proposer_slashing(1);
    // Give the two headers different slots so they no longer refer to the
    // same proposal slot.
    proposer_slashing.signed_header_1.message.slot = Slot::new(0);
    proposer_slashing.signed_header_2.message.slot = Slot::new(128);
    let mut state = harness.get_current_state();
    let mut ctxt = ConsensusContext::new(state.slot());
    let result = process_operations::process_proposer_slashings(
        &mut state,
        &[proposer_slashing],
        VerifySignatures::False,
        &mut ctxt,
        &spec,
    );

    // Expecting ProposalSlotMismatch because the two header slots differ.
    // (The old comment said "ProposalEpochMismatch", which doesn't match the
    // asserted error variant.)
    assert_eq!(
        result,
        Err(BlockProcessingError::ProposerSlashingInvalid {
            index: 0,
            reason: ProposerSlashingInvalid::ProposalSlotMismatch(
                Slot::from(0_u64),
                Slot::from(128_u64)
            )
        })
    );
}
|
|
|
|
/// Check that the block replayer does not consume state roots unnecessarily.
///
/// Replays the head block on top of its parent state while supplying a state
/// root iterator that only contains a dummy entry for the slot *after* the
/// target state. If the replayer peeks (rather than pops) the iterator when
/// the slot doesn't match, the dummy entry must still be present afterwards.
#[tokio::test]
async fn block_replayer_peeking_state_roots() {
    let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;

    let target_state = harness.get_current_state();
    let target_block_root = harness.head_block_root();
    let target_block = harness
        .chain
        .get_blinded_block(&target_block_root)
        .unwrap()
        .unwrap();

    let parent_block_root = target_block.parent_root();
    let parent_block = harness
        .chain
        .get_blinded_block(&parent_block_root)
        .unwrap()
        .unwrap();
    // Cache the state to make CI go brr.
    let parent_state = harness
        .chain
        .get_state(&parent_block.state_root(), Some(parent_block.slot()), true)
        .unwrap()
        .unwrap();

    // Omit the state root for `target_state` but provide a dummy state root at the *next* slot.
    // If the block replayer is peeking at the state roots rather than consuming them, then the
    // dummy state should still be there after block replay completes.
    let dummy_state_root = Hash256::repeat_byte(0xff);
    let dummy_slot = target_state.slot() + 1;
    let state_root_iter = vec![Ok::<_, BlockReplayError>((dummy_state_root, dummy_slot))];
    let block_replayer = BlockReplayer::new(parent_state, &harness.chain.spec)
        .state_root_iter(state_root_iter.into_iter())
        .no_signature_verification()
        .apply_blocks(vec![target_block], vec![], None)
        .unwrap();

    // The dummy (state_root, slot) entry must not have been consumed.
    assert_eq!(
        block_replayer
            .state_root_iter
            .unwrap()
            .next()
            .unwrap()
            .unwrap(),
        (dummy_state_root, dummy_slot)
    );
}
|