Use E for EthSpec globally (#5264)

* Use `E` for `EthSpec` globally

* Fix tests

* Merge branch 'unstable' into e-ethspec

* Merge branch 'unstable' into e-ethspec

# Conflicts:
#	beacon_node/execution_layer/src/engine_api.rs
#	beacon_node/execution_layer/src/engine_api/http.rs
#	beacon_node/execution_layer/src/engine_api/json_structures.rs
#	beacon_node/execution_layer/src/test_utils/handle_rpc.rs
#	beacon_node/store/src/partial_beacon_state.rs
#	consensus/types/src/beacon_block.rs
#	consensus/types/src/beacon_block_body.rs
#	consensus/types/src/beacon_state.rs
#	consensus/types/src/config_and_preset.rs
#	consensus/types/src/execution_payload.rs
#	consensus/types/src/execution_payload_header.rs
#	consensus/types/src/light_client_optimistic_update.rs
#	consensus/types/src/payload.rs
#	lcli/src/parse_ssz.rs
This commit is contained in:
Mac L
2024-04-03 02:12:25 +11:00
committed by GitHub
parent f8fdb71f50
commit 969d12dc6f
230 changed files with 2743 additions and 2792 deletions

View File

@@ -12,17 +12,17 @@ use types::{
};
#[derive(Debug, Clone)]
pub struct AttMaxCover<'a, T: EthSpec> {
pub struct AttMaxCover<'a, E: EthSpec> {
/// Underlying attestation.
pub att: AttestationRef<'a, T>,
pub att: AttestationRef<'a, E>,
/// Mapping of validator indices and their rewards.
pub fresh_validators_rewards: HashMap<u64, u64>,
}
impl<'a, T: EthSpec> AttMaxCover<'a, T> {
impl<'a, E: EthSpec> AttMaxCover<'a, E> {
pub fn new(
att: AttestationRef<'a, T>,
state: &BeaconState<T>,
att: AttestationRef<'a, E>,
state: &BeaconState<E>,
reward_cache: &'a RewardCache,
total_active_balance: u64,
spec: &ChainSpec,
@@ -36,9 +36,9 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> {
/// Initialise an attestation cover object for base/phase0 hard fork.
pub fn new_for_base(
att: AttestationRef<'a, T>,
state: &BeaconState<T>,
base_state: &BeaconStateBase<T>,
att: AttestationRef<'a, E>,
state: &BeaconState<E>,
base_state: &BeaconStateBase<E>,
total_active_balance: u64,
spec: &ChainSpec,
) -> Option<Self> {
@@ -46,7 +46,7 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> {
let committee = state
.get_beacon_committee(att.data.slot, att.data.index)
.ok()?;
let indices = get_attesting_indices::<T>(committee.committee, &fresh_validators).ok()?;
let indices = get_attesting_indices::<E>(committee.committee, &fresh_validators).ok()?;
let fresh_validators_rewards: HashMap<u64, u64> = indices
.iter()
.copied()
@@ -70,8 +70,8 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> {
/// Initialise an attestation cover object for Altair or later.
pub fn new_for_altair_deneb(
att: AttestationRef<'a, T>,
state: &BeaconState<T>,
att: AttestationRef<'a, E>,
state: &BeaconState<E>,
reward_cache: &'a RewardCache,
total_active_balance: u64,
spec: &ChainSpec,
@@ -123,16 +123,16 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> {
}
}
impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> {
type Object = Attestation<T>;
type Intermediate = AttestationRef<'a, T>;
impl<'a, E: EthSpec> MaxCover for AttMaxCover<'a, E> {
type Object = Attestation<E>;
type Intermediate = AttestationRef<'a, E>;
type Set = HashMap<u64, u64>;
fn intermediate(&self) -> &AttestationRef<'a, T> {
fn intermediate(&self) -> &AttestationRef<'a, E> {
&self.att
}
fn convert_to_object(att_ref: &AttestationRef<'a, T>) -> Attestation<T> {
fn convert_to_object(att_ref: &AttestationRef<'a, E>) -> Attestation<E> {
att_ref.clone_as_attestation()
}
@@ -152,7 +152,7 @@ impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> {
/// of slashable voting, which is rare.
fn update_covering_set(
&mut self,
best_att: &AttestationRef<'a, T>,
best_att: &AttestationRef<'a, E>,
covered_validators: &HashMap<u64, u64>,
) {
if self.att.data.slot == best_att.data.slot && self.att.data.index == best_att.data.index {
@@ -175,11 +175,11 @@ impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> {
/// removed from the `aggregation_bits` before returning it.
///
/// This isn't optimal, but with the Altair fork this code is obsolete and not worth upgrading.
pub fn earliest_attestation_validators<T: EthSpec>(
attestation: &AttestationRef<T>,
state: &BeaconState<T>,
base_state: &BeaconStateBase<T>,
) -> BitList<T::MaxValidatorsPerCommittee> {
pub fn earliest_attestation_validators<E: EthSpec>(
attestation: &AttestationRef<E>,
state: &BeaconState<E>,
base_state: &BeaconStateBase<E>,
) -> BitList<E::MaxValidatorsPerCommittee> {
// Bitfield of validators whose attestations are new/fresh.
let mut new_validators = attestation.indexed.aggregation_bits.clone();

View File

@@ -21,38 +21,38 @@ pub struct CompactAttestationData {
}
#[derive(Debug, PartialEq)]
pub struct CompactIndexedAttestation<T: EthSpec> {
pub struct CompactIndexedAttestation<E: EthSpec> {
pub attesting_indices: Vec<u64>,
pub aggregation_bits: BitList<T::MaxValidatorsPerCommittee>,
pub aggregation_bits: BitList<E::MaxValidatorsPerCommittee>,
pub signature: AggregateSignature,
}
#[derive(Debug)]
pub struct SplitAttestation<T: EthSpec> {
pub struct SplitAttestation<E: EthSpec> {
pub checkpoint: CheckpointKey,
pub data: CompactAttestationData,
pub indexed: CompactIndexedAttestation<T>,
pub indexed: CompactIndexedAttestation<E>,
}
#[derive(Debug, Clone)]
pub struct AttestationRef<'a, T: EthSpec> {
pub struct AttestationRef<'a, E: EthSpec> {
pub checkpoint: &'a CheckpointKey,
pub data: &'a CompactAttestationData,
pub indexed: &'a CompactIndexedAttestation<T>,
pub indexed: &'a CompactIndexedAttestation<E>,
}
#[derive(Debug, Default, PartialEq)]
pub struct AttestationMap<T: EthSpec> {
checkpoint_map: HashMap<CheckpointKey, AttestationDataMap<T>>,
pub struct AttestationMap<E: EthSpec> {
checkpoint_map: HashMap<CheckpointKey, AttestationDataMap<E>>,
}
#[derive(Debug, Default, PartialEq)]
pub struct AttestationDataMap<T: EthSpec> {
attestations: HashMap<CompactAttestationData, Vec<CompactIndexedAttestation<T>>>,
pub struct AttestationDataMap<E: EthSpec> {
attestations: HashMap<CompactAttestationData, Vec<CompactIndexedAttestation<E>>>,
}
impl<T: EthSpec> SplitAttestation<T> {
pub fn new(attestation: Attestation<T>, attesting_indices: Vec<u64>) -> Self {
impl<E: EthSpec> SplitAttestation<E> {
pub fn new(attestation: Attestation<E>, attesting_indices: Vec<u64>) -> Self {
let checkpoint = CheckpointKey {
source: attestation.data.source,
target_epoch: attestation.data.target.epoch,
@@ -75,7 +75,7 @@ impl<T: EthSpec> SplitAttestation<T> {
}
}
pub fn as_ref(&self) -> AttestationRef<T> {
pub fn as_ref(&self) -> AttestationRef<E> {
AttestationRef {
checkpoint: &self.checkpoint,
data: &self.data,
@@ -84,7 +84,7 @@ impl<T: EthSpec> SplitAttestation<T> {
}
}
impl<'a, T: EthSpec> AttestationRef<'a, T> {
impl<'a, E: EthSpec> AttestationRef<'a, E> {
pub fn attestation_data(&self) -> AttestationData {
AttestationData {
slot: self.data.slot,
@@ -98,7 +98,7 @@ impl<'a, T: EthSpec> AttestationRef<'a, T> {
}
}
pub fn clone_as_attestation(&self) -> Attestation<T> {
pub fn clone_as_attestation(&self) -> Attestation<E> {
Attestation {
aggregation_bits: self.indexed.aggregation_bits.clone(),
data: self.attestation_data(),
@@ -110,7 +110,7 @@ impl<'a, T: EthSpec> AttestationRef<'a, T> {
impl CheckpointKey {
/// Return two checkpoint keys: `(previous, current)` for the previous and current epochs of
/// the `state`.
pub fn keys_for_state<T: EthSpec>(state: &BeaconState<T>) -> (Self, Self) {
pub fn keys_for_state<E: EthSpec>(state: &BeaconState<E>) -> (Self, Self) {
(
CheckpointKey {
source: state.previous_justified_checkpoint(),
@@ -124,7 +124,7 @@ impl CheckpointKey {
}
}
impl<T: EthSpec> CompactIndexedAttestation<T> {
impl<E: EthSpec> CompactIndexedAttestation<E> {
pub fn signers_disjoint_from(&self, other: &Self) -> bool {
self.aggregation_bits
.intersection(&other.aggregation_bits)
@@ -143,8 +143,8 @@ impl<T: EthSpec> CompactIndexedAttestation<T> {
}
}
impl<T: EthSpec> AttestationMap<T> {
pub fn insert(&mut self, attestation: Attestation<T>, attesting_indices: Vec<u64>) {
impl<E: EthSpec> AttestationMap<E> {
pub fn insert(&mut self, attestation: Attestation<E>, attesting_indices: Vec<u64>) {
let SplitAttestation {
checkpoint,
data,
@@ -176,7 +176,7 @@ impl<T: EthSpec> AttestationMap<T> {
pub fn get_attestations<'a>(
&'a self,
checkpoint_key: &'a CheckpointKey,
) -> impl Iterator<Item = AttestationRef<'a, T>> + 'a {
) -> impl Iterator<Item = AttestationRef<'a, E>> + 'a {
self.checkpoint_map
.get(checkpoint_key)
.into_iter()
@@ -184,7 +184,7 @@ impl<T: EthSpec> AttestationMap<T> {
}
/// Iterate all attestations in the map.
pub fn iter(&self) -> impl Iterator<Item = AttestationRef<T>> {
pub fn iter(&self) -> impl Iterator<Item = AttestationRef<E>> {
self.checkpoint_map
.iter()
.flat_map(|(checkpoint_key, attestation_map)| attestation_map.iter(checkpoint_key))
@@ -211,11 +211,11 @@ impl<T: EthSpec> AttestationMap<T> {
}
}
impl<T: EthSpec> AttestationDataMap<T> {
impl<E: EthSpec> AttestationDataMap<E> {
pub fn iter<'a>(
&'a self,
checkpoint_key: &'a CheckpointKey,
) -> impl Iterator<Item = AttestationRef<'a, T>> + 'a {
) -> impl Iterator<Item = AttestationRef<'a, E>> + 'a {
self.attestations.iter().flat_map(|(data, vec_indexed)| {
vec_indexed.iter().map(|indexed| AttestationRef {
checkpoint: checkpoint_key,

View File

@@ -4,16 +4,16 @@ use std::collections::{HashMap, HashSet};
use types::{AttesterSlashing, BeaconState, EthSpec};
#[derive(Debug, Clone)]
pub struct AttesterSlashingMaxCover<'a, T: EthSpec> {
slashing: &'a AttesterSlashing<T>,
pub struct AttesterSlashingMaxCover<'a, E: EthSpec> {
slashing: &'a AttesterSlashing<E>,
effective_balances: HashMap<u64, u64>,
}
impl<'a, T: EthSpec> AttesterSlashingMaxCover<'a, T> {
impl<'a, E: EthSpec> AttesterSlashingMaxCover<'a, E> {
pub fn new(
slashing: &'a AttesterSlashing<T>,
slashing: &'a AttesterSlashing<E>,
proposer_slashing_indices: &HashSet<u64>,
state: &BeaconState<T>,
state: &BeaconState<E>,
) -> Option<Self> {
let mut effective_balances: HashMap<u64, u64> = HashMap::new();
let epoch = state.current_epoch();
@@ -36,18 +36,18 @@ impl<'a, T: EthSpec> AttesterSlashingMaxCover<'a, T> {
}
}
impl<'a, T: EthSpec> MaxCover for AttesterSlashingMaxCover<'a, T> {
impl<'a, E: EthSpec> MaxCover for AttesterSlashingMaxCover<'a, E> {
/// The result type, of which we would eventually like a collection of maximal quality.
type Object = AttesterSlashing<T>;
type Intermediate = AttesterSlashing<T>;
type Object = AttesterSlashing<E>;
type Intermediate = AttesterSlashing<E>;
/// The type used to represent sets.
type Set = HashMap<u64, u64>;
fn intermediate(&self) -> &AttesterSlashing<T> {
fn intermediate(&self) -> &AttesterSlashing<E> {
self.slashing
}
fn convert_to_object(slashing: &AttesterSlashing<T>) -> AttesterSlashing<T> {
fn convert_to_object(slashing: &AttesterSlashing<E>) -> AttesterSlashing<E> {
slashing.clone()
}
@@ -58,7 +58,7 @@ impl<'a, T: EthSpec> MaxCover for AttesterSlashingMaxCover<'a, T> {
/// Update the set of items covered, for the inclusion of some object in the solution.
fn update_covering_set(
&mut self,
_best_slashing: &AttesterSlashing<T>,
_best_slashing: &AttesterSlashing<E>,
covered_validator_indices: &HashMap<u64, u64>,
) {
self.effective_balances

View File

@@ -20,17 +20,17 @@ pub enum ReceivedPreCapella {
/// Using the LIFO queue for block production disincentivises spam on P2P at the Capella fork,
/// and is less-relevant after that.
#[derive(Debug, Default)]
pub struct BlsToExecutionChanges<T: EthSpec> {
pub struct BlsToExecutionChanges<E: EthSpec> {
/// Map from validator index to BLS to execution change.
by_validator_index: HashMap<u64, Arc<SigVerifiedOp<SignedBlsToExecutionChange, T>>>,
by_validator_index: HashMap<u64, Arc<SigVerifiedOp<SignedBlsToExecutionChange, E>>>,
/// Last-in-first-out (LIFO) queue of verified messages.
queue: Vec<Arc<SigVerifiedOp<SignedBlsToExecutionChange, T>>>,
queue: Vec<Arc<SigVerifiedOp<SignedBlsToExecutionChange, E>>>,
/// Contains a set of validator indices which need to have their changes
/// broadcast at the capella epoch.
received_pre_capella_indices: HashSet<u64>,
}
impl<T: EthSpec> BlsToExecutionChanges<T> {
impl<E: EthSpec> BlsToExecutionChanges<E> {
pub fn existing_change_equals(
&self,
address_change: &SignedBlsToExecutionChange,
@@ -42,7 +42,7 @@ impl<T: EthSpec> BlsToExecutionChanges<T> {
pub fn insert(
&mut self,
verified_change: SigVerifiedOp<SignedBlsToExecutionChange, T>,
verified_change: SigVerifiedOp<SignedBlsToExecutionChange, E>,
received_pre_capella: ReceivedPreCapella,
) -> bool {
let validator_index = verified_change.as_inner().message.validator_index;
@@ -64,14 +64,14 @@ impl<T: EthSpec> BlsToExecutionChanges<T> {
/// FIFO ordering, used for persistence to disk.
pub fn iter_fifo(
&self,
) -> impl Iterator<Item = &Arc<SigVerifiedOp<SignedBlsToExecutionChange, T>>> {
) -> impl Iterator<Item = &Arc<SigVerifiedOp<SignedBlsToExecutionChange, E>>> {
self.queue.iter()
}
/// LIFO ordering, used for block packing.
pub fn iter_lifo(
&self,
) -> impl Iterator<Item = &Arc<SigVerifiedOp<SignedBlsToExecutionChange, T>>> {
) -> impl Iterator<Item = &Arc<SigVerifiedOp<SignedBlsToExecutionChange, E>>> {
self.queue.iter().rev()
}
@@ -80,7 +80,7 @@ impl<T: EthSpec> BlsToExecutionChanges<T> {
/// the caller.
pub fn iter_received_pre_capella(
&self,
) -> impl Iterator<Item = &Arc<SigVerifiedOp<SignedBlsToExecutionChange, T>>> {
) -> impl Iterator<Item = &Arc<SigVerifiedOp<SignedBlsToExecutionChange, E>>> {
self.queue.iter().filter(|address_change| {
self.received_pre_capella_indices
.contains(&address_change.as_inner().message.validator_index)
@@ -99,10 +99,10 @@ impl<T: EthSpec> BlsToExecutionChanges<T> {
/// address changes during re-orgs. This isn't *perfect* so some address changes could
/// still get stuck if there are gnarly re-orgs and the changes can't be widely republished
/// due to the gossip duplicate rules.
pub fn prune<Payload: AbstractExecPayload<T>>(
pub fn prune<Payload: AbstractExecPayload<E>>(
&mut self,
head_block: &SignedBeaconBlock<T, Payload>,
head_state: &BeaconState<T>,
head_block: &SignedBeaconBlock<E, Payload>,
head_state: &BeaconState<E>,
spec: &ChainSpec,
) {
let mut validator_indices_pruned = vec![];

View File

@@ -42,25 +42,25 @@ use types::{
SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution, Validator,
};
type SyncContributions<T> = RwLock<HashMap<SyncAggregateId, Vec<SyncCommitteeContribution<T>>>>;
type SyncContributions<E> = RwLock<HashMap<SyncAggregateId, Vec<SyncCommitteeContribution<E>>>>;
#[derive(Default, Debug)]
pub struct OperationPool<T: EthSpec + Default> {
pub struct OperationPool<E: EthSpec + Default> {
/// Map from attestation ID (see below) to vectors of attestations.
attestations: RwLock<AttestationMap<T>>,
attestations: RwLock<AttestationMap<E>>,
/// Map from sync aggregate ID to the best `SyncCommitteeContribution`s seen for that ID.
sync_contributions: SyncContributions<T>,
sync_contributions: SyncContributions<E>,
/// Set of attester slashings, and the fork version they were verified against.
attester_slashings: RwLock<HashSet<SigVerifiedOp<AttesterSlashing<T>, T>>>,
attester_slashings: RwLock<HashSet<SigVerifiedOp<AttesterSlashing<E>, E>>>,
/// Map from proposer index to slashing.
proposer_slashings: RwLock<HashMap<u64, SigVerifiedOp<ProposerSlashing, T>>>,
proposer_slashings: RwLock<HashMap<u64, SigVerifiedOp<ProposerSlashing, E>>>,
/// Map from exiting validator to their exit data.
voluntary_exits: RwLock<HashMap<u64, SigVerifiedOp<SignedVoluntaryExit, T>>>,
voluntary_exits: RwLock<HashMap<u64, SigVerifiedOp<SignedVoluntaryExit, E>>>,
/// Map from credential changing validator to their position in the queue.
bls_to_execution_changes: RwLock<BlsToExecutionChanges<T>>,
bls_to_execution_changes: RwLock<BlsToExecutionChanges<E>>,
/// Reward cache for accelerating attestation packing.
reward_cache: RwLock<RewardCache>,
_phantom: PhantomData<T>,
_phantom: PhantomData<E>,
}
#[derive(Debug, PartialEq)]
@@ -93,7 +93,7 @@ impl From<SyncAggregateError> for OpPoolError {
}
}
impl<T: EthSpec> OperationPool<T> {
impl<E: EthSpec> OperationPool<E> {
/// Create a new operation pool.
pub fn new() -> Self {
Self::default()
@@ -107,7 +107,7 @@ impl<T: EthSpec> OperationPool<T> {
/// This function assumes the given `contribution` is valid.
pub fn insert_sync_contribution(
&self,
contribution: SyncCommitteeContribution<T>,
contribution: SyncCommitteeContribution<E>,
) -> Result<(), OpPoolError> {
let aggregate_id = SyncAggregateId::new(contribution.slot, contribution.beacon_block_root);
let mut contributions = self.sync_contributions.write();
@@ -153,8 +153,8 @@ impl<T: EthSpec> OperationPool<T> {
/// contributions exist at this slot, or else `None`.
pub fn get_sync_aggregate(
&self,
state: &BeaconState<T>,
) -> Result<Option<SyncAggregate<T>>, OpPoolError> {
state: &BeaconState<E>,
) -> Result<Option<SyncAggregate<E>>, OpPoolError> {
// Sync aggregates are formed from the contributions from the previous slot.
let slot = state.slot().saturating_sub(1u64);
let block_root = *state
@@ -197,7 +197,7 @@ impl<T: EthSpec> OperationPool<T> {
/// This function assumes the given `attestation` is valid.
pub fn insert_attestation(
&self,
attestation: Attestation<T>,
attestation: Attestation<E>,
attesting_indices: Vec<u64>,
) -> Result<(), AttestationValidationError> {
self.attestations
@@ -220,18 +220,18 @@ impl<T: EthSpec> OperationPool<T> {
fn get_valid_attestations_for_epoch<'a>(
&'a self,
checkpoint_key: &'a CheckpointKey,
all_attestations: &'a AttestationMap<T>,
state: &'a BeaconState<T>,
all_attestations: &'a AttestationMap<E>,
state: &'a BeaconState<E>,
reward_cache: &'a RewardCache,
total_active_balance: u64,
validity_filter: impl FnMut(&AttestationRef<'a, T>) -> bool + Send,
validity_filter: impl FnMut(&AttestationRef<'a, E>) -> bool + Send,
spec: &'a ChainSpec,
) -> impl Iterator<Item = AttMaxCover<'a, T>> + Send {
) -> impl Iterator<Item = AttMaxCover<'a, E>> + Send {
all_attestations
.get_attestations(checkpoint_key)
.filter(|att| {
att.data.slot + spec.min_attestation_inclusion_delay <= state.slot()
&& state.slot() <= att.data.slot + T::slots_per_epoch()
&& state.slot() <= att.data.slot + E::slots_per_epoch()
})
.filter(validity_filter)
.filter_map(move |att| {
@@ -247,11 +247,11 @@ impl<T: EthSpec> OperationPool<T> {
/// in the operation pool.
pub fn get_attestations(
&self,
state: &BeaconState<T>,
prev_epoch_validity_filter: impl for<'a> FnMut(&AttestationRef<'a, T>) -> bool + Send,
curr_epoch_validity_filter: impl for<'a> FnMut(&AttestationRef<'a, T>) -> bool + Send,
state: &BeaconState<E>,
prev_epoch_validity_filter: impl for<'a> FnMut(&AttestationRef<'a, E>) -> bool + Send,
curr_epoch_validity_filter: impl for<'a> FnMut(&AttestationRef<'a, E>) -> bool + Send,
spec: &ChainSpec,
) -> Result<Vec<Attestation<T>>, OpPoolError> {
) -> Result<Vec<Attestation<E>>, OpPoolError> {
// Attestations for the current fork, which may be from the current or previous epoch.
let (prev_epoch_key, curr_epoch_key) = CheckpointKey::keys_for_state(state);
let all_attestations = self.attestations.read();
@@ -296,12 +296,12 @@ impl<T: EthSpec> OperationPool<T> {
let prev_epoch_limit = if let BeaconState::Base(base_state) = state {
std::cmp::min(
T::MaxPendingAttestations::to_usize()
E::MaxPendingAttestations::to_usize()
.saturating_sub(base_state.previous_epoch_attestations.len()),
T::MaxAttestations::to_usize(),
E::MaxAttestations::to_usize(),
)
} else {
T::MaxAttestations::to_usize()
E::MaxAttestations::to_usize()
};
let (prev_cover, curr_cover) = rayon::join(
@@ -318,7 +318,7 @@ impl<T: EthSpec> OperationPool<T> {
let _timer = metrics::start_timer(&metrics::ATTESTATION_CURR_EPOCH_PACKING_TIME);
maximum_cover(
curr_epoch_att,
T::MaxAttestations::to_usize(),
E::MaxAttestations::to_usize(),
"curr_epoch_attestations",
)
},
@@ -330,7 +330,7 @@ impl<T: EthSpec> OperationPool<T> {
Ok(max_cover::merge_solutions(
curr_cover,
prev_cover,
T::MaxAttestations::to_usize(),
E::MaxAttestations::to_usize(),
))
}
@@ -342,7 +342,7 @@ impl<T: EthSpec> OperationPool<T> {
/// Insert a proposer slashing into the pool.
pub fn insert_proposer_slashing(
&self,
verified_proposer_slashing: SigVerifiedOp<ProposerSlashing, T>,
verified_proposer_slashing: SigVerifiedOp<ProposerSlashing, E>,
) {
self.proposer_slashings.write().insert(
verified_proposer_slashing.as_inner().proposer_index(),
@@ -353,7 +353,7 @@ impl<T: EthSpec> OperationPool<T> {
/// Insert an attester slashing into the pool.
pub fn insert_attester_slashing(
&self,
verified_slashing: SigVerifiedOp<AttesterSlashing<T>, T>,
verified_slashing: SigVerifiedOp<AttesterSlashing<E>, E>,
) {
self.attester_slashings.write().insert(verified_slashing);
}
@@ -365,11 +365,11 @@ impl<T: EthSpec> OperationPool<T> {
/// earlier in the block.
pub fn get_slashings_and_exits(
&self,
state: &BeaconState<T>,
state: &BeaconState<E>,
spec: &ChainSpec,
) -> (
Vec<ProposerSlashing>,
Vec<AttesterSlashing<T>>,
Vec<AttesterSlashing<E>>,
Vec<SignedVoluntaryExit>,
) {
let proposer_slashings = filter_limit_operations(
@@ -382,7 +382,7 @@ impl<T: EthSpec> OperationPool<T> {
.map_or(false, |validator| !validator.slashed)
},
|slashing| slashing.as_inner().clone(),
T::MaxProposerSlashings::to_usize(),
E::MaxProposerSlashings::to_usize(),
);
// Set of validators to be slashed, so we don't attempt to construct invalid attester
@@ -408,9 +408,9 @@ impl<T: EthSpec> OperationPool<T> {
/// This function *must* remain private.
fn get_attester_slashings(
&self,
state: &BeaconState<T>,
state: &BeaconState<E>,
to_be_slashed: &mut HashSet<u64>,
) -> Vec<AttesterSlashing<T>> {
) -> Vec<AttesterSlashing<E>> {
let reader = self.attester_slashings.read();
let relevant_attester_slashings = reader.iter().flat_map(|slashing| {
@@ -423,7 +423,7 @@ impl<T: EthSpec> OperationPool<T> {
maximum_cover(
relevant_attester_slashings,
T::MaxAttesterSlashings::to_usize(),
E::MaxAttesterSlashings::to_usize(),
"attester_slashings",
)
.into_iter()
@@ -435,7 +435,7 @@ impl<T: EthSpec> OperationPool<T> {
}
/// Prune proposer slashings for validators which are exited in the finalized epoch.
pub fn prune_proposer_slashings(&self, head_state: &BeaconState<T>) {
pub fn prune_proposer_slashings(&self, head_state: &BeaconState<E>) {
prune_validator_hash_map(
&mut self.proposer_slashings.write(),
|_, validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch,
@@ -445,7 +445,7 @@ impl<T: EthSpec> OperationPool<T> {
/// Prune attester slashings for all slashed or withdrawn validators, or attestations on another
/// fork.
pub fn prune_attester_slashings(&self, head_state: &BeaconState<T>) {
pub fn prune_attester_slashings(&self, head_state: &BeaconState<E>) {
self.attester_slashings.write().retain(|slashing| {
// Check that the attestation's signature is still valid wrt the fork version.
let signature_ok = slashing.signature_is_still_valid(&head_state.fork());
@@ -476,7 +476,7 @@ impl<T: EthSpec> OperationPool<T> {
}
/// Insert a voluntary exit that has previously been checked elsewhere.
pub fn insert_voluntary_exit(&self, exit: SigVerifiedOp<SignedVoluntaryExit, T>) {
pub fn insert_voluntary_exit(&self, exit: SigVerifiedOp<SignedVoluntaryExit, E>) {
self.voluntary_exits
.write()
.insert(exit.as_inner().message.validator_index, exit);
@@ -485,7 +485,7 @@ impl<T: EthSpec> OperationPool<T> {
/// Get a list of voluntary exits for inclusion in a block.
fn get_voluntary_exits<F>(
&self,
state: &BeaconState<T>,
state: &BeaconState<E>,
filter: F,
spec: &ChainSpec,
) -> Vec<SignedVoluntaryExit>
@@ -501,12 +501,12 @@ impl<T: EthSpec> OperationPool<T> {
.is_ok()
},
|exit| exit.as_inner().clone(),
T::MaxVoluntaryExits::to_usize(),
E::MaxVoluntaryExits::to_usize(),
)
}
/// Prune if validator has already exited at or before the finalized checkpoint of the head.
pub fn prune_voluntary_exits(&self, head_state: &BeaconState<T>) {
pub fn prune_voluntary_exits(&self, head_state: &BeaconState<E>) {
prune_validator_hash_map(
&mut self.voluntary_exits.write(),
// This condition is slightly too loose, since there will be some finalized exits that
@@ -536,7 +536,7 @@ impl<T: EthSpec> OperationPool<T> {
/// Return `true` if the change was inserted.
pub fn insert_bls_to_execution_change(
&self,
verified_change: SigVerifiedOp<SignedBlsToExecutionChange, T>,
verified_change: SigVerifiedOp<SignedBlsToExecutionChange, E>,
received_pre_capella: ReceivedPreCapella,
) -> bool {
self.bls_to_execution_changes
@@ -549,7 +549,7 @@ impl<T: EthSpec> OperationPool<T> {
/// They're in random `HashMap` order, which isn't exactly fair, but isn't unfair either.
pub fn get_bls_to_execution_changes(
&self,
state: &BeaconState<T>,
state: &BeaconState<E>,
spec: &ChainSpec,
) -> Vec<SignedBlsToExecutionChange> {
filter_limit_operations(
@@ -563,7 +563,7 @@ impl<T: EthSpec> OperationPool<T> {
})
},
|address_change| address_change.as_inner().clone(),
T::MaxBlsToExecutionChanges::to_usize(),
E::MaxBlsToExecutionChanges::to_usize(),
)
}
@@ -573,7 +573,7 @@ impl<T: EthSpec> OperationPool<T> {
/// broadcast of messages.
pub fn get_bls_to_execution_changes_received_pre_capella(
&self,
state: &BeaconState<T>,
state: &BeaconState<E>,
spec: &ChainSpec,
) -> Vec<SignedBlsToExecutionChange> {
let mut changes = filter_limit_operations(
@@ -604,10 +604,10 @@ impl<T: EthSpec> OperationPool<T> {
}
/// Prune BLS to execution changes that have been applied to the state more than 1 block ago.
pub fn prune_bls_to_execution_changes<Payload: AbstractExecPayload<T>>(
pub fn prune_bls_to_execution_changes<Payload: AbstractExecPayload<E>>(
&self,
head_block: &SignedBeaconBlock<T, Payload>,
head_state: &BeaconState<T>,
head_block: &SignedBeaconBlock<E, Payload>,
head_state: &BeaconState<E>,
spec: &ChainSpec,
) {
self.bls_to_execution_changes
@@ -616,10 +616,10 @@ impl<T: EthSpec> OperationPool<T> {
}
/// Prune all types of transactions given the latest head state and head fork.
pub fn prune_all<Payload: AbstractExecPayload<T>>(
pub fn prune_all<Payload: AbstractExecPayload<E>>(
&self,
head_block: &SignedBeaconBlock<T, Payload>,
head_state: &BeaconState<T>,
head_block: &SignedBeaconBlock<E, Payload>,
head_state: &BeaconState<E>,
current_epoch: Epoch,
spec: &ChainSpec,
) {
@@ -639,7 +639,7 @@ impl<T: EthSpec> OperationPool<T> {
/// Returns all known `Attestation` objects.
///
/// This method may return objects that are invalid for block inclusion.
pub fn get_all_attestations(&self) -> Vec<Attestation<T>> {
pub fn get_all_attestations(&self) -> Vec<Attestation<E>> {
self.attestations
.read()
.iter()
@@ -650,7 +650,7 @@ impl<T: EthSpec> OperationPool<T> {
/// Returns all known `Attestation` objects that pass the provided filter.
///
/// This method may return objects that are invalid for block inclusion.
pub fn get_filtered_attestations<F>(&self, filter: F) -> Vec<Attestation<T>>
pub fn get_filtered_attestations<F>(&self, filter: F) -> Vec<Attestation<E>>
where
F: Fn(&AttestationData) -> bool,
{
@@ -665,7 +665,7 @@ impl<T: EthSpec> OperationPool<T> {
/// Returns all known `AttesterSlashing` objects.
///
/// This method may return objects that are invalid for block inclusion.
pub fn get_all_attester_slashings(&self) -> Vec<AttesterSlashing<T>> {
pub fn get_all_attester_slashings(&self) -> Vec<AttesterSlashing<E>> {
self.attester_slashings
.read()
.iter()
@@ -751,7 +751,7 @@ fn prune_validator_hash_map<T, F, E: EthSpec>(
}
/// Compare two operation pools.
impl<T: EthSpec + Default> PartialEq for OperationPool<T> {
impl<E: EthSpec + Default> PartialEq for OperationPool<E> {
fn eq(&self, other: &Self) -> bool {
if ptr::eq(self, other) {
return true;

View File

@@ -14,7 +14,7 @@ use std::mem;
use store::{DBColumn, Error as StoreError, StoreItem};
use types::*;
type PersistedSyncContributions<T> = Vec<(SyncAggregateId, Vec<SyncCommitteeContribution<T>>)>;
type PersistedSyncContributions<E> = Vec<(SyncAggregateId, Vec<SyncCommitteeContribution<E>>)>;
/// SSZ-serializable version of `OperationPool`.
///
@@ -30,45 +30,45 @@ type PersistedSyncContributions<T> = Vec<(SyncAggregateId, Vec<SyncCommitteeCont
)]
#[derive(PartialEq, Debug, Encode)]
#[ssz(enum_behaviour = "transparent")]
pub struct PersistedOperationPool<T: EthSpec> {
pub struct PersistedOperationPool<E: EthSpec> {
/// [DEPRECATED] Mapping from attestation ID to attestation mappings.
#[superstruct(only(V5))]
pub attestations_v5: Vec<(AttestationId, Vec<Attestation<T>>)>,
pub attestations_v5: Vec<(AttestationId, Vec<Attestation<E>>)>,
/// Attestations and their attesting indices.
#[superstruct(only(V12, V14, V15))]
pub attestations: Vec<(Attestation<T>, Vec<u64>)>,
pub attestations: Vec<(Attestation<E>, Vec<u64>)>,
/// Mapping from sync contribution ID to sync contributions and aggregate.
pub sync_contributions: PersistedSyncContributions<T>,
pub sync_contributions: PersistedSyncContributions<E>,
/// [DEPRECATED] Attester slashings.
#[superstruct(only(V5))]
pub attester_slashings_v5: Vec<(AttesterSlashing<T>, ForkVersion)>,
pub attester_slashings_v5: Vec<(AttesterSlashing<E>, ForkVersion)>,
/// Attester slashings.
#[superstruct(only(V12, V14, V15))]
pub attester_slashings: Vec<SigVerifiedOp<AttesterSlashing<T>, T>>,
pub attester_slashings: Vec<SigVerifiedOp<AttesterSlashing<E>, E>>,
/// [DEPRECATED] Proposer slashings.
#[superstruct(only(V5))]
pub proposer_slashings_v5: Vec<ProposerSlashing>,
/// Proposer slashings with fork information.
#[superstruct(only(V12, V14, V15))]
pub proposer_slashings: Vec<SigVerifiedOp<ProposerSlashing, T>>,
pub proposer_slashings: Vec<SigVerifiedOp<ProposerSlashing, E>>,
/// [DEPRECATED] Voluntary exits.
#[superstruct(only(V5))]
pub voluntary_exits_v5: Vec<SignedVoluntaryExit>,
/// Voluntary exits with fork information.
#[superstruct(only(V12, V14, V15))]
pub voluntary_exits: Vec<SigVerifiedOp<SignedVoluntaryExit, T>>,
pub voluntary_exits: Vec<SigVerifiedOp<SignedVoluntaryExit, E>>,
/// BLS to Execution Changes
#[superstruct(only(V14, V15))]
pub bls_to_execution_changes: Vec<SigVerifiedOp<SignedBlsToExecutionChange, T>>,
pub bls_to_execution_changes: Vec<SigVerifiedOp<SignedBlsToExecutionChange, E>>,
/// Validator indices with BLS to Execution Changes to be broadcast at the
/// Capella fork.
#[superstruct(only(V15))]
pub capella_bls_change_broadcast_indices: Vec<u64>,
}
impl<T: EthSpec> PersistedOperationPool<T> {
impl<E: EthSpec> PersistedOperationPool<E> {
/// Convert an `OperationPool` into serializable form.
pub fn from_operation_pool(operation_pool: &OperationPool<T>) -> Self {
pub fn from_operation_pool(operation_pool: &OperationPool<E>) -> Self {
let attestations = operation_pool
.attestations
.read()
@@ -135,7 +135,7 @@ impl<T: EthSpec> PersistedOperationPool<T> {
}
/// Reconstruct an `OperationPool`.
pub fn into_operation_pool(mut self) -> Result<OperationPool<T>, OpPoolError> {
pub fn into_operation_pool(mut self) -> Result<OperationPool<E>, OpPoolError> {
let attester_slashings = RwLock::new(self.attester_slashings()?.iter().cloned().collect());
let proposer_slashings = RwLock::new(
self.proposer_slashings()?
@@ -200,7 +200,7 @@ impl<T: EthSpec> PersistedOperationPool<T> {
}
}
impl<T: EthSpec> StoreItem for PersistedOperationPoolV5<T> {
impl<E: EthSpec> StoreItem for PersistedOperationPoolV5<E> {
fn db_column() -> DBColumn {
DBColumn::OpPool
}
@@ -214,7 +214,7 @@ impl<T: EthSpec> StoreItem for PersistedOperationPoolV5<T> {
}
}
impl<T: EthSpec> StoreItem for PersistedOperationPoolV12<T> {
impl<E: EthSpec> StoreItem for PersistedOperationPoolV12<E> {
fn db_column() -> DBColumn {
DBColumn::OpPool
}
@@ -228,7 +228,7 @@ impl<T: EthSpec> StoreItem for PersistedOperationPoolV12<T> {
}
}
impl<T: EthSpec> StoreItem for PersistedOperationPoolV14<T> {
impl<E: EthSpec> StoreItem for PersistedOperationPoolV14<E> {
fn db_column() -> DBColumn {
DBColumn::OpPool
}
@@ -242,7 +242,7 @@ impl<T: EthSpec> StoreItem for PersistedOperationPoolV14<T> {
}
}
impl<T: EthSpec> StoreItem for PersistedOperationPoolV15<T> {
impl<E: EthSpec> StoreItem for PersistedOperationPoolV15<E> {
fn db_column() -> DBColumn {
DBColumn::OpPool
}
@@ -257,7 +257,7 @@ impl<T: EthSpec> StoreItem for PersistedOperationPoolV15<T> {
}
/// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::V12`.
impl<T: EthSpec> StoreItem for PersistedOperationPool<T> {
impl<E: EthSpec> StoreItem for PersistedOperationPool<E> {
fn db_column() -> DBColumn {
DBColumn::OpPool
}