Final changes for fusaka-devnet-2 (#7655)

Closes #7467.

This PR primarily addresses [the P2P changes](https://github.com/ethereum/EIPs/pull/9840) in [fusaka-devnet-2](https://fusaka-devnet-2.ethpandaops.io/). Specifically:

* [the new `nfd` parameter added to the `ENR`](https://github.com/ethereum/EIPs/pull/9840)
* [the modified `compute_fork_digest()` changes for every BPO fork](https://github.com/ethereum/EIPs/pull/9840)

90% of this PR was absolutely hacked together as fast as possible during the Berlinterop while running between Glamsterdam debates. Luckily, it seems to work. But I was unable to be as careful in avoiding bugs as I usually am. I've cleaned up the things *I remember* wanting to come back and have a closer look at. But still working on this.

Progress:
* [x] get it working on `fusaka-devnet-2`
* [ ] [*optional* disconnect from peers with incorrect `nfd` at the fork boundary](https://github.com/ethereum/consensus-specs/pull/4407) - Can be addressed in a future PR if necessary
* [x] first pass clean-up
* [x] fix up all the broken tests
* [x] final self-review
* [x] more thorough review from people more familiar with affected code
This commit is contained in:
ethDreamer
2025-07-10 16:32:58 -05:00
committed by GitHub
parent 3826fe91f4
commit b43e0b446c
26 changed files with 1047 additions and 581 deletions

View File

@@ -414,7 +414,10 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> EmptyBlock for BeaconBlockAlta
/// Returns an empty Altair block to be used during genesis.
fn empty(spec: &ChainSpec) -> Self {
BeaconBlockAltair {
slot: spec.genesis_slot,
slot: spec
.altair_fork_epoch
.expect("altair enabled")
.start_slot(E::slots_per_epoch()),
proposer_index: 0,
parent_root: Hash256::zero(),
state_root: Hash256::zero(),
@@ -447,7 +450,10 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> BeaconBlockAltair<E, Payload>
sync_committee_bits: BitVector::default(),
};
BeaconBlockAltair {
slot: spec.genesis_slot,
slot: spec
.altair_fork_epoch
.expect("altair enabled")
.start_slot(E::slots_per_epoch()),
proposer_index: 0,
parent_root: Hash256::zero(),
state_root: Hash256::zero(),
@@ -475,7 +481,10 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> EmptyBlock for BeaconBlockBell
/// Returns an empty Bellatrix block to be used during genesis.
fn empty(spec: &ChainSpec) -> Self {
BeaconBlockBellatrix {
slot: spec.genesis_slot,
slot: spec
.bellatrix_fork_epoch
.expect("bellatrix enabled")
.start_slot(E::slots_per_epoch()),
proposer_index: 0,
parent_root: Hash256::zero(),
state_root: Hash256::zero(),
@@ -503,7 +512,10 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> EmptyBlock for BeaconBlockCape
/// Returns an empty Capella block to be used during genesis.
fn empty(spec: &ChainSpec) -> Self {
BeaconBlockCapella {
slot: spec.genesis_slot,
slot: spec
.capella_fork_epoch
.expect("capella enabled")
.start_slot(E::slots_per_epoch()),
proposer_index: 0,
parent_root: Hash256::zero(),
state_root: Hash256::zero(),
@@ -532,7 +544,10 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> EmptyBlock for BeaconBlockDene
/// Returns an empty Deneb block to be used during genesis.
fn empty(spec: &ChainSpec) -> Self {
BeaconBlockDeneb {
slot: spec.genesis_slot,
slot: spec
.deneb_fork_epoch
.expect("deneb enabled")
.start_slot(E::slots_per_epoch()),
proposer_index: 0,
parent_root: Hash256::zero(),
state_root: Hash256::zero(),
@@ -562,7 +577,10 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> EmptyBlock for BeaconBlockElec
/// Returns an empty Electra block to be used during genesis.
fn empty(spec: &ChainSpec) -> Self {
BeaconBlockElectra {
slot: spec.genesis_slot,
slot: spec
.electra_fork_epoch
.expect("electra enabled")
.start_slot(E::slots_per_epoch()),
proposer_index: 0,
parent_root: Hash256::zero(),
state_root: Hash256::zero(),
@@ -593,7 +611,10 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> EmptyBlock for BeaconBlockFulu
/// Returns an empty Fulu block to be used during genesis.
fn empty(spec: &ChainSpec) -> Self {
BeaconBlockFulu {
slot: spec.genesis_slot,
slot: spec
.fulu_fork_epoch
.expect("fulu enabled")
.start_slot(E::slots_per_epoch()),
proposer_index: 0,
parent_root: Hash256::zero(),
state_root: Hash256::zero(),

View File

@@ -2,6 +2,7 @@ use crate::application_domain::{ApplicationDomain, APPLICATION_DOMAIN_BUILDER};
use crate::blob_sidecar::BlobIdentifier;
use crate::data_column_sidecar::DataColumnsByRootIdentifier;
use crate::*;
use ethereum_hashing::hash;
use int_to_bytes::int_to_bytes4;
use safe_arith::{ArithError, SafeArith};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
@@ -245,7 +246,7 @@ pub struct ChainSpec {
/*
* Networking Fulu
*/
blob_schedule: BlobSchedule,
pub(crate) blob_schedule: BlobSchedule,
min_epochs_for_data_column_sidecars_requests: u64,
/*
@@ -283,27 +284,15 @@ impl ChainSpec {
genesis_validators_root: Hash256,
) -> EnrForkId {
EnrForkId {
fork_digest: self.fork_digest::<E>(slot, genesis_validators_root),
fork_digest: self
.compute_fork_digest(genesis_validators_root, slot.epoch(E::slots_per_epoch())),
next_fork_version: self.next_fork_version::<E>(slot),
next_fork_epoch: self
.next_fork_epoch::<E>(slot)
.map(|(_, e)| e)
.next_digest_epoch(slot.epoch(E::slots_per_epoch()))
.unwrap_or(self.far_future_epoch),
}
}
/// Returns the `ForkDigest` for the given slot.
///
/// If `self.altair_fork_epoch == None`, then this function returns the genesis fork digest
/// otherwise, returns the fork digest based on the slot.
pub fn fork_digest<E: EthSpec>(&self, slot: Slot, genesis_validators_root: Hash256) -> [u8; 4] {
let fork_name = self.fork_name_at_slot::<E>(slot);
Self::compute_fork_digest(
self.fork_version_for_name(fork_name),
genesis_validators_root,
)
}
/// Returns the `next_fork_version`.
///
/// `next_fork_version = current_fork_version` if no future fork is planned,
@@ -365,6 +354,11 @@ impl ChainSpec {
}
}
/// Returns the fork version in effect at `epoch`.
///
/// This is `compute_fork_version` in the consensus spec.
pub fn fork_version_for_epoch(&self, epoch: Epoch) -> [u8; 4] {
    let fork_name = self.fork_name_at_epoch(epoch);
    self.fork_version_for_name(fork_name)
}
/// For a given fork name, return the epoch at which it activates.
pub fn fork_epoch(&self, fork_name: ForkName) -> Option<Epoch> {
match fork_name {
@@ -447,8 +441,13 @@ impl ChainSpec {
.is_some_and(|fulu_fork_epoch| block_epoch >= fulu_fork_epoch)
}
/// Returns true if `FULU_FORK_EPOCH` is set and is not set to `FAR_FUTURE_EPOCH`.
/// Returns true if PeerDAS is scheduled. Alias for [`Self::is_fulu_scheduled`]
pub fn is_peer_das_scheduled(&self) -> bool {
self.is_fulu_scheduled()
}
/// Returns true if `FULU_FORK_EPOCH` is set and is not set to `FAR_FUTURE_EPOCH`.
pub fn is_fulu_scheduled(&self) -> bool {
self.fulu_fork_epoch
.is_some_and(|fulu_fork_epoch| fulu_fork_epoch != self.far_future_epoch)
}
@@ -556,18 +555,69 @@ impl ChainSpec {
///
/// This is a digest primarily used for domain separation on the p2p layer.
/// 4-bytes suffices for practical separation of forks/chains.
pub fn compute_fork_digest(
current_version: [u8; 4],
genesis_validators_root: Hash256,
) -> [u8; 4] {
let mut result = [0; 4];
let root = Self::compute_fork_data_root(current_version, genesis_validators_root);
result.copy_from_slice(
pub fn compute_fork_digest(&self, genesis_validators_root: Hash256, epoch: Epoch) -> [u8; 4] {
let fork_version = self.fork_version_for_epoch(epoch);
let mut base_digest = [0u8; 4];
let root = Self::compute_fork_data_root(fork_version, genesis_validators_root);
base_digest.copy_from_slice(
root.as_slice()
.get(0..4)
.expect("root hash is at least 4 bytes"),
);
result
let Some(blob_parameters) = self.get_blob_parameters(epoch) else {
return base_digest;
};
match self.fulu_fork_epoch {
Some(fulu_epoch) if epoch >= fulu_epoch => {
// Concatenate epoch and max_blobs_per_block as u64 bytes
let mut input = Vec::with_capacity(16);
input.extend_from_slice(&blob_parameters.epoch.as_u64().to_le_bytes());
input.extend_from_slice(&blob_parameters.max_blobs_per_block.to_le_bytes());
// Hash the concatenated bytes
let hash = hash(&input);
// XOR the base digest with the first 4 bytes of the hash
let mut masked_digest = [0u8; 4];
for (i, (a, b)) in base_digest.iter().zip(hash.iter()).enumerate() {
if let Some(x) = masked_digest.get_mut(i) {
*x = a ^ b;
}
}
masked_digest
}
_ => base_digest,
}
}
/// Returns every epoch at which the fork digest can change, in ascending order.
///
/// This covers the activation epoch of every scheduled named fork, plus —
/// once Fulu is scheduled — the epoch of every blob-schedule (BPO) entry,
/// since BPO forks also alter the digest.
pub fn all_digest_epochs(&self) -> impl std::iter::Iterator<Item = Epoch> {
    // Deduplicate via a set: a BPO entry may share an epoch with a named fork.
    let mut epochs: std::collections::HashSet<Epoch> = ForkName::list_all_fork_epochs(self)
        .into_iter()
        .filter_map(|(_, maybe_epoch)| maybe_epoch)
        .collect();
    if self.is_fulu_scheduled() {
        epochs.extend((&self.blob_schedule).into_iter().map(|bp| bp.epoch));
    }
    let mut sorted: Vec<Epoch> = epochs.into_iter().collect();
    // Unstable sort is fine: the set guarantees the elements are unique.
    sorted.sort_unstable();
    sorted.into_iter()
}
/// Returns the epoch of the next scheduled digest change after `epoch`, if any.
///
/// Before Fulu only named forks change the digest; from the Fulu fork epoch
/// onwards, blob-schedule (BPO) epochs are considered as well.
pub fn next_digest_epoch(&self, epoch: Epoch) -> Option<Epoch> {
    let fulu_active = self
        .fulu_fork_epoch
        .is_some_and(|fulu_epoch| epoch >= fulu_epoch);
    if fulu_active {
        self.all_digest_epochs()
            .find(|digest_epoch| *digest_epoch > epoch)
    } else {
        let next_fork = self.fork_name_at_epoch(epoch).next_fork()?;
        self.fork_epoch(next_fork)
    }
}
/// Compute a domain by applying the given `fork_version`.
@@ -626,17 +676,6 @@ impl ChainSpec {
}
}
/// Returns the highest possible value for max_request_blocks based on enabled forks.
///
/// This is useful for upper bounds in testing.
pub fn max_request_blocks_upper_bound(&self) -> usize {
if self.deneb_fork_epoch.is_some() {
self.max_request_blocks_deneb as usize
} else {
self.max_request_blocks as usize
}
}
pub fn max_request_blob_sidecars(&self, fork_name: ForkName) -> usize {
if fork_name.electra_enabled() {
self.max_request_blob_sidecars_electra as usize
@@ -672,6 +711,24 @@ impl ChainSpec {
}
}
/// Return the blob parameters at a given epoch.
///
/// Only meaningful from the Fulu fork onwards: returns `None` when Fulu is
/// not scheduled or `epoch` precedes it. When the blob schedule has no entry
/// covering `epoch`, falls back to the Electra-era parameters.
fn get_blob_parameters(&self, epoch: Epoch) -> Option<BlobParameters> {
    let fulu_epoch = self.fulu_fork_epoch?;
    if epoch < fulu_epoch {
        return None;
    }
    if let Some(parameters) = self.blob_schedule.blob_parameters_for_epoch(epoch) {
        return Some(parameters);
    }
    // No schedule entry at or below `epoch`: use the Electra defaults.
    let electra_epoch = self
        .electra_fork_epoch
        .expect("electra fork epoch must be set if fulu epoch is set");
    Some(BlobParameters {
        epoch: electra_epoch,
        max_blobs_per_block: self.max_blobs_per_block_electra,
    })
}
// TODO(EIP-7892): remove this once we have fork-version changes on BPO forks
pub fn max_blobs_per_block_within_fork(&self, fork_name: ForkName) -> u64 {
if !fork_name.fulu_enabled() {
@@ -1404,29 +1461,29 @@ impl Default for ChainSpec {
#[derive(arbitrary::Arbitrary, Serialize, Deserialize, Debug, PartialEq, Clone)]
#[serde(rename_all = "UPPERCASE")]
pub struct BPOFork {
epoch: Epoch,
pub struct BlobParameters {
pub epoch: Epoch,
#[serde(with = "serde_utils::quoted_u64")]
max_blobs_per_block: u64,
pub max_blobs_per_block: u64,
}
// A wrapper around a vector of BPOFork to ensure that the vector is reverse
// A wrapper around a vector of BlobParameters to ensure that the vector is reverse
// sorted by epoch.
#[derive(arbitrary::Arbitrary, Serialize, Debug, PartialEq, Clone)]
pub struct BlobSchedule(Vec<BPOFork>);
pub struct BlobSchedule(Vec<BlobParameters>);
impl<'de> Deserialize<'de> for BlobSchedule {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let vec = Vec::<BPOFork>::deserialize(deserializer)?;
let vec = Vec::<BlobParameters>::deserialize(deserializer)?;
Ok(BlobSchedule::new(vec))
}
}
impl BlobSchedule {
pub fn new(mut vec: Vec<BPOFork>) -> Self {
pub fn new(mut vec: Vec<BlobParameters>) -> Self {
// reverse sort by epoch
vec.sort_by(|a, b| b.epoch.cmp(&a.epoch));
Self(vec)
@@ -1443,19 +1500,22 @@ impl BlobSchedule {
.map(|entry| entry.max_blobs_per_block)
}
/// Returns the blob parameters in effect at `epoch`, if any.
///
/// The inner vector is kept reverse-sorted by epoch, so the first entry whose
/// epoch is at or below `epoch` is the most recent applicable one.
pub fn blob_parameters_for_epoch(&self, epoch: Epoch) -> Option<BlobParameters> {
    for entry in &self.0 {
        if epoch >= entry.epoch {
            return Some(entry.clone());
        }
    }
    None
}
/// Returns an empty blob schedule (no BPO forks configured).
pub const fn default() -> Self {
    // TODO(EIP-7892): think about what the default should be
    Self(vec![])
}
pub fn as_vec(&self) -> &Vec<BPOFork> {
pub fn as_vec(&self) -> &Vec<BlobParameters> {
&self.0
}
}
impl<'a> IntoIterator for &'a BlobSchedule {
type Item = &'a BPOFork;
type IntoIter = std::slice::Iter<'a, BPOFork>;
type Item = &'a BlobParameters;
type IntoIter = std::slice::Iter<'a, BlobParameters>;
fn into_iter(self) -> Self::IntoIter {
self.0.iter()
@@ -1463,8 +1523,8 @@ impl<'a> IntoIterator for &'a BlobSchedule {
}
impl IntoIterator for BlobSchedule {
type Item = BPOFork;
type IntoIter = std::vec::IntoIter<BPOFork>;
type Item = BlobParameters;
type IntoIter = std::vec::IntoIter<BlobParameters>;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
@@ -1691,7 +1751,6 @@ fn default_bellatrix_fork_version() -> [u8; 4] {
}
fn default_capella_fork_version() -> [u8; 4] {
// TODO: determine if the bellatrix example should be copied like this
[0xff, 0xff, 0xff, 0xff]
}
@@ -2528,23 +2587,23 @@ mod yaml_tests {
assert_eq!(
config.blob_schedule.as_vec(),
&vec![
BPOFork {
BlobParameters {
epoch: Epoch::new(1584),
max_blobs_per_block: 20
},
BPOFork {
BlobParameters {
epoch: Epoch::new(1280),
max_blobs_per_block: 9
},
BPOFork {
BlobParameters {
epoch: Epoch::new(1024),
max_blobs_per_block: 18
},
BPOFork {
BlobParameters {
epoch: Epoch::new(768),
max_blobs_per_block: 15
},
BPOFork {
BlobParameters {
epoch: Epoch::new(512),
max_blobs_per_block: 12
},
@@ -2563,6 +2622,88 @@ mod yaml_tests {
assert_eq!(spec.max_blobs_per_block_within_fork(ForkName::Fulu), 20);
}
#[test]
fn blob_schedule_fork_digest() {
    // Devnet-style config: Electra at epoch 9, Fulu at epoch 100, with BPO
    // (blob-parameter-only) entries at epochs 150, 200, 250 and 300.
    // NOTE(review): indentation inside this YAML literal appears to have been
    // stripped by formatting; `BLOB_SCHEDULE` entries normally need nested
    // indentation to parse as a list of maps — confirm against the original.
    let spec_contents = r#"
PRESET_BASE: 'mainnet'
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 384
MIN_GENESIS_TIME: 1748264340
GENESIS_FORK_VERSION: 0x10355025
GENESIS_DELAY: 60
SECONDS_PER_SLOT: 12
SECONDS_PER_ETH1_BLOCK: 12
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
SHARD_COMMITTEE_PERIOD: 256
ETH1_FOLLOW_DISTANCE: 2048
INACTIVITY_SCORE_BIAS: 4
INACTIVITY_SCORE_RECOVERY_RATE: 16
EJECTION_BALANCE: 16000000000
MIN_PER_EPOCH_CHURN_LIMIT: 4
CHURN_LIMIT_QUOTIENT: 65536
MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8
PROPOSER_SCORE_BOOST: 40
REORG_HEAD_WEIGHT_THRESHOLD: 20
REORG_PARENT_WEIGHT_THRESHOLD: 160
REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2
DEPOSIT_CHAIN_ID: 7042643276
DEPOSIT_NETWORK_ID: 7042643276
DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa
ALTAIR_FORK_VERSION: 0x20355025
ALTAIR_FORK_EPOCH: 0
BELLATRIX_FORK_VERSION: 0x30355025
BELLATRIX_FORK_EPOCH: 0
CAPELLA_FORK_VERSION: 0x40355025
CAPELLA_FORK_EPOCH: 0
DENEB_FORK_VERSION: 0x50355025
DENEB_FORK_EPOCH: 0
ELECTRA_FORK_VERSION: 0x60000000
ELECTRA_FORK_EPOCH: 9
FULU_FORK_VERSION: 0x06000000
FULU_FORK_EPOCH: 100
BLOB_SCHEDULE:
- EPOCH: 9
MAX_BLOBS_PER_BLOCK: 9
- EPOCH: 100
MAX_BLOBS_PER_BLOCK: 100
- EPOCH: 150
MAX_BLOBS_PER_BLOCK: 175
- EPOCH: 200
MAX_BLOBS_PER_BLOCK: 200
- EPOCH: 250
MAX_BLOBS_PER_BLOCK: 275
- EPOCH: 300
MAX_BLOBS_PER_BLOCK: 300
"#;
    let config: Config =
        serde_yaml::from_str(spec_contents).expect("error while deserializing");
    let spec =
        ChainSpec::from_config::<MainnetEthSpec>(&config).expect("error while creating spec");
    let genesis_validators_root = Hash256::from_slice(&[0; 32]);
    // At the Fulu fork epoch the digest takes its BPO-masked value, and stays
    // constant until the next blob-schedule boundary.
    let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(100));
    assert_eq!(digest, [0xdf, 0x67, 0x55, 0x7b]);
    let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(101));
    assert_eq!(digest, [0xdf, 0x67, 0x55, 0x7b]);
    // Each BPO boundary (150, 200, 250, 300) yields a new digest that is
    // stable up to — but not including — the following boundary.
    let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(150));
    assert_eq!(digest, [0x8a, 0xb3, 0x8b, 0x59]);
    let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(199));
    assert_eq!(digest, [0x8a, 0xb3, 0x8b, 0x59]);
    let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(200));
    assert_eq!(digest, [0xd9, 0xb8, 0x14, 0x38]);
    let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(201));
    assert_eq!(digest, [0xd9, 0xb8, 0x14, 0x38]);
    let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(250));
    assert_eq!(digest, [0x4e, 0xf3, 0x2a, 0x62]);
    let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(299));
    assert_eq!(digest, [0x4e, 0xf3, 0x2a, 0x62]);
    let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(300));
    assert_eq!(digest, [0xca, 0x10, 0x0d, 0x64]);
    let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(301));
    assert_eq!(digest, [0xca, 0x10, 0x0d, 0x64]);
}
#[test]
fn apply_to_spec() {
let mut spec = ChainSpec::minimal();

View File

@@ -24,10 +24,14 @@ use tree_hash_derive::TreeHash;
TestRandom,
)]
/// Fork identification data carried on the p2p layer (e.g. in the node's ENR).
pub struct EnrForkId {
    /// Fork digest of the current fork computed from [`ChainSpec::compute_fork_digest`].
    #[serde(with = "serde_utils::bytes_4_hex")]
    pub fork_digest: [u8; 4],
    /// `next_fork_version` is the fork version corresponding to the next planned fork at a future
    /// epoch. The fork version will only change for regular forks, not BPO forks.
    #[serde(with = "serde_utils::bytes_4_hex")]
    pub next_fork_version: [u8; 4],
    /// `next_fork_epoch` is the epoch at which the next fork (whether a regular fork or a BPO fork) is planned
    pub next_fork_epoch: Epoch,
}

View File

@@ -1,14 +1,39 @@
use parking_lot::RwLock;
use crate::{ChainSpec, EthSpec, ForkName, Hash256, Slot};
use std::collections::HashMap;
use crate::{ChainSpec, Epoch, EthSpec, ForkName, Hash256, Slot};
use std::collections::BTreeMap;
/// Represents a hard fork in the consensus protocol.
///
/// A hard fork can be one of two types:
/// * A named fork (represented by `ForkName`) which introduces protocol changes.
/// * A blob-parameter-only (BPO) fork which only modifies blob parameters.
///
/// For BPO forks, the `fork_name` remains unchanged from the previous fork,
/// but the `fork_epoch` and `fork_digest` will be different to reflect the
/// new blob parameter changes.
#[derive(Debug, Clone)]
pub struct HardFork {
    /// Name of the named fork in effect; unchanged across BPO forks.
    fork_name: ForkName,
    /// Epoch at which this (named or BPO) fork activates.
    fork_epoch: Epoch,
    /// Digest identifying this fork on the p2p layer.
    fork_digest: [u8; 4],
}
impl HardFork {
    /// Builds a `HardFork` record from its name, digest, and activation epoch.
    pub fn new(fork_name: ForkName, fork_digest: [u8; 4], fork_epoch: Epoch) -> HardFork {
        HardFork {
            fork_name,
            fork_epoch,
            fork_digest,
        }
    }
}
/// Provides fork specific info like the current fork name and the fork digests corresponding to every valid fork.
#[derive(Debug)]
pub struct ForkContext {
current_fork: RwLock<ForkName>,
fork_to_digest: HashMap<ForkName, [u8; 4]>,
digest_to_fork: HashMap<[u8; 4], ForkName>,
current_fork: RwLock<HardFork>,
epoch_to_forks: BTreeMap<Epoch, HardFork>,
pub spec: ChainSpec,
}
@@ -22,74 +47,233 @@ impl ForkContext {
genesis_validators_root: Hash256,
spec: &ChainSpec,
) -> Self {
let fork_to_digest: HashMap<ForkName, [u8; 4]> = ForkName::list_all()
.into_iter()
.filter_map(|fork| {
if spec.fork_epoch(fork).is_some() {
Some((
fork,
ChainSpec::compute_fork_digest(
spec.fork_version_for_name(fork),
genesis_validators_root,
),
))
} else {
None
}
let epoch_to_forks: BTreeMap<_, _> = spec
.all_digest_epochs()
.map(|epoch| {
let fork_name = spec.fork_name_at_epoch(epoch);
let fork_digest = spec.compute_fork_digest(genesis_validators_root, epoch);
(epoch, HardFork::new(fork_name, fork_digest, epoch))
})
.collect();
let digest_to_fork = fork_to_digest
.clone()
.into_iter()
.map(|(k, v)| (v, k))
.collect();
let current_epoch = current_slot.epoch(E::slots_per_epoch());
let current_fork = epoch_to_forks
.values()
.filter(|&fork| fork.fork_epoch <= current_epoch)
.next_back()
.cloned()
.expect("should match at least genesis epoch");
Self {
current_fork: RwLock::new(spec.fork_name_at_slot::<E>(current_slot)),
fork_to_digest,
digest_to_fork,
current_fork: RwLock::new(current_fork),
epoch_to_forks,
spec: spec.clone(),
}
}
/// Returns `true` if the provided `fork_name` exists in the `ForkContext` object.
pub fn fork_exists(&self, fork_name: ForkName) -> bool {
self.fork_to_digest.contains_key(&fork_name)
self.spec.fork_epoch(fork_name).is_some()
}
/// Returns the `current_fork`.
pub fn current_fork(&self) -> ForkName {
*self.current_fork.read()
/// Returns the current fork name.
pub fn current_fork_name(&self) -> ForkName {
self.current_fork.read().fork_name
}
/// Updates the `current_fork` field to a new fork.
pub fn update_current_fork(&self, new_fork: ForkName) {
*self.current_fork.write() = new_fork;
/// Returns the current fork epoch.
pub fn current_fork_epoch(&self) -> Epoch {
self.current_fork.read().fork_epoch
}
/// Returns the current fork digest.
pub fn current_fork_digest(&self) -> [u8; 4] {
self.current_fork.read().fork_digest
}
/// Returns the next fork digest. If there's no future fork, returns the current fork digest.
pub fn next_fork_digest(&self) -> Option<[u8; 4]> {
let current_fork_epoch = self.current_fork_epoch();
self.epoch_to_forks
.range(current_fork_epoch..)
.nth(1)
.map(|(_, fork)| fork.fork_digest)
}
/// Updates the `digest_epoch` field to a new digest epoch.
pub fn update_current_fork(
&self,
new_fork_name: ForkName,
new_fork_digest: [u8; 4],
new_fork_epoch: Epoch,
) {
debug_assert!(self.epoch_to_forks.contains_key(&new_fork_epoch));
*self.current_fork.write() = HardFork::new(new_fork_name, new_fork_digest, new_fork_epoch);
}
/// Returns the context bytes/fork_digest corresponding to the genesis fork version.
pub fn genesis_context_bytes(&self) -> [u8; 4] {
*self
.fork_to_digest
.get(&ForkName::Base)
.expect("ForkContext must contain genesis context bytes")
self.epoch_to_forks
.first_key_value()
.expect("must contain genesis epoch")
.1
.fork_digest
}
/// Returns the fork type given the context bytes/fork_digest.
/// Returns `None` if context bytes doesn't correspond to any valid `ForkName`.
pub fn from_context_bytes(&self, context: [u8; 4]) -> Option<&ForkName> {
self.digest_to_fork.get(&context)
pub fn get_fork_from_context_bytes(&self, context: [u8; 4]) -> Option<&ForkName> {
self.epoch_to_forks
.values()
.find(|fork| fork.fork_digest == context)
.map(|fork| &fork.fork_name)
}
/// Returns the context bytes/fork_digest corresponding to a fork name.
/// Returns `None` if the `ForkName` has not been initialized.
pub fn to_context_bytes(&self, fork_name: ForkName) -> Option<[u8; 4]> {
self.fork_to_digest.get(&fork_name).cloned()
/// Returns the context bytes/fork_digest corresponding to an epoch.
/// See [`ChainSpec::compute_fork_digest`]
pub fn context_bytes(&self, epoch: Epoch) -> [u8; 4] {
self.epoch_to_forks
.range(..=epoch)
.next_back()
.expect("should match at least genesis epoch")
.1
.fork_digest
}
/// Returns all `fork_digest`s that are currently in the `ForkContext` object.
pub fn all_fork_digests(&self) -> Vec<[u8; 4]> {
self.digest_to_fork.keys().cloned().collect()
self.epoch_to_forks
.values()
.map(|fork| fork.fork_digest)
.collect()
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::chain_spec::{BlobParameters, BlobSchedule};
    use crate::MainnetEthSpec;

    type E = MainnetEthSpec;

    /// Builds a spec with every named fork scheduled at epochs 1..=6 and a
    /// blob schedule containing BPO entries at epochs 6, 50 and 100.
    fn make_chain_spec() -> ChainSpec {
        let blob_parameters = vec![
            BlobParameters {
                epoch: Epoch::new(6),
                max_blobs_per_block: 12,
            },
            BlobParameters {
                epoch: Epoch::new(50),
                max_blobs_per_block: 24,
            },
            BlobParameters {
                epoch: Epoch::new(100),
                max_blobs_per_block: 48,
            },
        ];
        let mut spec = E::default_spec();
        spec.altair_fork_epoch = Some(Epoch::new(1));
        spec.bellatrix_fork_epoch = Some(Epoch::new(2));
        spec.capella_fork_epoch = Some(Epoch::new(3));
        spec.deneb_fork_epoch = Some(Epoch::new(4));
        spec.electra_fork_epoch = Some(Epoch::new(5));
        spec.fulu_fork_epoch = Some(Epoch::new(6));
        spec.blob_schedule = BlobSchedule::new(blob_parameters);
        spec
    }

    /// Scheduled forks are reported as existing by the context.
    #[test]
    fn test_fork_exists() {
        let spec = make_chain_spec();
        let genesis_root = Hash256::ZERO;
        let current_slot = Slot::new(7);
        let context = ForkContext::new::<E>(current_slot, genesis_root, &spec);
        assert!(context.fork_exists(ForkName::Electra));
        assert!(context.fork_exists(ForkName::Fulu));
    }

    /// The current fork name/epoch reflect the slot the context was built at
    /// (here: the last slot of the Electra epoch).
    #[test]
    fn test_current_fork_name_and_epoch() {
        let spec = make_chain_spec();
        let electra_epoch = spec.electra_fork_epoch.unwrap();
        let electra_slot = electra_epoch.end_slot(E::slots_per_epoch());
        let genesis_root = Hash256::ZERO;
        let context = ForkContext::new::<E>(electra_slot, genesis_root, &spec);
        assert_eq!(context.current_fork_name(), ForkName::Electra);
        assert_eq!(context.current_fork_epoch(), electra_epoch);
    }

    /// While in Electra, the next digest is the Fulu fork digest.
    #[test]
    fn test_next_fork_digest() {
        let spec = make_chain_spec();
        let electra_epoch = spec.electra_fork_epoch.unwrap();
        let electra_slot = electra_epoch.end_slot(E::slots_per_epoch());
        let genesis_root = Hash256::ZERO;
        let context = ForkContext::new::<E>(electra_slot, genesis_root, &spec);
        let next_digest = context.next_fork_digest().unwrap();
        let expected_digest = spec.compute_fork_digest(genesis_root, spec.fulu_fork_epoch.unwrap());
        assert_eq!(next_digest, expected_digest);
    }

    /// A known digest maps back to its fork name; an unknown digest maps to `None`.
    #[test]
    fn test_get_fork_from_context_bytes() {
        let spec = make_chain_spec();
        let genesis_root = Hash256::ZERO;
        let current_slot = Slot::new(0);
        let context = ForkContext::new::<E>(current_slot, genesis_root, &spec);
        let electra_digest = spec.compute_fork_digest(genesis_root, Epoch::new(5));
        assert_eq!(
            context.get_fork_from_context_bytes(electra_digest),
            Some(&ForkName::Electra)
        );
        let invalid_digest = [9, 9, 9, 9];
        assert!(context
            .get_fork_from_context_bytes(invalid_digest)
            .is_none());
    }

    /// `context_bytes` returns the digest of the most recent digest-change
    /// epoch at or before the queried epoch.
    #[test]
    fn test_context_bytes() {
        let spec = make_chain_spec();
        let genesis_root = Hash256::ZERO;
        let current_slot = Slot::new(0);
        let context = ForkContext::new::<E>(current_slot, genesis_root, &spec);
        assert_eq!(
            context.context_bytes(Epoch::new(0)),
            spec.compute_fork_digest(genesis_root, Epoch::new(0))
        );
        assert_eq!(
            context.context_bytes(Epoch::new(12)),
            spec.compute_fork_digest(genesis_root, Epoch::new(10))
        );
    }

    /// The context holds exactly one digest per digest-change epoch.
    #[test]
    fn test_all_fork_digests() {
        let spec = make_chain_spec();
        let genesis_root = Hash256::ZERO;
        let current_slot = Slot::new(20);
        // Consistency fix: use the `E` alias like every other test here,
        // instead of spelling out `MainnetEthSpec`.
        let context = ForkContext::new::<E>(current_slot, genesis_root, &spec);
        // Get all enabled fork digests
        let fork_digests = context.all_fork_digests();
        let expected_digest_count = spec.all_digest_epochs().count();
        assert_eq!(fork_digests.len(), expected_digest_count);
    }
}

View File

@@ -36,8 +36,6 @@ impl ForkName {
/// Returns every fork name after `Base`, paired with its activation epoch in
/// `spec` (`None` when the fork is not scheduled).
pub fn list_all_fork_epochs(spec: &ChainSpec) -> Vec<(ForkName, Option<Epoch>)> {
    ForkName::list_all()
        .into_iter()
        // Skip Base
        .skip(1)
        .map(|fork| (fork, spec.fork_epoch(fork)))
        .collect()
}

View File

@@ -883,11 +883,25 @@ mod test {
}
}
/// Builds a `ChainSpec` with every named fork enabled at consecutive epochs
/// 1..=6, for tests that need all block variants to be decodable.
fn spec_with_all_forks_enabled<E: EthSpec>() -> ChainSpec {
    let mut chain_spec = E::default_spec();
    chain_spec.altair_fork_epoch = Some(Epoch::new(1));
    chain_spec.bellatrix_fork_epoch = Some(Epoch::new(2));
    chain_spec.capella_fork_epoch = Some(Epoch::new(3));
    chain_spec.deneb_fork_epoch = Some(Epoch::new(4));
    chain_spec.electra_fork_epoch = Some(Epoch::new(5));
    chain_spec.fulu_fork_epoch = Some(Epoch::new(6));
    // check that we have all forks covered
    assert!(chain_spec.fork_epoch(ForkName::latest()).is_some());
    chain_spec
}
#[test]
fn test_ssz_tagged_signed_beacon_block() {
type E = MainnetEthSpec;
let spec = &E::default_spec();
let spec = &spec_with_all_forks_enabled::<E>();
let sig = Signature::empty();
let blocks = vec![
SignedBeaconBlock::<E>::from_block(