Merge branch 'stable' into validator-manager

This commit is contained in:
Paul Hauner
2023-07-10 17:19:53 +10:00
272 changed files with 12065 additions and 4316 deletions

View File

@@ -89,7 +89,7 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> BeaconBlockBodyRef<'a, T,
}
}
impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> {
impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> BeaconBlockBodyRef<'a, T, Payload> {
/// Get the fork_name of this object
pub fn fork_name(self) -> ForkName {
match self {

View File

@@ -26,6 +26,8 @@ pub use self::committee_cache::{
compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count,
CommitteeCache,
};
pub use crate::beacon_state::balance::Balance;
pub use crate::beacon_state::progressive_balances_cache::*;
use crate::historical_summary::HistoricalSummary;
pub use clone_config::CloneConfig;
pub use eth_spec::*;
@@ -34,9 +36,11 @@ pub use tree_hash_cache::BeaconTreeHashCache;
#[macro_use]
mod committee_cache;
mod balance;
mod clone_config;
mod exit_cache;
mod iter;
mod progressive_balances_cache;
mod pubkey_cache;
mod tests;
mod tree_hash_cache;
@@ -101,6 +105,9 @@ pub enum Error {
SszTypesError(ssz_types::Error),
TreeHashCacheNotInitialized,
NonLinearTreeHashCacheHistory,
ParticipationCacheError(String),
ProgressiveBalancesCacheNotInitialized,
ProgressiveBalancesCacheInconsistent,
TreeHashCacheSkippedSlot {
cache: Slot,
state: Slot,
@@ -317,6 +324,12 @@ where
#[tree_hash(skip_hashing)]
#[test_random(default)]
#[derivative(Clone(clone_with = "clone_default"))]
pub progressive_balances_cache: ProgressiveBalancesCache,
#[serde(skip_serializing, skip_deserializing)]
#[ssz(skip_serializing, skip_deserializing)]
#[tree_hash(skip_hashing)]
#[test_random(default)]
#[derivative(Clone(clone_with = "clone_default"))]
pub committee_caches: [CommitteeCache; CACHED_EPOCHS],
#[serde(skip_serializing, skip_deserializing)]
#[ssz(skip_serializing, skip_deserializing)]
@@ -393,6 +406,7 @@ impl<T: EthSpec> BeaconState<T> {
// Caching (not in spec)
total_active_balance: None,
progressive_balances_cache: <_>::default(),
committee_caches: [
CommitteeCache::default(),
CommitteeCache::default(),
@@ -757,7 +771,7 @@ impl<T: EthSpec> BeaconState<T> {
Ok(signature_hash_int.safe_rem(modulo)? == 0)
}
/// Returns the beacon proposer index for the `slot` in the given `relative_epoch`.
/// Returns the beacon proposer index for the `slot` in `self.current_epoch()`.
///
/// Spec v0.12.1
pub fn get_beacon_proposer_index(&self, slot: Slot, spec: &ChainSpec) -> Result<usize, Error> {
@@ -1150,12 +1164,30 @@ impl<T: EthSpec> BeaconState<T> {
}
/// Convenience accessor for validators and balances simultaneously.
pub fn validators_and_balances_mut(&mut self) -> (&mut [Validator], &mut [u64]) {
pub fn validators_and_balances_and_progressive_balances_mut(
&mut self,
) -> (&mut [Validator], &mut [u64], &mut ProgressiveBalancesCache) {
match self {
BeaconState::Base(state) => (&mut state.validators, &mut state.balances),
BeaconState::Altair(state) => (&mut state.validators, &mut state.balances),
BeaconState::Merge(state) => (&mut state.validators, &mut state.balances),
BeaconState::Capella(state) => (&mut state.validators, &mut state.balances),
BeaconState::Base(state) => (
&mut state.validators,
&mut state.balances,
&mut state.progressive_balances_cache,
),
BeaconState::Altair(state) => (
&mut state.validators,
&mut state.balances,
&mut state.progressive_balances_cache,
),
BeaconState::Merge(state) => (
&mut state.validators,
&mut state.balances,
&mut state.progressive_balances_cache,
),
BeaconState::Capella(state) => (
&mut state.validators,
&mut state.balances,
&mut state.progressive_balances_cache,
),
}
}
@@ -1380,7 +1412,7 @@ impl<T: EthSpec> BeaconState<T> {
}
/// Build all caches (except the tree hash cache), if they need to be built.
pub fn build_all_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> {
pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> {
self.build_all_committee_caches(spec)?;
self.update_pubkey_cache()?;
self.build_exit_cache(spec)?;
@@ -1412,6 +1444,7 @@ impl<T: EthSpec> BeaconState<T> {
self.drop_committee_cache(RelativeEpoch::Next)?;
self.drop_pubkey_cache();
self.drop_tree_hash_cache();
self.drop_progressive_balances_cache();
*self.exit_cache_mut() = ExitCache::default();
Ok(())
}
@@ -1608,6 +1641,11 @@ impl<T: EthSpec> BeaconState<T> {
*self.pubkey_cache_mut() = PubkeyCache::default()
}
/// Completely drops the `progressive_balances_cache` cache, replacing it with a new, empty cache.
fn drop_progressive_balances_cache(&mut self) {
*self.progressive_balances_cache_mut() = ProgressiveBalancesCache::default();
}
/// Initialize but don't fill the tree hash cache, if it isn't already initialized.
pub fn initialize_tree_hash_cache(&mut self) {
if !self.tree_hash_cache().is_initialized() {
@@ -1679,6 +1717,9 @@ impl<T: EthSpec> BeaconState<T> {
if config.tree_hash_cache {
*res.tree_hash_cache_mut() = self.tree_hash_cache().clone();
}
if config.progressive_balances_cache {
*res.progressive_balances_cache_mut() = self.progressive_balances_cache().clone();
}
res
}

View File

@@ -0,0 +1,33 @@
use arbitrary::Arbitrary;
use safe_arith::{ArithError, SafeArith};
/// A balance which will never be below the specified `minimum`.
///
/// This is an effort to ensure the `EFFECTIVE_BALANCE_INCREMENT` minimum is always respected.
#[derive(PartialEq, Debug, Clone, Copy, Arbitrary)]
pub struct Balance {
    /// The raw accumulated value. It may drop below `minimum` internally; `get` clamps on read.
    raw: u64,
    /// Lower bound applied whenever the balance is read via `get`.
    minimum: u64,
}
impl Balance {
    /// Construct a balance of `0` with the given `minimum` floor.
    pub fn zero(minimum: u64) -> Self {
        Self { raw: 0, minimum }
    }

    /// Read the balance, clamped so it never falls below the `minimum`
    /// supplied at construction time.
    pub fn get(&self) -> u64 {
        self.raw.max(self.minimum)
    }

    /// Checked in-place addition of `other` to the raw balance.
    pub fn safe_add_assign(&mut self, other: u64) -> Result<(), ArithError> {
        self.raw.safe_add_assign(other)
    }

    /// Checked in-place subtraction of `other` from the raw balance.
    ///
    /// Note this operates on the raw value, which may end up below `minimum`;
    /// `get` still reports at least `minimum`.
    pub fn safe_sub_assign(&mut self, other: u64) -> Result<(), ArithError> {
        self.raw.safe_sub_assign(other)
    }
}

View File

@@ -5,6 +5,7 @@ pub struct CloneConfig {
pub pubkey_cache: bool,
pub exit_cache: bool,
pub tree_hash_cache: bool,
pub progressive_balances_cache: bool,
}
impl CloneConfig {
@@ -14,6 +15,7 @@ impl CloneConfig {
pubkey_cache: true,
exit_cache: true,
tree_hash_cache: true,
progressive_balances_cache: true,
}
}

View File

@@ -0,0 +1,184 @@
use crate::beacon_state::balance::Balance;
use crate::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec};
use arbitrary::Arbitrary;
use safe_arith::SafeArith;
use serde_derive::{Deserialize, Serialize};
use strum::{Display, EnumString, EnumVariantNames};
/// This cache keeps track of the accumulated target attestation balance for the current & previous
/// epochs. The cached values can be utilised by fork choice to calculate unrealized justification
/// and finalization instead of converting epoch participation arrays to balances for each block we
/// process.
#[derive(Default, Debug, PartialEq, Arbitrary, Clone)]
pub struct ProgressiveBalancesCache {
    /// `None` until `initialize` is called; accessors return
    /// `ProgressiveBalancesCacheNotInitialized` before then.
    inner: Option<Inner>,
}
#[derive(Debug, PartialEq, Arbitrary, Clone)]
struct Inner {
    /// The epoch the cached balances below refer to.
    pub current_epoch: Epoch,
    /// Accumulated effective balance of validators that attested to the previous epoch's target.
    pub previous_epoch_target_attesting_balance: Balance,
    /// Accumulated effective balance of validators that attested to the current epoch's target.
    pub current_epoch_target_attesting_balance: Balance,
}
impl ProgressiveBalancesCache {
    /// (Re-)initialize the cache with the given epoch and target attesting balances,
    /// replacing any previous contents.
    pub fn initialize(
        &mut self,
        current_epoch: Epoch,
        previous_epoch_target_attesting_balance: Balance,
        current_epoch_target_attesting_balance: Balance,
    ) {
        self.inner = Some(Inner {
            current_epoch,
            previous_epoch_target_attesting_balance,
            current_epoch_target_attesting_balance,
        });
    }

    /// Returns `true` once `initialize` has been called.
    pub fn is_initialized(&self) -> bool {
        self.inner.is_some()
    }

    /// When a new target attestation has been processed, we update the cached target attesting
    /// balance of the matching epoch (current or previous) to include the validator's effective
    /// balance. If the epoch is neither the current epoch nor the previous epoch, an error is
    /// returned, since that means the cache is inconsistent with the state.
    pub fn on_new_target_attestation(
        &mut self,
        epoch: Epoch,
        validator_effective_balance: u64,
    ) -> Result<(), BeaconStateError> {
        let cache = self.get_inner_mut()?;
        if epoch == cache.current_epoch {
            cache
                .current_epoch_target_attesting_balance
                .safe_add_assign(validator_effective_balance)?;
        } else if epoch.safe_add(1)? == cache.current_epoch {
            // `epoch + 1 == current_epoch` means the attestation targets the previous epoch.
            cache
                .previous_epoch_target_attesting_balance
                .safe_add_assign(validator_effective_balance)?;
        } else {
            return Err(BeaconStateError::ProgressiveBalancesCacheInconsistent);
        }
        Ok(())
    }

    /// When a validator is slashed, we reduce the cached target attesting balance of each epoch
    /// for which it was a target attester by the validator's effective balance, to exclude the
    /// validator's weight.
    pub fn on_slashing(
        &mut self,
        is_previous_epoch_target_attester: bool,
        is_current_epoch_target_attester: bool,
        effective_balance: u64,
    ) -> Result<(), BeaconStateError> {
        let cache = self.get_inner_mut()?;
        if is_previous_epoch_target_attester {
            cache
                .previous_epoch_target_attesting_balance
                .safe_sub_assign(effective_balance)?;
        }
        if is_current_epoch_target_attester {
            cache
                .current_epoch_target_attesting_balance
                .safe_sub_assign(effective_balance)?;
        }
        Ok(())
    }

    /// When a current epoch target attester has its effective balance changed, we adjust its
    /// share of the current epoch target attesting balance in the cache by the delta.
    pub fn on_effective_balance_change(
        &mut self,
        is_current_epoch_target_attester: bool,
        old_effective_balance: u64,
        new_effective_balance: u64,
    ) -> Result<(), BeaconStateError> {
        let cache = self.get_inner_mut()?;
        if is_current_epoch_target_attester {
            // Apply the (always non-negative) delta in the appropriate direction so the
            // `safe_sub` calls cannot underflow.
            if new_effective_balance > old_effective_balance {
                cache
                    .current_epoch_target_attesting_balance
                    .safe_add_assign(new_effective_balance.safe_sub(old_effective_balance)?)?;
            } else {
                cache
                    .current_epoch_target_attesting_balance
                    .safe_sub_assign(old_effective_balance.safe_sub(new_effective_balance)?)?;
            }
        }
        Ok(())
    }

    /// On epoch transition, the balance from current epoch is shifted to previous epoch, and the
    /// current epoch balance is reset to 0 (well, the `effective_balance_increment` floor).
    pub fn on_epoch_transition(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> {
        let cache = self.get_inner_mut()?;
        cache.current_epoch.safe_add_assign(1)?;
        cache.previous_epoch_target_attesting_balance =
            cache.current_epoch_target_attesting_balance;
        cache.current_epoch_target_attesting_balance =
            Balance::zero(spec.effective_balance_increment);
        Ok(())
    }

    /// Cached previous epoch target attesting balance, clamped to the `Balance` minimum.
    ///
    /// Errors if the cache has not been initialized.
    pub fn previous_epoch_target_attesting_balance(&self) -> Result<u64, BeaconStateError> {
        Ok(self
            .get_inner()?
            .previous_epoch_target_attesting_balance
            .get())
    }

    /// Cached current epoch target attesting balance, clamped to the `Balance` minimum.
    ///
    /// Errors if the cache has not been initialized.
    pub fn current_epoch_target_attesting_balance(&self) -> Result<u64, BeaconStateError> {
        Ok(self
            .get_inner()?
            .current_epoch_target_attesting_balance
            .get())
    }

    /// Mutable access to the inner cache, or an error if uninitialized.
    fn get_inner_mut(&mut self) -> Result<&mut Inner, BeaconStateError> {
        self.inner
            .as_mut()
            .ok_or(BeaconStateError::ProgressiveBalancesCacheNotInitialized)
    }

    /// Shared access to the inner cache, or an error if uninitialized.
    fn get_inner(&self) -> Result<&Inner, BeaconStateError> {
        self.inner
            .as_ref()
            .ok_or(BeaconStateError::ProgressiveBalancesCacheNotInitialized)
    }
}
/// Controls whether fork choice uses the `ProgressiveBalancesCache` and how strictly its
/// results are cross-checked against the `ParticipationCache`.
#[derive(
    Debug, PartialEq, Eq, Clone, Copy, Deserialize, Serialize, Display, EnumString, EnumVariantNames,
)]
#[strum(serialize_all = "lowercase")]
pub enum ProgressiveBalancesMode {
    /// Disable the usage of the progressive cache, and use the existing `ParticipationCache`
    /// calculation.
    Disabled,
    /// Enable the usage of the progressive cache, with checks against the `ParticipationCache`,
    /// falling back to the existing calculation if there is a balance mismatch.
    Checked,
    /// Enable the usage of the progressive cache, with checks against the `ParticipationCache`.
    /// Errors if there is a balance mismatch. Used in testing only.
    Strict,
    /// Enable the usage of the progressive cache, with no comparative checks against the
    /// `ParticipationCache`. This is fast but experimental; use with caution.
    Fast,
}
impl ProgressiveBalancesMode {
    /// Returns `true` if this mode requires comparing the cache's balances against those
    /// derived from the `ParticipationCache`.
    pub fn perform_comparative_checks(&self) -> bool {
        // Exhaustive match so that adding a new mode forces a decision here.
        match self {
            Self::Checked | Self::Strict => true,
            Self::Disabled | Self::Fast => false,
        }
    }
}
/// Returns `true` for forks where the `ProgressiveBalancesCache` can be used.
///
/// It is only enabled from `Altair` onwards, as it requires the `ParticipationCache`.
pub fn is_progressive_balances_enabled<E: EthSpec>(state: &BeaconState<E>) -> bool {
    // Exhaustive match so a new fork variant triggers a compile error here.
    match state {
        BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => true,
        BeaconState::Base(_) => false,
    }
}

View File

@@ -219,17 +219,18 @@ async fn clone_config() {
let mut state = build_state::<MinimalEthSpec>(16).await;
state.build_all_caches(&spec).unwrap();
state.build_caches(&spec).unwrap();
state
.update_tree_hash_cache()
.expect("should update tree hash cache");
let num_caches = 4;
let num_caches = 5;
let all_configs = (0..2u8.pow(num_caches)).map(|i| CloneConfig {
committee_caches: (i & 1) != 0,
pubkey_cache: ((i >> 1) & 1) != 0,
exit_cache: ((i >> 2) & 1) != 0,
tree_hash_cache: ((i >> 3) & 1) != 0,
progressive_balances_cache: ((i >> 4) & 1) != 0,
});
for config in all_configs {

View File

@@ -168,11 +168,9 @@ pub struct ChainSpec {
pub maximum_gossip_clock_disparity_millis: u64,
pub target_aggregators_per_committee: u64,
pub attestation_subnet_count: u64,
pub random_subnets_per_validator: u64,
pub epochs_per_random_subnet_subscription: u64,
pub subnets_per_node: u8,
pub epochs_per_subnet_subscription: u64,
attestation_subnet_extra_bits: u8,
pub attestation_subnet_extra_bits: u8,
/*
* Application params
@@ -455,17 +453,7 @@ impl ChainSpec {
#[allow(clippy::integer_arithmetic)]
pub const fn attestation_subnet_prefix_bits(&self) -> u32 {
// maybe use log2 when stable https://github.com/rust-lang/rust/issues/70887
// NOTE: this line is here simply to guarantee that if self.attestation_subnet_count type
// is changed, a compiler warning will be raised. This code depends on the type being u64.
let attestation_subnet_count: u64 = self.attestation_subnet_count;
let attestation_subnet_count_bits = if attestation_subnet_count == 0 {
0
} else {
63 - attestation_subnet_count.leading_zeros()
};
let attestation_subnet_count_bits = self.attestation_subnet_count.ilog2();
self.attestation_subnet_extra_bits as u32 + attestation_subnet_count_bits
}
@@ -625,13 +613,11 @@ impl ChainSpec {
network_id: 1, // mainnet network id
attestation_propagation_slot_range: 32,
attestation_subnet_count: 64,
random_subnets_per_validator: 1,
subnets_per_node: 1,
subnets_per_node: 2,
maximum_gossip_clock_disparity_millis: 500,
target_aggregators_per_committee: 16,
epochs_per_random_subnet_subscription: 256,
epochs_per_subnet_subscription: 256,
attestation_subnet_extra_bits: 6,
attestation_subnet_extra_bits: 0,
/*
* Application specific
@@ -842,8 +828,8 @@ impl ChainSpec {
* Capella hard fork params
*/
capella_fork_version: [0x03, 0x00, 0x00, 0x64],
capella_fork_epoch: None,
max_validators_per_withdrawals_sweep: 16384,
capella_fork_epoch: Some(Epoch::new(648704)),
max_validators_per_withdrawals_sweep: 8192,
/*
* Network specific
@@ -852,13 +838,11 @@ impl ChainSpec {
network_id: 100, // Gnosis Chain network id
attestation_propagation_slot_range: 32,
attestation_subnet_count: 64,
random_subnets_per_validator: 1,
subnets_per_node: 1,
subnets_per_node: 4, // Make this larger than usual to avoid network damage
maximum_gossip_clock_disparity_millis: 500,
target_aggregators_per_committee: 16,
epochs_per_random_subnet_subscription: 256,
epochs_per_subnet_subscription: 256,
attestation_subnet_extra_bits: 6,
attestation_subnet_extra_bits: 0,
/*
* Application specific
@@ -946,6 +930,9 @@ pub struct Config {
shard_committee_period: u64,
#[serde(with = "serde_utils::quoted_u64")]
eth1_follow_distance: u64,
#[serde(default = "default_subnets_per_node")]
#[serde(with = "serde_utils::quoted_u8")]
subnets_per_node: u8,
#[serde(with = "serde_utils::quoted_u64")]
inactivity_score_bias: u64,
@@ -1002,6 +989,10 @@ fn default_safe_slots_to_import_optimistically() -> u64 {
128u64
}
fn default_subnets_per_node() -> u8 {
2u8
}
impl Default for Config {
fn default() -> Self {
let chain_spec = MainnetEthSpec::default_spec();
@@ -1084,6 +1075,7 @@ impl Config {
min_validator_withdrawability_delay: spec.min_validator_withdrawability_delay,
shard_committee_period: spec.shard_committee_period,
eth1_follow_distance: spec.eth1_follow_distance,
subnets_per_node: spec.subnets_per_node,
inactivity_score_bias: spec.inactivity_score_bias,
inactivity_score_recovery_rate: spec.inactivity_score_recovery_rate,
@@ -1130,6 +1122,7 @@ impl Config {
min_validator_withdrawability_delay,
shard_committee_period,
eth1_follow_distance,
subnets_per_node,
inactivity_score_bias,
inactivity_score_recovery_rate,
ejection_balance,
@@ -1162,6 +1155,7 @@ impl Config {
min_validator_withdrawability_delay,
shard_committee_period,
eth1_follow_distance,
subnets_per_node,
inactivity_score_bias,
inactivity_score_recovery_rate,
ejection_balance,

View File

@@ -86,10 +86,6 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap<String, Value> {
"domain_application_mask".to_uppercase()=> u32_hex(spec.domain_application_mask),
"target_aggregators_per_committee".to_uppercase() =>
spec.target_aggregators_per_committee.to_string().into(),
"random_subnets_per_validator".to_uppercase() =>
spec.random_subnets_per_validator.to_string().into(),
"epochs_per_random_subnet_subscription".to_uppercase() =>
spec.epochs_per_random_subnet_subscription.to_string().into(),
"domain_contribution_and_proof".to_uppercase() =>
u32_hex(spec.domain_contribution_and_proof),
"domain_sync_committee".to_uppercase() => u32_hex(spec.domain_sync_committee),

View File

@@ -30,8 +30,10 @@ impl From<&DepositTreeSnapshot> for FinalizedExecutionBlock {
pub struct DepositTreeSnapshot {
pub finalized: Vec<Hash256>,
pub deposit_root: Hash256,
#[serde(with = "serde_utils::quoted_u64")]
pub deposit_count: u64,
pub execution_block_hash: Hash256,
#[serde(with = "serde_utils::quoted_u64")]
pub execution_block_height: u64,
}

View File

@@ -373,7 +373,7 @@ impl EthSpec for GnosisEthSpec {
type MaxPendingAttestations = U2048; // 128 max attestations * 16 slots per epoch
type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch
type MaxBlsToExecutionChanges = U16;
type MaxWithdrawalsPerPayload = U16;
type MaxWithdrawalsPerPayload = U8;
fn default_spec() -> ChainSpec {
ChainSpec::gnosis()

View File

@@ -24,6 +24,11 @@ impl ForkName {
]
}
/// The most recent fork, i.e. the last entry of `ForkName::list_all`.
pub fn latest() -> ForkName {
    // This unwrap is safe as long as there is at least one fork; covered by the
    // `fork_name_latest` test below.
    let all_forks = ForkName::list_all();
    *all_forks.last().unwrap()
}
/// Set the activation slots in the given `ChainSpec` so that the fork named by `self`
/// is the only fork in effect from genesis.
pub fn make_genesis_spec(&self, mut spec: ChainSpec) -> ChainSpec {
@@ -178,7 +183,7 @@ mod test {
#[test]
fn previous_and_next_fork_consistent() {
assert_eq!(ForkName::Capella.next_fork(), None);
assert_eq!(ForkName::latest().next_fork(), None);
assert_eq!(ForkName::Base.previous_fork(), None);
for (prev_fork, fork) in ForkName::list_all().into_iter().tuple_windows() {
@@ -211,4 +216,15 @@ mod test {
assert_eq!(ForkName::from_str("merge"), Ok(ForkName::Merge));
assert_eq!(ForkName::Merge.to_string(), "bellatrix");
}
#[test]
fn fork_name_latest() {
    // `latest()` must agree with the final entry of `list_all()`.
    assert_eq!(ForkName::latest(), *ForkName::list_all().last().unwrap());

    // It must also agree with the fork reached by walking `next_fork` from `Base`,
    // i.e. the fork ordering and the list are consistent.
    let mut fork = ForkName::Base;
    while let Some(next_fork) = fork.next_fork() {
        fork = next_fork;
    }
    assert_eq!(ForkName::latest(), fork);
}
}

View File

@@ -80,15 +80,26 @@ impl SubnetId {
epoch: Epoch,
spec: &ChainSpec,
) -> Result<(impl Iterator<Item = SubnetId>, Epoch), &'static str> {
// Simplify the variable name
let subscription_duration = spec.epochs_per_subnet_subscription;
let node_id_prefix =
(node_id >> (256 - spec.attestation_subnet_prefix_bits() as usize)).as_usize();
let subscription_event_idx = epoch.as_u64() / spec.epochs_per_subnet_subscription;
// NOTE: The as_u64() panics if the number is larger than u64::max_value(). This cannot be
// true as spec.epochs_per_subnet_subscription is a u64.
let node_offset = (node_id % ethereum_types::U256::from(subscription_duration)).as_u64();
// Calculate at which epoch this node needs to re-evaluate
let valid_until_epoch = epoch.as_u64()
+ subscription_duration
.saturating_sub((epoch.as_u64() + node_offset) % subscription_duration);
let subscription_event_idx = (epoch.as_u64() + node_offset) / subscription_duration;
let permutation_seed =
ethereum_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx));
let num_subnets = 1 << spec.attestation_subnet_prefix_bits();
let permutated_prefix = compute_shuffled_index(
node_id_prefix,
num_subnets,
@@ -107,7 +118,6 @@ impl SubnetId {
let subnet_set_generator = (0..subnets_per_node).map(move |idx| {
SubnetId::new((permutated_prefix + idx as u64) % attestation_subnet_count)
});
let valid_until_epoch = (subscription_event_idx + 1) * spec.epochs_per_subnet_subscription;
Ok((subnet_set_generator, valid_until_epoch.into()))
}
}
@@ -149,3 +159,80 @@ impl AsRef<str> for SubnetId {
subnet_id_to_string(self.0)
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// A set of tests compared to the python specification
    #[test]
    fn compute_subnets_for_epoch_unit_test() {
        // Randomized variables used generated with the python specification
        let node_ids = [
            "0",
            "88752428858350697756262172400162263450541348766581994718383409852729519486397",
            "18732750322395381632951253735273868184515463718109267674920115648614659369468",
            "27726842142488109545414954493849224833670205008410190955613662332153332462900",
            "39755236029158558527862903296867805548949739810920318269566095185775868999998",
            "31899136003441886988955119620035330314647133604576220223892254902004850516297",
            "58579998103852084482416614330746509727562027284701078483890722833654510444626",
            "28248042035542126088870192155378394518950310811868093527036637864276176517397",
            "60930578857433095740782970114409273483106482059893286066493409689627770333527",
            "103822458477361691467064888613019442068586830412598673713899771287914656699997",
        ]
        .into_iter()
        .map(|v| ethereum_types::U256::from_dec_str(v).unwrap())
        .collect::<Vec<_>>();

        let epochs = [
            54321u64, 1017090249, 1827566880, 846255942, 766597383, 1204990115, 1616209495,
            1774367616, 1484598751, 3525502229,
        ]
        .into_iter()
        .map(Epoch::from)
        .collect::<Vec<_>>();

        // Test mainnet
        let spec = ChainSpec::mainnet();

        // Calculated by hand
        let expected_valid_time: Vec<u64> = [
            54528, 1017090371, 1827567108, 846256076, 766597570, 1204990135, 1616209582,
            1774367723, 1484598953, 3525502371,
        ]
        .into();

        // Calculated from pyspec
        let expected_subnets = vec![
            vec![4u64, 5u64],
            vec![61, 62],
            vec![23, 24],
            vec![38, 39],
            vec![53, 54],
            vec![39, 40],
            vec![48, 49],
            vec![39, 40],
            vec![34, 35],
            vec![37, 38],
        ];

        // Each index pairs a node id with an epoch and the expected outputs above.
        for x in 0..node_ids.len() {
            println!("Test: {}", x);
            println!(
                "NodeId: {}\n Epoch: {}\n, expected_update_time: {}\n, expected_subnets: {:?}",
                node_ids[x], epochs[x], expected_valid_time[x], expected_subnets[x]
            );
            let (computed_subnets, valid_time) = SubnetId::compute_subnets_for_epoch::<
                crate::MainnetEthSpec,
            >(node_ids[x], epochs[x], &spec)
            .unwrap();

            // Both the re-evaluation epoch and the subnet set must match the expectations.
            assert_eq!(Epoch::from(expected_valid_time[x]), valid_time);
            assert_eq!(
                expected_subnets[x],
                computed_subnets.map(SubnetId::into).collect::<Vec<u64>>()
            );
        }
    }
}