mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-03 00:31:50 +00:00
Remove all prod eth1 related code (#7133)
N/A After the Electra fork, which includes EIP-6110, the beacon node no longer needs the eth1 bridging mechanism to include new deposits, as they are provided by the EL as a `deposit_request`. So after Electra, plus a transition period during which the finalized bridge deposits from before the fork are included through the old mechanism, we no longer need the elaborate machinery we had for getting deposit contract data from the execution layer. Since Holesky has already forked to Electra and completed the transition period, this PR checks whether removing all the eth1-related logic leads to any surprises.
This commit is contained in:
@@ -5,21 +5,12 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
|
||||
edition = { workspace = true }
|
||||
|
||||
[dependencies]
|
||||
environment = { workspace = true }
|
||||
eth1 = { workspace = true }
|
||||
ethereum_hashing = { workspace = true }
|
||||
ethereum_ssz = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
int_to_bytes = { workspace = true }
|
||||
merkle_proof = { workspace = true }
|
||||
rayon = { workspace = true }
|
||||
state_processing = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
tree_hash = { workspace = true }
|
||||
types = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
eth1_test_rig = { workspace = true }
|
||||
logging = { workspace = true }
|
||||
sensitive_url = { workspace = true }
|
||||
|
||||
@@ -1,461 +0,0 @@
|
||||
pub use crate::common::genesis_deposits;
|
||||
pub use eth1::Config as Eth1Config;
|
||||
|
||||
use eth1::{DepositLog, Eth1Block, Service as Eth1Service};
|
||||
use state_processing::{
|
||||
eth2_genesis_time, initialize_beacon_state_from_eth1, is_valid_genesis_state,
|
||||
per_block_processing::process_operations::apply_deposit, process_activations,
|
||||
};
|
||||
use std::sync::{
|
||||
atomic::{AtomicU64, AtomicUsize, Ordering},
|
||||
Arc,
|
||||
};
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
use tracing::{debug, error, info, trace};
|
||||
use types::{BeaconState, ChainSpec, Deposit, Eth1Data, EthSpec, FixedBytesExtended, Hash256};
|
||||
|
||||
/// The number of blocks that are pulled per request whilst waiting for genesis.
///
/// Also acts as the fast-poll threshold in `wait_for_genesis_state`: importing a full batch of
/// this many blocks implies we are far behind the head, so the next poll happens sooner.
const BLOCKS_PER_GENESIS_POLL: usize = 99;
|
||||
|
||||
/// Stats about the eth1 genesis process.
pub struct Statistics {
    /// Number of the highest eth1 block scanned for genesis so far (set in `scan_new_blocks`).
    highest_processed_block: AtomicU64,
    /// Active-validator count of the most recent candidate genesis state.
    active_validator_count: AtomicUsize,
    /// Total number of deposits currently held in the eth1 deposit cache.
    total_deposit_count: AtomicUsize,
    /// Timestamp of the most recently processed eth1 block.
    latest_timestamp: AtomicU64,
}
|
||||
|
||||
/// Provides a service that connects to some Eth1 HTTP JSON-RPC endpoint and maintains a cache of
/// eth1 blocks and deposits, listening for the eth1 block that triggers eth2 genesis and returning
/// the genesis `BeaconState`.
///
/// Is a wrapper around the `Service` struct of the `eth1` crate.
#[derive(Clone)]
pub struct Eth1GenesisService {
    /// The underlying service. Access to this object is only required for testing and diagnosis.
    pub eth1_service: Eth1Service,
    /// Statistics about genesis progress.
    ///
    /// Wrapped in `Arc` so that clones of this service (it derives `Clone`) all report into the
    /// same counters.
    stats: Arc<Statistics>,
}
|
||||
|
||||
impl Eth1GenesisService {
|
||||
/// Creates a new service. Does not attempt to connect to the Eth1 node.
|
||||
///
|
||||
/// Modifies the given `config` to make it more suitable to the task of listening to genesis.
|
||||
pub fn new(config: Eth1Config, spec: Arc<ChainSpec>) -> Result<Self, String> {
|
||||
let config = Eth1Config {
|
||||
// Truncating the block cache makes searching for genesis more
|
||||
// complicated.
|
||||
block_cache_truncation: None,
|
||||
// Scan large ranges of blocks when awaiting genesis.
|
||||
blocks_per_log_query: 1_000,
|
||||
// Only perform a few log requests each time the eth1 node is polled.
|
||||
//
|
||||
// For small testnets this makes finding genesis much faster,
|
||||
// as it usually happens within 1,000 blocks.
|
||||
max_log_requests_per_update: Some(5),
|
||||
// Only perform a few logs requests each time the eth1 node is polled.
|
||||
//
|
||||
// For small testnets, this is much faster as they do not have
|
||||
// a `MIN_GENESIS_SECONDS`, so after `MIN_GENESIS_VALIDATOR_COUNT`
|
||||
// has been reached only a single block needs to be read.
|
||||
max_blocks_per_update: Some(BLOCKS_PER_GENESIS_POLL),
|
||||
..config
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
eth1_service: Eth1Service::new(config, spec)
|
||||
.map_err(|e| format!("Failed to create eth1 service: {:?}", e))?,
|
||||
stats: Arc::new(Statistics {
|
||||
highest_processed_block: AtomicU64::new(0),
|
||||
active_validator_count: AtomicUsize::new(0),
|
||||
total_deposit_count: AtomicUsize::new(0),
|
||||
latest_timestamp: AtomicU64::new(0),
|
||||
}),
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the first eth1 block that has enough deposits that it's a (potentially invalid)
|
||||
/// candidate for genesis.
|
||||
fn first_candidate_eth1_block(&self, min_genesis_active_validator_count: usize) -> Option<u64> {
|
||||
if self.eth1_service.deposit_cache_len() < min_genesis_active_validator_count {
|
||||
None
|
||||
} else {
|
||||
self.eth1_service
|
||||
.deposits()
|
||||
.read()
|
||||
.cache
|
||||
.get_log(min_genesis_active_validator_count.saturating_sub(1))
|
||||
.map(|log| log.block_number)
|
||||
}
|
||||
}
|
||||
|
||||
/// Scans the Eth1 chain, returning a genesis state once it has been discovered.
///
/// ## Returns
///
/// - `Ok(state)` once the canonical eth2 genesis state has been discovered.
/// - `Err(e)` if there is some internal error during updates.
pub async fn wait_for_genesis_state<E: EthSpec>(
    &self,
    update_interval: Duration,
) -> Result<BeaconState<E>, String> {
    let eth1_service = &self.eth1_service;
    let spec = eth1_service.chain_spec();

    // `sync_blocks` flips to `true` once a candidate eth1 block exists; only then do we start
    // importing blocks (phase 2 below).
    let mut sync_blocks = false;
    let mut highest_processed_block = None;

    info!("Importing eth1 deposit logs");

    loop {
        // Phase 1: refresh the deposit cache. A failed update is logged but not fatal; we
        // simply retry on the next iteration.
        let update_result = eth1_service
            .update_deposit_cache(None)
            .await
            .map_err(|e| format!("{:?}", e));

        if let Err(e) = update_result {
            error!(error = e, "Failed to update eth1 deposit cache")
        }

        // These stats are only used for logging, so `Relaxed` ordering suffices.
        self.stats
            .total_deposit_count
            .store(eth1_service.deposit_cache_len(), Ordering::Relaxed);

        // Until enough deposits have been seen, keep waiting without importing any blocks.
        if !sync_blocks {
            if let Some(viable_eth1_block) = self
                .first_candidate_eth1_block(spec.min_genesis_active_validator_count as usize)
            {
                info!("Importing eth1 blocks");
                self.eth1_service.set_lowest_cached_block(viable_eth1_block);
                sync_blocks = true
            } else {
                info!(
                    min_genesis_active_validators = spec.min_genesis_active_validator_count,
                    total_deposits = eth1_service.deposit_cache_len(),
                    valid_deposits = eth1_service.get_raw_valid_signature_count(),
                    "Waiting for more deposits"
                );

                sleep(update_interval).await;

                continue;
            }
        }

        // Phase 2: download new eth1 blocks into the cache.
        let blocks_imported = match eth1_service.update_block_cache(None).await {
            Ok(outcome) => {
                debug!(
                    latest_block_timestamp = eth1_service.latest_block_timestamp(),
                    cache_head = eth1_service.highest_safe_block(),
                    count = outcome.blocks_imported,
                    "Imported eth1 blocks"
                );
                outcome.blocks_imported
            }
            Err(e) => {
                error!(
                    error = ?e,
                    "Failed to update eth1 block cache"
                );
                0
            }
        };

        // Phase 3: scan the new eth1 blocks, searching for genesis.
        if let Some(genesis_state) =
            self.scan_new_blocks::<E>(&mut highest_processed_block, spec)?
        {
            info!(
                genesis_validators = genesis_state
                    .get_active_validator_indices(E::genesis_epoch(), spec)
                    .map_err(|e| format!("Genesis validators error: {:?}", e))?
                    .len(),
                genesis_time = genesis_state.genesis_time(),
                "Genesis ceremony complete"
            );
            break Ok(genesis_state);
        }

        // Drop all the scanned blocks as they are no longer required.
        eth1_service.clear_block_cache();

        // Load some statistics from the atomics.
        let active_validator_count = self.stats.active_validator_count.load(Ordering::Relaxed);
        let total_deposit_count = self.stats.total_deposit_count.load(Ordering::Relaxed);
        let latest_timestamp = self.stats.latest_timestamp.load(Ordering::Relaxed);

        // Perform some logging.
        if timestamp_can_trigger_genesis(latest_timestamp, spec)? {
            // Indicate that we are awaiting adequate active validators.
            if (active_validator_count as u64) < spec.min_genesis_active_validator_count {
                info!(
                    min_genesis_active_validators = spec.min_genesis_active_validator_count,
                    active_validators = active_validator_count,
                    total_deposits = total_deposit_count,
                    valid_deposits = eth1_service.get_valid_signature_count().unwrap_or(0),
                    "Waiting for more validators"
                );
            }
        } else {
            info!(
                genesis_delay = spec.genesis_delay,
                genesis_time = spec.min_genesis_time,
                latest_eth1_timestamp = latest_timestamp,
                "Waiting for adequate eth1 timestamp"
            );
        }

        // If we imported the full number of blocks, poll again in a short amount of time.
        //
        // We assume that if we imported a large chunk of blocks then we're some distance from
        // the head and we should sync faster.
        if blocks_imported >= BLOCKS_PER_GENESIS_POLL {
            sleep(Duration::from_millis(50)).await;
        } else {
            sleep(update_interval).await;
        }
    }
}
|
||||
|
||||
/// Processes any new blocks that have appeared since this function was last run.
///
/// Blocks are always tested in increasing order, starting with the lowest unknown block
/// number in the cache.
///
/// ## Returns
///
/// - `Ok(Some(eth1_block))` if a previously-unprocessed block would trigger Eth2 genesis.
/// - `Ok(None)` if none of the new blocks would trigger genesis, or there were no new blocks.
/// - `Err(_)` if there was some internal error.
fn scan_new_blocks<E: EthSpec>(
    &self,
    highest_processed_block: &mut Option<u64>,
    spec: &ChainSpec,
) -> Result<Option<BeaconState<E>>, String> {
    let eth1_service = &self.eth1_service;

    for block in eth1_service.blocks().read().iter() {
        // It's possible that the block and deposit caches aren't synced. Ignore any blocks
        // which are not safe for both caches.
        //
        // Don't update the highest processed block since we want to come back and process this
        // again later.
        if eth1_service
            .highest_safe_block()
            .is_none_or(|n| block.number > n)
        {
            continue;
        }

        // Ignore any block that has already been processed or update the highest processed
        // block.
        if highest_processed_block.is_some_and(|highest| highest >= block.number) {
            continue;
        } else {
            // Stats are only consumed for logging, so `Relaxed` ordering is adequate.
            self.stats
                .highest_processed_block
                .store(block.number, Ordering::Relaxed);
            self.stats
                .latest_timestamp
                .store(block.timestamp, Ordering::Relaxed);

            *highest_processed_block = Some(block.number)
        }

        // Ignore any block with an insufficient timestamp.
        if !timestamp_can_trigger_genesis(block.timestamp, spec)? {
            trace!(
                genesis_delay = spec.genesis_delay,
                min_genesis_time = spec.min_genesis_time,
                eth1_block_timestamp = block.timestamp,
                eth1_block_number = block.number,
                "Insufficient block timestamp"
            );
            continue;
        }

        // Cheap pre-check: without enough valid deposit signatures at this block there cannot
        // be enough active validators, so skip the expensive state construction.
        let valid_signature_count = eth1_service
            .get_valid_signature_count_at_block(block.number)
            .unwrap_or(0);
        if (valid_signature_count as u64) < spec.min_genesis_active_validator_count {
            trace!(
                genesis_delay = spec.genesis_delay,
                valid_signature_count = valid_signature_count,
                min_validator_count = spec.min_genesis_active_validator_count,
                eth1_block_number = block.number,
                "Insufficient valid signatures"
            );
            continue;
        }

        // Generate a potential beacon state for this eth1 block.
        //
        // Note: this state is *not* fully valid; some fields have been bypassed to make
        // verification faster (see `cheap_state_at_eth1_block`).
        let state = self.cheap_state_at_eth1_block::<E>(block, spec)?;
        let active_validator_count = state
            .get_active_validator_indices(E::genesis_epoch(), spec)
            .map_err(|e| format!("Genesis validators error: {:?}", e))?
            .len();

        self.stats
            .active_validator_count
            .store(active_validator_count, Ordering::Relaxed);

        if is_valid_genesis_state(&state, spec) {
            // Only now build the full (expensive) genesis state from this block.
            let genesis_state = self
                .genesis_from_eth1_block(block.clone(), spec)
                .map_err(|e| format!("Failed to generate valid genesis state : {}", e))?;

            return Ok(Some(genesis_state));
        } else {
            trace!(
                min_genesis_active_validator_count =
                    format!("{}", spec.min_genesis_active_validator_count),
                active_validators = active_validator_count,
                eth1_block_number = block.number,
                "Insufficient active validators"
            );
        }
    }

    Ok(None)
}
|
||||
|
||||
/// Produces an eth2 genesis `BeaconState` from the given `eth1_block`. The caller should have
|
||||
/// verified that `eth1_block` produces a valid genesis state.
|
||||
///
|
||||
/// ## Returns
|
||||
///
|
||||
/// - `Ok(genesis_state)`: if all went well.
|
||||
/// - `Err(e)`: if the given `eth1_block` was not a viable block to trigger genesis or there was
|
||||
/// an internal error.
|
||||
fn genesis_from_eth1_block<E: EthSpec>(
|
||||
&self,
|
||||
eth1_block: Eth1Block,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<BeaconState<E>, String> {
|
||||
let deposit_logs = self
|
||||
.eth1_service
|
||||
.deposits()
|
||||
.read()
|
||||
.cache
|
||||
.iter()
|
||||
.take_while(|log| log.block_number <= eth1_block.number)
|
||||
.map(|log| log.deposit_data.clone())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let genesis_state = initialize_beacon_state_from_eth1(
|
||||
eth1_block.hash,
|
||||
eth1_block.timestamp,
|
||||
genesis_deposits(deposit_logs, spec)?,
|
||||
None,
|
||||
spec,
|
||||
)
|
||||
.map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?;
|
||||
|
||||
if is_valid_genesis_state(&genesis_state, spec) {
|
||||
Ok(genesis_state)
|
||||
} else {
|
||||
Err("Generated state was not valid.".to_string())
|
||||
}
|
||||
}
|
||||
|
||||
/// Generates an incomplete `BeaconState` for some `eth1_block` that can be used for checking
|
||||
/// to see if that `eth1_block` triggers eth2 genesis.
|
||||
///
|
||||
/// ## Notes
|
||||
///
|
||||
/// The returned `BeaconState` should **not** be used as the genesis state, it is
|
||||
/// incomplete.
|
||||
fn cheap_state_at_eth1_block<E: EthSpec>(
|
||||
&self,
|
||||
eth1_block: &Eth1Block,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<BeaconState<E>, String> {
|
||||
let genesis_time = eth2_genesis_time(eth1_block.timestamp, spec)
|
||||
.map_err(|e| format!("Unable to set genesis time: {:?}", e))?;
|
||||
|
||||
let mut state: BeaconState<E> = BeaconState::new(
|
||||
genesis_time,
|
||||
Eth1Data {
|
||||
block_hash: Hash256::zero(),
|
||||
deposit_root: Hash256::zero(),
|
||||
deposit_count: 0,
|
||||
},
|
||||
spec,
|
||||
);
|
||||
|
||||
self.deposit_logs_at_block(eth1_block.number)
|
||||
.iter()
|
||||
.map(|deposit_log| Deposit {
|
||||
// Generate a bogus proof.
|
||||
//
|
||||
// The deposits are coming directly from our own deposit tree to there's no need to
|
||||
// make proofs about their inclusion in it.
|
||||
proof: vec![Hash256::zero(); spec.deposit_contract_tree_depth as usize].into(),
|
||||
data: deposit_log.deposit_data.clone(),
|
||||
})
|
||||
.try_for_each(|deposit| {
|
||||
// Skip proof verification (see comment about bogus proof generation).
|
||||
const PROOF_VERIFICATION: bool = false;
|
||||
|
||||
// Note: presently all the signatures are verified each time this function is
|
||||
// run.
|
||||
//
|
||||
// It would be more efficient to pre-verify signatures, filter out the invalid
|
||||
// ones and disable verification for `process_deposit`.
|
||||
//
|
||||
// Such an optimization would only be useful in a scenario where `MIN_GENESIS_TIME`
|
||||
// is reached _prior_ to `MIN_ACTIVE_VALIDATOR_COUNT`. I suspect this won't be the
|
||||
// case for mainnet, so we defer this optimization.
|
||||
let Deposit { proof, data } = deposit;
|
||||
let proof = if PROOF_VERIFICATION {
|
||||
Some(proof)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
apply_deposit(&mut state, data, proof, true, spec)
|
||||
.map_err(|e| format!("Error whilst processing deposit: {:?}", e))
|
||||
})?;
|
||||
|
||||
process_activations(&mut state, spec)
|
||||
.map_err(|e| format!("Error whilst processing activations: {:?}", e))?;
|
||||
|
||||
Ok(state)
|
||||
}
|
||||
|
||||
/// Returns all deposit logs included in `block_number` and all prior blocks.
|
||||
fn deposit_logs_at_block(&self, block_number: u64) -> Vec<DepositLog> {
|
||||
self.eth1_service
|
||||
.deposits()
|
||||
.read()
|
||||
.cache
|
||||
.iter()
|
||||
.take_while(|log| log.block_number <= block_number)
|
||||
.cloned()
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Returns statistics about eth1 genesis.
|
||||
pub fn statistics(&self) -> &Statistics {
|
||||
&self.stats
|
||||
}
|
||||
|
||||
/// Returns the `Service` contained in `self`.
|
||||
pub fn into_core_service(self) -> Eth1Service {
|
||||
self.eth1_service
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `false` for a timestamp that would result in a genesis time that is earlier than
|
||||
/// `MIN_GENESIS_TIME`.
|
||||
fn timestamp_can_trigger_genesis(timestamp: u64, spec: &ChainSpec) -> Result<bool, String> {
|
||||
eth2_genesis_time(timestamp, spec)
|
||||
.map(|t| t >= spec.min_genesis_time)
|
||||
.map_err(|e| format!("Arith error when during genesis calculation: {:?}", e))
|
||||
}
|
||||
@@ -1,10 +1,6 @@
|
||||
mod common;
|
||||
mod eth1_genesis_service;
|
||||
mod interop;
|
||||
|
||||
pub use eth1::Config as Eth1Config;
|
||||
pub use eth1::Eth1Endpoint;
|
||||
pub use eth1_genesis_service::{Eth1GenesisService, Statistics};
|
||||
pub use interop::{
|
||||
bls_withdrawal_credentials, interop_genesis_state, interop_genesis_state_with_eth1,
|
||||
InteropGenesisBuilder, DEFAULT_ETH1_BLOCK_HASH,
|
||||
|
||||
@@ -1,107 +0,0 @@
|
||||
#![cfg(test)]
|
||||
use environment::{Environment, EnvironmentBuilder};
|
||||
use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID};
|
||||
use eth1_test_rig::{AnvilEth1Instance, DelayThenDeposit, Middleware};
|
||||
use genesis::{Eth1Config, Eth1GenesisService};
|
||||
use logging::create_test_tracing_subscriber;
|
||||
use sensitive_url::SensitiveUrl;
|
||||
use state_processing::is_valid_genesis_state;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use types::{
|
||||
test_utils::generate_deterministic_keypair, FixedBytesExtended, Hash256, MinimalEthSpec,
|
||||
};
|
||||
|
||||
pub fn new_env() -> Environment<MinimalEthSpec> {
|
||||
create_test_tracing_subscriber();
|
||||
EnvironmentBuilder::minimal()
|
||||
.multi_threaded_tokio_runtime()
|
||||
.expect("should start tokio runtime")
|
||||
.build()
|
||||
.expect("should build env")
|
||||
}
|
||||
|
||||
/// End-to-end test: deposits are sent to an Anvil-backed deposit contract while the
/// `Eth1GenesisService` concurrently searches for a genesis state; the resulting state must be
/// valid and contain exactly the minimum validator count.
#[test]
fn basic() {
    let env = new_env();

    // Relax the spec so that genesis can be triggered quickly on a local chain.
    let mut spec = (*env.eth2_config().spec).clone();
    spec.min_genesis_time = 0;
    spec.min_genesis_active_validator_count = 8;
    let spec = Arc::new(spec);

    env.runtime().block_on(async {
        let eth1 = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into())
            .await
            .expect("should start eth1 environment");
        let deposit_contract = &eth1.deposit_contract;
        let client = eth1.json_rpc_client();

        let now = client
            .get_block_number()
            .await
            .map(|v| v.as_u64())
            .expect("should get block number");

        let service = Eth1GenesisService::new(
            Eth1Config {
                endpoint: Eth1Endpoint::NoAuth(
                    SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
                ),
                deposit_contract_address: deposit_contract.address(),
                deposit_contract_deploy_block: now,
                lowest_cached_block_number: now,
                follow_distance: 0,
                block_cache_truncation: None,
                ..Eth1Config::default()
            },
            spec.clone(),
        )
        .unwrap();

        // NOTE: this test is sensitive to the response speed of the external web3 server. If
        // you're experiencing failures, try increasing the update_interval.
        let update_interval = Duration::from_millis(500);

        // Submit two more deposits than the minimum so the search must settle on the correct
        // candidate block rather than simply the last deposit.
        let deposits = (0..spec.min_genesis_active_validator_count + 2)
            .map(|i| {
                deposit_contract.deposit_helper::<MinimalEthSpec>(
                    generate_deterministic_keypair(i as usize),
                    Hash256::from_low_u64_le(i),
                    32_000_000_000,
                )
            })
            .map(|deposit| DelayThenDeposit {
                delay: Duration::from_secs(0),
                deposit,
            })
            .collect::<Vec<_>>();

        let deposit_future = deposit_contract.deposit_multiple(deposits);

        let wait_future = service.wait_for_genesis_state::<MinimalEthSpec>(update_interval);

        // Drive the deposit submission and the genesis search concurrently.
        let state = futures::try_join!(deposit_future, wait_future)
            .map(|(_, state)| state)
            .expect("should finish waiting for genesis");

        // Note: using anvil these deposits are 1-per-block, therefore we know there should only be
        // the minimum number of validators.
        assert_eq!(
            state.validators().len(),
            spec.min_genesis_active_validator_count as usize,
            "should have expected validator count"
        );

        assert!(state.genesis_time() > 0, "should have some genesis time");

        // (The original contained this exact assertion twice; the redundant duplicate has been
        // removed.)
        assert!(
            is_valid_genesis_state(&state, &spec),
            "should be valid genesis state"
        );
    });
}
|
||||
Reference in New Issue
Block a user