Add Electra forks to basic sim tests (#7199)

This PR adds transitions to the Electra ~~and Fulu~~ fork epochs in the simulator tests.

~~It also covers blob inclusion verification and data column syncing on a full node in Fulu.~~

UPDATE: Removed the Fulu fork from the sim tests due to https://github.com/sigp/lighthouse/pull/7199#issuecomment-2852281176
This commit is contained in:
Jimmy Chen
2025-05-08 18:43:44 +10:00
committed by GitHub
parent e90fcbe657
commit 4b9c16fc71
6 changed files with 46 additions and 34 deletions

View File

@@ -295,7 +295,7 @@ jobs:
with: with:
channel: stable channel: stable
cache-target: release cache-target: release
- name: Run a basic beacon chain sim that starts from Bellatrix - name: Run a basic beacon chain sim that starts from Deneb
run: cargo run --release --bin simulator basic-sim run: cargo run --release --bin simulator basic-sim
fallback-simulator-ubuntu: fallback-simulator-ubuntu:
name: fallback-simulator-ubuntu name: fallback-simulator-ubuntu

View File

@@ -331,7 +331,7 @@ impl<E: EthSpec> PendingComponents<E> {
format!( format!(
"block {} blobs {}/{}", "block {} blobs {}/{}",
block_count, block_count,
self.verified_blobs.len(), self.verified_blobs.iter().flatten().count(),
num_expected_blobs num_expected_blobs
) )
} }

View File

@@ -10,7 +10,6 @@ use derivative::Derivative;
use fork_choice::ProtoBlock; use fork_choice::ProtoBlock;
use kzg::{Error as KzgError, Kzg}; use kzg::{Error as KzgError, Kzg};
use proto_array::Block; use proto_array::Block;
use slasher::test_utils::E;
use slot_clock::SlotClock; use slot_clock::SlotClock;
use ssz_derive::{Decode, Encode}; use ssz_derive::{Decode, Encode};
use std::iter; use std::iter;
@@ -589,19 +588,19 @@ fn verify_proposer_and_signature<T: BeaconChainTypes>(
chain: &BeaconChain<T>, chain: &BeaconChain<T>,
) -> Result<(), GossipDataColumnError> { ) -> Result<(), GossipDataColumnError> {
let column_slot = data_column.slot(); let column_slot = data_column.slot();
let column_epoch = column_slot.epoch(E::slots_per_epoch()); let slots_per_epoch = T::EthSpec::slots_per_epoch();
let column_epoch = column_slot.epoch(slots_per_epoch);
let column_index = data_column.index; let column_index = data_column.index;
let block_root = data_column.block_root(); let block_root = data_column.block_root();
let block_parent_root = data_column.block_parent_root(); let block_parent_root = data_column.block_parent_root();
let proposer_shuffling_root = let proposer_shuffling_root = if parent_block.slot.epoch(slots_per_epoch) == column_epoch {
if parent_block.slot.epoch(T::EthSpec::slots_per_epoch()) == column_epoch { parent_block
parent_block .next_epoch_shuffling_id
.next_epoch_shuffling_id .shuffling_decision_block
.shuffling_decision_block } else {
} else { parent_block.root
parent_block.root };
};
// We lock the cache briefly to get or insert a OnceCell, then drop the lock // We lock the cache briefly to get or insert a OnceCell, then drop the lock
// before doing proposer shuffling calculation via `OnceCell::get_or_try_init`. This avoids // before doing proposer shuffling calculation via `OnceCell::get_or_try_init`. This avoids
@@ -649,7 +648,7 @@ fn verify_proposer_and_signature<T: BeaconChainTypes>(
let proposer_index = *epoch_proposers let proposer_index = *epoch_proposers
.proposers .proposers
.get(column_slot.as_usize() % T::EthSpec::slots_per_epoch() as usize) .get(column_slot.as_usize() % slots_per_epoch as usize)
.ok_or_else(|| BeaconChainError::NoProposerForSlot(column_slot))?; .ok_or_else(|| BeaconChainError::NoProposerForSlot(column_slot))?;
let fork = epoch_proposers.fork; let fork = epoch_proposers.fork;

View File

@@ -7,7 +7,6 @@ use environment::RuntimeContext;
use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, Timeouts}; use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, Timeouts};
use sensitive_url::SensitiveUrl; use sensitive_url::SensitiveUrl;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use std::time::{SystemTime, UNIX_EPOCH}; use std::time::{SystemTime, UNIX_EPOCH};
use tempfile::{Builder as TempBuilder, TempDir}; use tempfile::{Builder as TempBuilder, TempDir};
@@ -249,7 +248,7 @@ impl<E: EthSpec> LocalExecutionNode<E> {
if let Err(e) = std::fs::write(jwt_file_path, config.jwt_key.hex_string()) { if let Err(e) = std::fs::write(jwt_file_path, config.jwt_key.hex_string()) {
panic!("Failed to write jwt file {}", e); panic!("Failed to write jwt file {}", e);
} }
let spec = Arc::new(E::default_spec()); let spec = context.eth2_config.spec.clone();
Self { Self {
server: MockServer::new_with_config( server: MockServer::new_with_config(
&context.executor.handle().unwrap(), &context.executor.handle().unwrap(),

View File

@@ -18,6 +18,7 @@ use environment::tracing_common;
use tracing_subscriber::prelude::*; use tracing_subscriber::prelude::*;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
use logging::build_workspace_filter;
use tokio::time::sleep; use tokio::time::sleep;
use types::{Epoch, EthSpec, MinimalEthSpec}; use types::{Epoch, EthSpec, MinimalEthSpec};
@@ -25,10 +26,9 @@ const END_EPOCH: u64 = 16;
const GENESIS_DELAY: u64 = 32; const GENESIS_DELAY: u64 = 32;
const ALTAIR_FORK_EPOCH: u64 = 0; const ALTAIR_FORK_EPOCH: u64 = 0;
const BELLATRIX_FORK_EPOCH: u64 = 0; const BELLATRIX_FORK_EPOCH: u64 = 0;
const CAPELLA_FORK_EPOCH: u64 = 1; const CAPELLA_FORK_EPOCH: u64 = 0;
const DENEB_FORK_EPOCH: u64 = 2; const DENEB_FORK_EPOCH: u64 = 0;
// const ELECTRA_FORK_EPOCH: u64 = 3; const ELECTRA_FORK_EPOCH: u64 = 2;
// const FULU_FORK_EPOCH: u64 = 4;
const SUGGESTED_FEE_RECIPIENT: [u8; 20] = const SUGGESTED_FEE_RECIPIENT: [u8; 20] =
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1];
@@ -116,7 +116,11 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> {
); );
if let Err(e) = tracing_subscriber::registry() if let Err(e) = tracing_subscriber::registry()
.with(stdout_logging_layer.with_filter(logger_config.debug_level)) .with(
stdout_logging_layer
.with_filter(logger_config.debug_level)
.with_filter(build_workspace_filter()?),
)
.try_init() .try_init()
{ {
eprintln!("Failed to initialize dependency logging: {e}"); eprintln!("Failed to initialize dependency logging: {e}");
@@ -130,8 +134,8 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> {
let genesis_delay = GENESIS_DELAY; let genesis_delay = GENESIS_DELAY;
// Convenience variables. Update these values when adding a newer fork. // Convenience variables. Update these values when adding a newer fork.
let latest_fork_version = spec.deneb_fork_version; let latest_fork_version = spec.electra_fork_version;
let latest_fork_start_epoch = DENEB_FORK_EPOCH; let latest_fork_start_epoch = ELECTRA_FORK_EPOCH;
spec.seconds_per_slot /= speed_up_factor; spec.seconds_per_slot /= speed_up_factor;
spec.seconds_per_slot = max(1, spec.seconds_per_slot); spec.seconds_per_slot = max(1, spec.seconds_per_slot);
@@ -142,8 +146,7 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> {
spec.bellatrix_fork_epoch = Some(Epoch::new(BELLATRIX_FORK_EPOCH)); spec.bellatrix_fork_epoch = Some(Epoch::new(BELLATRIX_FORK_EPOCH));
spec.capella_fork_epoch = Some(Epoch::new(CAPELLA_FORK_EPOCH)); spec.capella_fork_epoch = Some(Epoch::new(CAPELLA_FORK_EPOCH));
spec.deneb_fork_epoch = Some(Epoch::new(DENEB_FORK_EPOCH)); spec.deneb_fork_epoch = Some(Epoch::new(DENEB_FORK_EPOCH));
//spec.electra_fork_epoch = Some(Epoch::new(ELECTRA_FORK_EPOCH)); spec.electra_fork_epoch = Some(Epoch::new(ELECTRA_FORK_EPOCH));
//spec.fulu_fork_epoch = Some(Epoch::new(FULU_FORK_EPOCH));
let spec = Arc::new(spec); let spec = Arc::new(spec);
env.eth2_config.spec = spec.clone(); env.eth2_config.spec = spec.clone();

View File

@@ -128,17 +128,23 @@ pub async fn verify_full_block_production_up_to<E: EthSpec>(
slot_delay(slot, slot_duration).await; slot_delay(slot, slot_duration).await;
let beacon_nodes = network.beacon_nodes.read(); let beacon_nodes = network.beacon_nodes.read();
let beacon_chain = beacon_nodes[0].client.beacon_chain().unwrap(); let beacon_chain = beacon_nodes[0].client.beacon_chain().unwrap();
let num_blocks = beacon_chain let block_slots = beacon_chain
.chain_dump() .chain_dump()
.unwrap() .unwrap()
.iter() .iter()
.take_while(|s| s.beacon_block.slot() <= slot) .take_while(|s| s.beacon_block.slot() <= slot)
.count(); .map(|s| s.beacon_block.slot().as_usize())
.collect::<Vec<_>>();
let num_blocks = block_slots.len();
if num_blocks != slot.as_usize() + 1 { if num_blocks != slot.as_usize() + 1 {
let missed_slots = (0..slot.as_usize())
.filter(|slot| !block_slots.contains(slot))
.collect::<Vec<_>>();
return Err(format!( return Err(format!(
"There wasn't a block produced at every slot, got: {}, expected: {}", "There wasn't a block produced at every slot, got: {}, expected: {}, missed: {:?}",
num_blocks, num_blocks,
slot.as_usize() + 1 slot.as_usize() + 1,
missed_slots
)); ));
} }
Ok(()) Ok(())
@@ -185,12 +191,17 @@ pub async fn verify_full_sync_aggregates_up_to<E: EthSpec>(
.get_beacon_blocks::<E>(BlockId::Slot(Slot::new(slot))) .get_beacon_blocks::<E>(BlockId::Slot(Slot::new(slot)))
.await .await
.map(|resp| { .map(|resp| {
resp.unwrap() resp.unwrap_or_else(|| {
.data panic!(
.message() "Beacon block for slot {} not returned from Beacon API",
.body() slot
.sync_aggregate() )
.map(|agg| agg.num_set_bits()) })
.data
.message()
.body()
.sync_aggregate()
.map(|agg| agg.num_set_bits())
}) })
.map_err(|e| format!("Error while getting beacon block: {:?}", e))? .map_err(|e| format!("Error while getting beacon block: {:?}", e))?
.map_err(|_| format!("Altair block {} should have sync aggregate", slot))?; .map_err(|_| format!("Altair block {} should have sync aggregate", slot))?;