diff --git a/beacon_node/beacon_chain/tests/electra.rs b/beacon_node/beacon_chain/tests/electra.rs
index 707ae5cff4..bea3ca1440 100644
--- a/beacon_node/beacon_chain/tests/electra.rs
+++ b/beacon_node/beacon_chain/tests/electra.rs
@@ -1,14 +1,45 @@
 #![cfg(not(debug_assertions))] // Tests run too slow in debug.
 
-use beacon_chain::test_utils::BeaconChainHarness;
+use beacon_chain::{
+    builder::BeaconChainBuilder,
+    test_utils::{get_kzg, mock_execution_layer_from_parts, BeaconChainHarness, DiskHarnessType},
+    ChainConfig, MigratorConfig, StateSkipConfig,
+};
+use logging::test_logger;
+use slot_clock::{SlotClock, TestingSlotClock};
 use state_processing::{
     per_block_processing, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot,
 };
 use std::sync::Arc;
+use std::time::Duration;
+use store::{database::interface::BeaconNodeBackend, HotColdDB, StoreConfig};
+use tempfile::{tempdir, TempDir};
 use types::*;
 
 type E = MainnetEthSpec;
 
+fn get_store(
+    db_path: &TempDir,
+    config: StoreConfig,
+    spec: Arc<ChainSpec>,
+) -> Arc<HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>>> {
+    let hot_path = db_path.path().join("chain_db");
+    let cold_path = db_path.path().join("freezer_db");
+    let blobs_path = db_path.path().join("blobs_db");
+    let log = test_logger();
+
+    HotColdDB::open(
+        &hot_path,
+        &cold_path,
+        &blobs_path,
+        |_, _, _| Ok(()),
+        config,
+        spec.into(),
+        log,
+    )
+    .expect("disk store should initialize")
+}
+
 #[tokio::test]
 async fn signature_verify_chain_segment_pubkey_cache() {
     let initial_validator_count = 32;
@@ -17,7 +48,13 @@ async fn signature_verify_chain_segment_pubkey_cache() {
     let pre_deposit_slot = deposit_slot - 1;
 
     let spec = Arc::new(ForkName::Electra.make_genesis_spec(E::default_spec()));
+    // Keep historic states on main harness.
+    let chain_config = ChainConfig {
+        reconstruct_historic_states: true,
+        ..ChainConfig::default()
+    };
     let harness = BeaconChainHarness::builder(E::default())
+        .chain_config(chain_config)
         .spec(spec.clone())
         .logger(logging::test_logger())
         .deterministic_keypairs(initial_validator_count)
@@ -103,6 +140,7 @@ async fn signature_verify_chain_segment_pubkey_cache() {
         pre_finalized_deposit_state.validators().len(),
         initial_validator_count
     );
+    let new_epoch_start_slot = pre_finalized_deposit_state.slot() + E::slots_per_epoch() + 1;
 
     // New validator should not be in the pubkey cache yet.
     assert_eq!(
@@ -112,34 +150,87 @@ async fn signature_verify_chain_segment_pubkey_cache() {
             .unwrap(),
         None
     );
-    let new_validator_index = initial_validator_count as u64;
+    let new_validator_index = initial_validator_count;
 
-    // Keep producing blocks (but not processing them) until we find one signed by our new
-    // validator.
-    // FIXME: probably need to use the harness so we can prepare payloads properly
-    let mut state = pre_finalized_deposit_state;
-    let mut slot = state.slot() + 1;
-    let mut blocks = vec![];
-    loop {
-        let (block, post_state) = harness.make_block(state, slot).await;
-        let proposer_index = block.0.message().proposer_index();
+    // Produce blocks in the next epoch. Statistically one of these should be signed by our new
+    // validator (99% probability).
+    harness.extend_to_slot(new_epoch_start_slot).await;
 
-        blocks.push(block);
+    let chain_dump = harness.chain.chain_dump();
 
-        state = post_state;
-        slot = slot + 1;
-
-        if proposer_index == new_validator_index {
-            break;
-        }
-    }
-
-    // New validator should still not be in the pubkey cache yet.
+    // New validator should be in the pubkey cache now.
     assert_eq!(
         harness
            .chain
            .validator_index(&new_validator_pk_bytes)
            .unwrap(),
-        None
+        Some(new_validator_index)
     );
+
+    // Initialise a new harness using checkpoint sync, prior to the new deposit being finalized.
+    let datadir = tempdir().unwrap();
+    let store = get_store(&datadir, Default::default(), spec.clone());
+
+    let kzg = get_kzg(&spec);
+
+    let mock = mock_execution_layer_from_parts(
+        harness.spec.clone(),
+        harness.runtime.task_executor.clone(),
+    );
+
+    // Initialise a new beacon chain from the finalized checkpoint.
+    // The slot clock must be set to a time ahead of the checkpoint state.
+    let slot_clock = TestingSlotClock::new(
+        Slot::new(0),
+        Duration::from_secs(harness.chain.genesis_time),
+        Duration::from_secs(spec.seconds_per_slot),
+    );
+    slot_clock.set_slot(harness.get_current_slot().as_u64());
+
+    let checkpoint_slot = deposit_slot
+        .epoch(E::slots_per_epoch())
+        .start_slot(E::slots_per_epoch());
+    let mut checkpoint_state = harness
+        .chain
+        .state_at_slot(checkpoint_slot, StateSkipConfig::WithStateRoots)
+        .unwrap();
+    let checkpoint_state_root = checkpoint_state.update_tree_hash_cache().unwrap();
+    let checkpoint_block_root = checkpoint_state.get_latest_block_root(checkpoint_state_root);
+    let checkpoint_block = harness
+        .chain
+        .get_block(&checkpoint_block_root)
+        .await
+        .unwrap()
+        .unwrap();
+    let checkpoint_blobs_opt = harness
+        .chain
+        .get_or_reconstruct_blobs(&checkpoint_block_root)
+        .unwrap();
+    let genesis_state = harness
+        .chain
+        .state_at_slot(Slot::new(0), StateSkipConfig::WithStateRoots)
+        .unwrap();
+    let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1);
+
+    let beacon_chain = BeaconChainBuilder::<DiskHarnessType<E>>::new(MainnetEthSpec, kzg)
+        .store(store.clone())
+        .custom_spec(spec.clone())
+        .task_executor(harness.chain.task_executor.clone())
+        .logger(harness.runtime.log.clone())
+        .weak_subjectivity_state(
+            checkpoint_state,
+            checkpoint_block.clone(),
+            checkpoint_blobs_opt.clone(),
+            genesis_state,
+        )
+        .unwrap()
+        .shutdown_sender(shutdown_tx)
+        .store_migrator_config(MigratorConfig::default().blocking())
+        .dummy_eth1_backend()
+        .expect("should build dummy backend")
+        .slot_clock(slot_clock)
+        .chain_config(ChainConfig::default())
+        .execution_layer(Some(mock.el))
+        .build()
+        .expect("should build");
 }