use crate::blob_verification::GossipVerifiedBlob; use crate::block_verification_types::{AsBlock, RpcBlock}; use crate::data_column_verification::CustodyDataColumn; use crate::kzg_utils::build_data_column_sidecars; use crate::observed_operations::ObservationOutcome; pub use crate::persisted_beacon_chain::PersistedBeaconChain; pub use crate::{ beacon_chain::{BEACON_CHAIN_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, single_attestation::single_attestation_to_attestation, sync_committee_verification::Error as SyncCommitteeError, validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}, BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification, }; use crate::{ builder::{BeaconChainBuilder, Witness}, BeaconChain, BeaconChainTypes, BlockError, ChainConfig, ServerSentEventHandler, StateSkipConfig, }; use crate::{get_block_root, BeaconBlockResponseWrapper}; use bls::get_withdrawal_credentials; use eth2::types::SignedBlockContentsTuple; use execution_layer::test_utils::generate_genesis_header; use execution_layer::{ auth::JwtKey, test_utils::{ ExecutionBlockGenerator, MockBuilder, MockExecutionLayer, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, }, ExecutionLayer, }; use futures::channel::mpsc::Receiver; pub use genesis::{InteropGenesisBuilder, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; use kzg::trusted_setup::get_trusted_setup; use kzg::{Kzg, TrustedSetup}; use logging::create_test_tracing_subscriber; use merkle_proof::MerkleTree; use operation_pool::ReceivedPreCapella; use parking_lot::{Mutex, RwLockWriteGuard}; use rand::rngs::StdRng; use rand::Rng; use rand::SeedableRng; use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::fmt; use std::str::FromStr; use 
std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, LazyLock}; use std::time::Duration; use store::database::interface::BeaconNodeBackend; use store::{config::StoreConfig, HotColdDB, ItemStore, MemoryStore}; use task_executor::TaskExecutor; use task_executor::{test_utils::TestRuntime, ShutdownReason}; use tree_hash::TreeHash; use types::indexed_attestation::IndexedAttestationBase; use types::payload::BlockProductionVersion; pub use types::test_utils::generate_deterministic_keypairs; use types::test_utils::TestRandom; use types::{typenum::U4294967296, *}; // 4th September 2019 pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690; // Environment variable to read if `fork_from_env` feature is enabled. pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; // Pre-computed data column sidecar using a single static blob from: // `beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz` pub const TEST_DATA_COLUMN_SIDECARS_SSZ: &[u8] = include_bytes!("test_utils/fixtures/test_data_column_sidecars.ssz"); // Default target aggregators to set during testing, this ensures an aggregator at each slot. // // You should mutate the `ChainSpec` prior to initialising the harness if you would like to use // a different value. 
// Target-aggregator count applied by `test_spec`: `u64::MAX` makes every validator an
// aggregator at every slot, which simplifies aggregation tests.
pub const DEFAULT_TARGET_AGGREGATORS: u64 = u64::MAX;

// NOTE(review): generic type parameters appear to have been stripped from this copy of the
// file (e.g. `LazyLock>`, `Arc`, `Vec`). Code tokens are preserved byte-for-byte below;
// restore the type parameters from the canonical source before compiling — TODO confirm.

// Lazily-built shared KZG context (precomputation enabled, no DAS) from the bundled trusted setup.
static KZG: LazyLock> = LazyLock::new(|| {
    let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice())
        .map_err(|e| format!("Unable to read trusted setup file: {}", e))
        .expect("should have trusted setup");
    let kzg = Kzg::new_from_trusted_setup(trusted_setup).expect("should create kzg");
    Arc::new(kzg)
});

// KZG context with data-availability-sampling support enabled (selected when Fulu is scheduled).
static KZG_PEERDAS: LazyLock> = LazyLock::new(|| {
    let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice())
        .map_err(|e| format!("Unable to read trusted setup file: {}", e))
        .expect("should have trusted setup");
    let kzg = Kzg::new_from_trusted_setup_das_enabled(trusted_setup).expect("should create kzg");
    Arc::new(kzg)
});

// KZG context built without precomputation (cheaper to construct; used when Deneb is not scheduled).
static KZG_NO_PRECOMP: LazyLock> = LazyLock::new(|| {
    let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice())
        .map_err(|e| format!("Unable to read trusted setup file: {}", e))
        .expect("should have trusted setup");
    let kzg = Kzg::new_from_trusted_setup_no_precomp(trusted_setup).expect("should create kzg");
    Arc::new(kzg)
});

/// Select the shared KZG context matching the fork schedule in `spec`:
/// DAS-enabled when Fulu is scheduled, precomputed when Deneb is scheduled,
/// otherwise the cheap no-precomputation variant.
pub fn get_kzg(spec: &ChainSpec) -> Arc {
    if spec.fulu_fork_epoch.is_some() {
        KZG_PEERDAS.clone()
    } else if spec.deneb_fork_epoch.is_some() {
        KZG.clone()
    } else {
        KZG_NO_PRECOMP.clone()
    }
}

pub type BaseHarnessType = Witness;
// Harness backed by an on-disk beacon node database.
pub type DiskHarnessType = BaseHarnessType, BeaconNodeBackend>;
// Harness backed by an in-memory store (fast; contents lost on drop).
pub type EphemeralHarnessType = BaseHarnessType, MemoryStore>;
// Boxed closure used to customise the `BeaconChainBuilder` during harness construction.
pub type BoxedMutator = Box<
    dyn FnOnce(
        BeaconChainBuilder>,
    ) -> BeaconChainBuilder>,
>;
// Result of bulk block addition: state-root map, block-root map, head block root, head state.
pub type AddBlocksResult = (
    HashMap,
    HashMap,
    SignedBeaconBlockHash,
    BeaconState,
);

/// Indicates how the `BeaconChainHarness` should produce blocks.
#[derive(Clone, Copy, Debug)]
pub enum BlockStrategy {
    /// Produce blocks upon the canonical head (normal case).
    OnCanonicalHead,
    /// Ignore the canonical head and produce blocks upon the block at the given slot.
    ///
    /// Useful for simulating forks.
    ForkCanonicalChainAt {
        /// The slot of the parent of the first block produced.
        previous_slot: Slot,
        /// The slot of the first block produced (must be higher than `previous_slot`).
        first_slot: Slot,
    },
}

/// Indicates how the `BeaconChainHarness` should produce attestations.
#[derive(Clone, Debug)]
pub enum AttestationStrategy {
    /// All validators attest to whichever block the `BeaconChainHarness` has produced.
    AllValidators,
    /// Only the given validators should attest. All others should fail to produce attestations.
    SomeValidators(Vec),
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SyncCommitteeStrategy {
    /// All sync committee validators sign.
    AllValidators,
    /// No validators sign.
    NoValidators,
}

// Whether light-client data production is enabled for the harness.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LightClientStrategy {
    Enabled,
    Disabled,
}

/// Indicates whether the `BeaconChainHarness` should use the `state.current_sync_committee` or
/// `state.next_sync_committee` when creating sync messages or contributions.
#[derive(Clone, Debug)]
pub enum RelativeSyncCommittee {
    Current,
    Next,
}

// Seeded RNG shared by the harness; wrapped in a `Mutex` so `&self` methods can draw from it.
fn make_rng() -> Mutex {
    // Nondeterminism in tests is a highly undesirable thing. Seed the RNG to some arbitrary
    // but fixed value for reproducibility.
    Mutex::new(StdRng::seed_from_u64(0x0DDB1A5E5BAD5EEDu64))
}

/// Return a `ChainSpec` suitable for test usage.
///
/// If the `fork_from_env` feature is enabled, read the fork to use from the FORK_NAME environment
/// variable. Otherwise use the default spec.
pub fn test_spec() -> ChainSpec {
    let mut spec = if cfg!(feature = "fork_from_env") {
        let fork_name = std::env::var(FORK_NAME_ENV_VAR).unwrap_or_else(|e| {
            panic!(
                "{} env var must be defined when using fork_from_env: {:?}",
                FORK_NAME_ENV_VAR, e
            )
        });
        let fork = ForkName::from_str(fork_name.as_str()).unwrap();
        fork.make_genesis_spec(E::default_spec())
    } else {
        E::default_spec()
    };
    // Set target aggregators to a high value by default.
    spec.target_aggregators_per_committee = DEFAULT_TARGET_AGGREGATORS;
    spec
}

/// Builder for a `BeaconChainHarness`; most fields are optional and validated at `build` time.
pub struct Builder {
    eth_spec_instance: T::EthSpec,
    spec: Option>,
    validator_keypairs: Option>,
    withdrawal_keypairs: Vec>,
    chain_config: Option,
    store_config: Option,
    #[allow(clippy::type_complexity)]
    store: Option>>,
    initial_mutator: Option>,
    store_mutator: Option>,
    execution_layer: Option>,
    mock_execution_layer: Option>,
    testing_slot_clock: Option,
    validator_monitor_config: Option,
    genesis_state_builder: Option>,
    import_all_data_columns: bool,
    runtime: TestRuntime,
}

impl Builder> {
    /// Fresh in-memory store seeded with an interop genesis state built from the configured
    /// validator keypairs. Registered as a store mutator; applied when `build` runs.
    pub fn fresh_ephemeral_store(mut self) -> Self {
        let spec = self.spec.as_ref().expect("cannot build without spec");
        let validator_keypairs = self
            .validator_keypairs
            .clone()
            .expect("cannot build without validator keypairs");
        let store = Arc::new(
            HotColdDB::open_ephemeral(self.store_config.clone().unwrap_or_default(), spec.clone())
                .unwrap(),
        );
        let genesis_state_builder = self.genesis_state_builder.take().unwrap_or_else(|| {
            // Set alternating withdrawal credentials if no builder is specified.
            InteropGenesisBuilder::default().set_alternating_eth1_withdrawal_credentials()
        });
        let mutator = move |builder: BeaconChainBuilder<_>| {
            let header = generate_genesis_header::(builder.get_spec(), false);
            let genesis_state = genesis_state_builder
                .set_opt_execution_payload_header(header)
                .build_genesis_state(
                    &validator_keypairs,
                    HARNESS_GENESIS_TIME,
                    Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
                    builder.get_spec(),
                )
                .expect("should generate interop state");
            builder
                .genesis_state(genesis_state)
                .expect("should build state using recent genesis")
        };
        self.store = Some(store);
        self.store_mutator(Box::new(mutator))
    }

    /// Create a new ephemeral store that uses the specified `genesis_state`.
    pub fn genesis_state_ephemeral_store(mut self, genesis_state: BeaconState) -> Self {
        let spec = self.spec.as_ref().expect("cannot build without spec");
        let store = Arc::new(
            HotColdDB::open_ephemeral(self.store_config.clone().unwrap_or_default(), spec.clone())
                .unwrap(),
        );
        let mutator = move |builder: BeaconChainBuilder<_>| {
            builder
                .genesis_state(genesis_state)
                .expect("should build state using recent genesis")
        };
        self.store = Some(store);
        self.store_mutator(Box::new(mutator))
    }

    /// Manually restore from a given `MemoryStore`.
    pub fn resumed_ephemeral_store(
        mut self,
        store: Arc, MemoryStore>>,
    ) -> Self {
        let mutator = move |builder: BeaconChainBuilder<_>| {
            builder
                .resume_from_db()
                .expect("should resume from database")
        };
        self.store = Some(store);
        self.store_mutator(Box::new(mutator))
    }
}

impl Builder> {
    /// Disk store, start from genesis.
    pub fn fresh_disk_store(
        mut self,
        store: Arc, BeaconNodeBackend>>,
    ) -> Self {
        let validator_keypairs = self
            .validator_keypairs
            .clone()
            .expect("cannot build without validator keypairs");
        let genesis_state_builder = self.genesis_state_builder.take().unwrap_or_else(|| {
            // Set alternating withdrawal credentials if no builder is specified.
            InteropGenesisBuilder::default().set_alternating_eth1_withdrawal_credentials()
        });
        let mutator = move |builder: BeaconChainBuilder<_>| {
            let header = generate_genesis_header::(builder.get_spec(), false);
            let genesis_state = genesis_state_builder
                .set_opt_execution_payload_header(header)
                .build_genesis_state(
                    &validator_keypairs,
                    HARNESS_GENESIS_TIME,
                    Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
                    builder.get_spec(),
                )
                .expect("should generate interop state");
            builder
                .genesis_state(genesis_state)
                .expect("should build state using recent genesis")
        };
        self.store = Some(store);
        self.store_mutator(Box::new(mutator))
    }

    /// Disk store, resume.
    pub fn resumed_disk_store(
        mut self,
        store: Arc, BeaconNodeBackend>>,
    ) -> Self {
        let mutator = move |builder: BeaconChainBuilder<_>| {
            builder
                .resume_from_db()
                .expect("should resume from database")
        };
        self.store = Some(store);
        self.store_mutator(Box::new(mutator))
    }
}

impl Builder>
where
    E: EthSpec,
    Hot: ItemStore,
    Cold: ItemStore,
{
    /// Empty builder: all configuration unset; defaults are applied when `build` is called.
    pub fn new(eth_spec_instance: E) -> Self {
        let runtime = TestRuntime::default();
        Self {
            eth_spec_instance,
            spec: None,
            validator_keypairs: None,
            withdrawal_keypairs: vec![],
            chain_config: None,
            store_config: None,
            store: None,
            initial_mutator: None,
            store_mutator: None,
            execution_layer: None,
            mock_execution_layer: None,
            testing_slot_clock: None,
            validator_monitor_config: None,
            genesis_state_builder: None,
            import_all_data_columns: false,
            runtime,
        }
    }

    /// Use `num_keypairs` deterministically-generated validator keypairs.
    pub fn deterministic_keypairs(self, num_keypairs: usize) -> Self {
        self.keypairs(types::test_utils::generate_deterministic_keypairs(
            num_keypairs,
        ))
    }

    pub fn keypairs(mut self, validator_keypairs: Vec) -> Self {
        self.validator_keypairs = Some(validator_keypairs);
        self
    }

    pub fn withdrawal_keypairs(mut self, withdrawal_keypairs: Vec>) -> Self {
        self.withdrawal_keypairs = withdrawal_keypairs;
        self
    }

    /// Initializes the BLS withdrawal keypairs for `num_keypairs` validators to
    /// the "deterministic" values, regardless of whether or not the validator has
    /// a BLS or execution address in the genesis deposits.
    ///
    /// This aligns with the withdrawal commitments used in the "interop"
    /// genesis states.
    pub fn deterministic_withdrawal_keypairs(self, num_keypairs: usize) -> Self {
        self.withdrawal_keypairs(
            types::test_utils::generate_deterministic_keypairs(num_keypairs)
                .into_iter()
                .map(Option::Some)
                .collect(),
        )
    }

    pub fn default_spec(self) -> Self {
        self.spec_or_default(None)
    }

    pub fn spec(self, spec: Arc) -> Self {
        self.spec_or_default(Some(spec))
    }

    pub fn spec_or_default(mut self, spec: Option>) -> Self {
        self.spec = Some(spec.unwrap_or_else(|| Arc::new(test_spec::())));
        self
    }

    /// This mutator will be run before the `store_mutator`.
    pub fn initial_mutator(mut self, mutator: BoxedMutator) -> Self {
        assert!(
            self.initial_mutator.is_none(),
            "initial mutator already set"
        );
        self.initial_mutator = Some(mutator);
        self
    }

    /// This mutator will be run after the `initial_mutator`.
    pub fn store_mutator(mut self, mutator: BoxedMutator) -> Self {
        assert!(self.store_mutator.is_none(), "store mutator already set");
        self.store_mutator = Some(mutator);
        self
    }

    pub fn validator_monitor_config(
        mut self,
        validator_monitor_config: ValidatorMonitorConfig,
    ) -> Self {
        self.validator_monitor_config = Some(validator_monitor_config);
        self
    }

    /// Purposefully replace the `store_mutator`.
    pub fn override_store_mutator(mut self, mutator: BoxedMutator) -> Self {
        // Unlike `store_mutator`, this requires one to already be set.
        assert!(self.store_mutator.is_some(), "store mutator not set");
        self.store_mutator = Some(mutator);
        self
    }

    pub fn chain_config(mut self, chain_config: ChainConfig) -> Self {
        self.chain_config = Some(chain_config);
        self
    }

    pub fn import_all_data_columns(mut self, import_all_data_columns: bool) -> Self {
        self.import_all_data_columns = import_all_data_columns;
        self
    }

    /// Connect to a real execution layer at `url` (panics if one is already configured).
    pub fn execution_layer_from_url(mut self, url: &str) -> Self {
        assert!(
            self.execution_layer.is_none(),
            "execution layer already defined"
        );
        let url = SensitiveUrl::parse(url).ok();
        let config = execution_layer::Config {
            execution_endpoint: url,
            secret_file: None,
            suggested_fee_recipient: Some(Address::repeat_byte(42)),
            ..Default::default()
        };
        let execution_layer =
            ExecutionLayer::from_config(config, self.runtime.task_executor.clone()).unwrap();
        self.execution_layer = Some(execution_layer);
        self
    }

    pub fn execution_layer(mut self, el: Option>) -> Self {
        self.execution_layer = el;
        self
    }

    /// Re-derive the mock EL's fork-transition timestamps from `genesis_time` and the spec's
    /// fork epochs (Capella/shanghai, Deneb/cancun, Electra/prague, Fulu/osaka).
    pub fn recalculate_fork_times_with_genesis(mut self, genesis_time: u64) -> Self {
        let mock = self
            .mock_execution_layer
            .as_mut()
            .expect("must have mock execution layer to recalculate fork times");
        let spec = self
            .spec
            .clone()
            .expect("cannot recalculate fork times without spec");
        mock.server.execution_block_generator().shanghai_time =
            spec.capella_fork_epoch.map(|epoch| {
                genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
            });
        mock.server.execution_block_generator().cancun_time = spec.deneb_fork_epoch.map(|epoch| {
            genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
        });
        mock.server.execution_block_generator().prague_time =
            spec.electra_fork_epoch.map(|epoch| {
                genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
            });
        mock.server.execution_block_generator().osaka_time = spec.fulu_fork_epoch.map(|epoch| {
            genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
        });
        self
    }

    /// Attach a fresh `MockExecutionLayer` built from the configured spec.
    pub fn mock_execution_layer(mut self) -> Self {
        let mock = mock_execution_layer_from_parts::(
            self.spec.clone().expect("cannot build without spec"),
            self.runtime.task_executor.clone(),
        );
        self.execution_layer = Some(mock.el.clone());
        self.mock_execution_layer = Some(mock);
        self
    }

    /// Instruct the mock execution engine to always return a "valid" response to any payload it is
    /// asked to execute.
    pub fn mock_execution_layer_all_payloads_valid(self) -> Self {
        self.mock_execution_layer
            .as_ref()
            .expect("requires mock execution layer")
            .server
            .all_payloads_valid();
        self
    }

    pub fn testing_slot_clock(mut self, slot_clock: TestingSlotClock) -> Self {
        self.testing_slot_clock = Some(slot_clock);
        self
    }

    /// Apply `f` to the (possibly default) interop genesis-state builder.
    pub fn with_genesis_state_builder(
        mut self,
        f: impl FnOnce(InteropGenesisBuilder) -> InteropGenesisBuilder,
    ) -> Self {
        let builder = self.genesis_state_builder.take().unwrap_or_default();
        self.genesis_state_builder = Some(f(builder));
        self
    }

    /// Consume the builder and construct the harness plus its `BeaconChain`.
    /// Panics if `spec`, `validator_keypairs` or `store` were not provided.
    pub fn build(self) -> BeaconChainHarness> {
        let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1);
        let spec = self.spec.expect("cannot build without spec");
        let seconds_per_slot = spec.seconds_per_slot;
        let validator_keypairs = self
            .validator_keypairs
            .expect("cannot build without validator keypairs");
        let kzg = get_kzg(&spec);
        let validator_monitor_config = self.validator_monitor_config.unwrap_or_default();
        let chain_config = self.chain_config.unwrap_or_default();
        let mut builder = BeaconChainBuilder::new(self.eth_spec_instance, kzg.clone())
            .custom_spec(spec.clone())
            .store(self.store.expect("cannot build without store"))
            .store_migrator_config(
                MigratorConfig::default()
                    .blocking()
                    .epochs_per_migration(chain_config.epochs_per_migration),
            )
            .task_executor(self.runtime.task_executor.clone())
            .execution_layer(self.execution_layer)
            .shutdown_sender(shutdown_tx)
            .chain_config(chain_config)
            .import_all_data_columns(self.import_all_data_columns)
            .event_handler(Some(ServerSentEventHandler::new_with_capacity(5)))
            .validator_monitor_config(validator_monitor_config)
            .rng(Box::new(StdRng::seed_from_u64(42)));
        // Apply the user-supplied mutators in order: initial mutator first, then store mutator.
        builder = if let Some(mutator) = self.initial_mutator {
            mutator(builder)
        } else {
            builder
        };
        builder = if let Some(mutator) = self.store_mutator {
            mutator(builder)
        } else {
            builder
        };
        // Initialize the slot clock only if it hasn't already been initialized.
        builder = if let Some(testing_slot_clock) = self.testing_slot_clock {
            builder.slot_clock(testing_slot_clock)
        } else if builder.get_slot_clock().is_none() {
            builder
                .testing_slot_clock(Duration::from_secs(seconds_per_slot))
                .expect("should configure testing slot clock")
        } else {
            builder
        };
        let chain = builder.build().expect("should build");
        BeaconChainHarness {
            spec: chain.spec.clone(),
            chain: Arc::new(chain),
            validator_keypairs,
            withdrawal_keypairs: self.withdrawal_keypairs,
            shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)),
            runtime: self.runtime,
            mock_execution_layer: self.mock_execution_layer,
            mock_builder: None,
            rng: make_rng(),
        }
    }
}

/// Build a `MockExecutionLayer` whose fork-transition timestamps are derived from
/// `HARNESS_GENESIS_TIME` and the fork epochs scheduled in `spec`.
pub fn mock_execution_layer_from_parts(
    spec: Arc,
    task_executor: TaskExecutor,
) -> MockExecutionLayer {
    let shanghai_time = spec.capella_fork_epoch.map(|epoch| {
        HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
    });
    let cancun_time = spec.deneb_fork_epoch.map(|epoch| {
        HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
    });
    let prague_time = spec.electra_fork_epoch.map(|epoch| {
        HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
    });
    let osaka_time = spec.fulu_fork_epoch.map(|epoch| {
        HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
    });
    let kzg = get_kzg(&spec);
    MockExecutionLayer::new(
        task_executor,
        DEFAULT_TERMINAL_BLOCK,
        shanghai_time,
        cancun_time,
        prague_time,
        osaka_time,
        Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
        spec,
        Some(kzg),
    )
}

/// A testing harness which can instantiate a `BeaconChain` and populate it with blocks and
/// attestations.
///
/// Used for testing.
pub struct BeaconChainHarness {
    pub validator_keypairs: Vec,
    /// Optional BLS withdrawal keys for each validator.
    ///
    /// If a validator index is missing from this vec or their entry is `None` then either
    /// no BLS withdrawal key was set for them (they had an address from genesis) or the test
    /// initializer neglected to set this field.
    pub withdrawal_keypairs: Vec>,
    pub chain: Arc>,
    pub spec: Arc,
    pub shutdown_receiver: Arc>>,
    pub runtime: TestRuntime,
    pub mock_execution_layer: Option>,
    pub mock_builder: Option>>,
    pub rng: Mutex,
}

// Per-committee (attestation, subnet) pairs in `SingleAttestation` form.
pub type CommitteeSingleAttestations = Vec<(SingleAttestation, SubnetId)>;
// Per-committee (attestation, subnet) pairs in full `Attestation` form.
pub type CommitteeAttestations = Vec<(Attestation, SubnetId)>;
// Per-committee attestations plus the optional signed aggregate for that committee.
pub type HarnessAttestations = Vec<(CommitteeAttestations, Option>)>;
pub type HarnessSingleAttestations = Vec<(
    CommitteeSingleAttestations,
    Option>,
)>;
// Per-committee sync messages (with validator index) plus optional signed contribution.
pub type HarnessSyncContributions = Vec<(
    Vec<(SyncCommitteeMessage, usize)>,
    Option>,
)>;

impl BeaconChainHarness>
where
    E: EthSpec,
    Hot: ItemStore,
    Cold: ItemStore,
{
    /// Entry point: install the test tracing subscriber and return a fresh `Builder`.
    pub fn builder(eth_spec_instance: E) -> Builder> {
        create_test_tracing_subscriber();
        Builder::new(eth_spec_instance)
    }

    /// Write-locked access to the mock EL's block generator (panics without a mock EL).
    pub fn execution_block_generator(&self) -> RwLockWriteGuard<'_, ExecutionBlockGenerator> {
        self.mock_execution_layer
            .as_ref()
            .expect("harness was not built with mock execution layer")
            .server
            .execution_block_generator()
    }

    /// Spin up a mock builder pointed at this harness' beacon node and mock EL; returns the
    /// builder server future which the caller must drive.
    pub fn set_mock_builder(
        &mut self,
        beacon_url: SensitiveUrl,
    ) -> impl futures::Future {
        let mock_el = self
            .mock_execution_layer
            .as_ref()
            .expect("harness was not built with mock execution layer");
        let mock_el_url = SensitiveUrl::parse(mock_el.server.url().as_str()).unwrap();
        // Create the builder, listening on a free port.
        let (mock_builder, (addr, mock_builder_server)) = MockBuilder::new_for_testing(
            mock_el_url,
            beacon_url,
            self.spec.clone(),
            self.runtime.task_executor.clone(),
        );
        // Set the builder URL in the execution layer now that its port is known.
        let port = addr.port();
        mock_el
            .el
            .set_builder_url(
                SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(),
                None,
                None,
                false,
            )
            .unwrap();
        self.mock_builder = Some(Arc::new(mock_builder));
        // Sanity check.
        let el_builder = self
            .chain
            .execution_layer
            .as_ref()
            .unwrap()
            .builder()
            .unwrap();
        let mock_el_builder = mock_el.el.builder().unwrap();
        assert!(Arc::ptr_eq(&el_builder, &mock_el_builder));
        mock_builder_server
    }

    /// Current head block as an `RpcBlock`, with blobs loaded from the store.
    pub fn get_head_block(&self) -> RpcBlock {
        let block = self.chain.head_beacon_block();
        let block_root = block.canonical_root();
        self.build_rpc_block_from_store_blobs(Some(block_root), block)
    }

    /// Load a blinded block by root from the store and reconstruct its full (unblinded) form.
    pub fn get_full_block(&self, block_root: &Hash256) -> RpcBlock {
        let block = self.chain.get_blinded_block(block_root).unwrap().unwrap();
        let full_block = self.chain.store.make_full_block(block_root, block).unwrap();
        self.build_rpc_block_from_store_blobs(Some(*block_root), Arc::new(full_block))
    }

    /// Indices `0..validator_count`.
    pub fn get_all_validators(&self) -> Vec {
        (0..self.validator_keypairs.len()).collect()
    }

    pub fn get_sampling_column_count(&self) -> usize {
        self.chain
            .data_availability_checker
            .custody_context()
            .num_of_data_columns_to_sample(None, &self.chain.spec) as usize
    }

    pub fn slots_per_epoch(&self) -> u64 {
        E::slots_per_epoch()
    }

    pub fn epoch_start_slot(&self, epoch: u64) -> u64 {
        let epoch = Epoch::new(epoch);
        epoch.start_slot(E::slots_per_epoch()).into()
    }

    /// Drain and return all shutdown reasons received so far (panics if the sender was dropped).
    pub fn shutdown_reasons(&self) -> Vec {
        let mutex = self.shutdown_receiver.clone();
        let mut receiver = mutex.lock();
        std::iter::from_fn(move || match receiver.try_next() {
            Ok(Some(s)) => Some(s),
            Ok(None) => panic!("shutdown sender dropped"),
            Err(_) => None,
        })
        .collect()
    }

    pub fn get_current_state(&self) -> BeaconState {
        self.chain.head_beacon_state_cloned()
    }

    pub fn get_timestamp_at_slot(&self) -> u64 {
        let state = self.get_current_state();
        compute_timestamp_at_slot(&state, state.slot(), &self.spec).unwrap()
    }

    pub fn get_current_state_and_root(&self) -> (BeaconState, Hash256) {
        let head = self.chain.head_snapshot();
        let state_root = head.beacon_state_root();
        (head.beacon_state.clone(), state_root)
    }

    pub fn head_slot(&self) -> Slot {
        self.chain.canonical_head.cached_head().head_slot()
    }

    pub fn head_block_root(&self) -> Hash256 {
        self.chain.canonical_head.cached_head().head_block_root()
    }

    pub fn finalized_checkpoint(&self) -> Checkpoint {
        self.chain
            .canonical_head
            .cached_head()
            .finalized_checkpoint()
    }

    pub fn justified_checkpoint(&self) -> Checkpoint {
        self.chain
            .canonical_head
            .cached_head()
            .justified_checkpoint()
    }

    pub fn get_current_slot(&self) -> Slot {
        self.chain.slot().unwrap()
    }

    pub fn get_block(
        &self,
        block_hash: SignedBeaconBlockHash,
    ) -> Option>> {
        self.chain.get_blinded_block(&block_hash.into()).unwrap()
    }

    pub fn block_exists(&self, block_hash: SignedBeaconBlockHash) -> bool {
        self.get_block(block_hash).is_some()
    }

    pub fn get_hot_state(&self, state_hash: BeaconStateHash) -> Option> {
        self.chain
            .store
            .load_hot_state(&state_hash.into(), true)
            .unwrap()
            .map(|(state, _)| state)
    }

    pub fn get_cold_state(&self, state_hash: BeaconStateHash) -> Option> {
        self.chain
            .store
            .load_cold_state(&state_hash.into())
            .unwrap()
    }

    pub fn hot_state_exists(&self, state_hash: BeaconStateHash) -> bool {
        self.get_hot_state(state_hash).is_some()
    }

    pub fn cold_state_exists(&self, state_hash: BeaconStateHash) -> bool {
        self.get_cold_state(state_hash).is_some()
    }

    /// A slot is "skipped" when its block root equals the previous slot's (i.e. no block at `slot`).
    pub fn is_skipped_slot(&self, state: &BeaconState, slot: Slot) -> bool {
        state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap()
    }

    pub fn knows_head(&self, block_hash: &SignedBeaconBlockHash) -> bool {
        self.chain
            .heads()
            .iter()
            .any(|(head, _)| *head == Hash256::from(*block_hash))
    }

    /// Panic with diagnostic output if `head_block_root` is not among the known fork-choice heads.
    pub fn assert_knows_head(&self, head_block_root: Hash256) {
        let heads =
            self.chain.heads();
        if !heads.iter().any(|(head, _)| *head == head_block_root) {
            let fork_choice = self.chain.canonical_head.fork_choice_read_lock();
            if heads.is_empty() {
                // Dump the raw proto-array nodes to aid debugging an empty head set.
                let nodes = &fork_choice.proto_array().core_proto_array().nodes;
                panic!("Expected to know head block root {head_block_root:?}, but heads is empty. Nodes: {nodes:#?}");
            } else {
                panic!(
                    "Expected to know head block root {head_block_root:?}, known heads {heads:#?}"
                );
            }
        }
    }

    /// Produce a block at `slot` and convert it to its blinded form.
    pub async fn make_blinded_block(
        &self,
        state: BeaconState,
        slot: Slot,
    ) -> (SignedBlindedBeaconBlock, BeaconState) {
        let (unblinded, new_state) = self.make_block(state, slot).await;
        ((*unblinded.0).clone().into(), new_state)
    }

    /// Returns a newly created block, signed by the proposer for the given slot.
    pub async fn make_block(
        &self,
        mut state: BeaconState,
        slot: Slot,
    ) -> (SignedBlockContentsTuple, BeaconState) {
        assert_ne!(slot, 0, "can't produce a block at slot 0");
        assert!(slot >= state.slot());
        complete_state_advance(&mut state, None, slot, &self.spec)
            .expect("should be able to advance state to slot");
        state.build_caches(&self.spec).expect("should build caches");
        let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();
        // If we produce two blocks for the same slot, they hash up to the same value and
        // BeaconChain errors out with `DuplicateFullyImported`. Vary the graffiti so that we produce
        // different blocks each time.
        let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>());
        let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot);
        let BeaconBlockResponseWrapper::Full(block_response) = self
            .chain
            .produce_block_on_state(
                state,
                None,
                slot,
                randao_reveal,
                Some(graffiti),
                ProduceBlockVerification::VerifyRandao,
                None,
                BlockProductionVersion::FullV2,
            )
            .await
            .unwrap()
        else {
            panic!("Should always be a full payload response");
        };
        let signed_block = Arc::new(block_response.block.sign(
            &self.validator_keypairs[proposer_index].sk,
            &block_response.state.fork(),
            block_response.state.genesis_validators_root(),
            &self.spec,
        ));
        // Blob items only accompany the block once Deneb is enabled for its fork.
        let block_contents: SignedBlockContentsTuple =
            if signed_block.fork_name_unchecked().deneb_enabled() {
                (signed_block, block_response.blob_items)
            } else {
                (signed_block, None)
            };
        (block_contents, block_response.state)
    }

    /// Useful for the `per_block_processing` tests. Creates a block, and returns the state after
    /// caches are built but before the generated block is processed.
    pub async fn make_block_return_pre_state(
        &self,
        mut state: BeaconState,
        slot: Slot,
    ) -> (SignedBlockContentsTuple, BeaconState) {
        assert_ne!(slot, 0, "can't produce a block at slot 0");
        assert!(slot >= state.slot());
        complete_state_advance(&mut state, None, slot, &self.spec)
            .expect("should be able to advance state to slot");
        state.build_caches(&self.spec).expect("should build caches");
        let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();
        // If we produce two blocks for the same slot, they hash up to the same value and
        // BeaconChain errors out with `DuplicateFullyImported`. Vary the graffiti so that we produce
        // different blocks each time.
        let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>());
        let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot);
        // Keep a copy of the advanced-but-unprocessed state to hand back to the caller.
        let pre_state = state.clone();
        let BeaconBlockResponseWrapper::Full(block_response) = self
            .chain
            .produce_block_on_state(
                state,
                None,
                slot,
                randao_reveal,
                Some(graffiti),
                ProduceBlockVerification::VerifyRandao,
                None,
                BlockProductionVersion::FullV2,
            )
            .await
            .unwrap()
        else {
            panic!("Should always be a full payload response");
        };
        let signed_block = Arc::new(block_response.block.sign(
            &self.validator_keypairs[proposer_index].sk,
            &block_response.state.fork(),
            block_response.state.genesis_validators_root(),
            &self.spec,
        ));
        let block_contents: SignedBlockContentsTuple =
            if signed_block.fork_name_unchecked().deneb_enabled() {
                (signed_block, block_response.blob_items)
            } else {
                (signed_block, None)
            };
        (block_contents, pre_state)
    }

    /// Create a randao reveal for a block at `slot`.
    pub fn sign_randao_reveal(
        &self,
        state: &BeaconState,
        proposer_index: usize,
        slot: Slot,
    ) -> Signature {
        let epoch = slot.epoch(E::slots_per_epoch());
        let domain = self.spec.get_domain(
            epoch,
            Domain::Randao,
            &state.fork(),
            state.genesis_validators_root(),
        );
        let message = epoch.signing_root(domain);
        let sk = &self.validator_keypairs[proposer_index].sk;
        sk.sign(message)
    }

    /// Sign a beacon block using the proposer's key.
    pub fn sign_beacon_block(
        &self,
        block: BeaconBlock,
        state: &BeaconState,
    ) -> SignedBeaconBlock {
        let proposer_index = block.proposer_index() as usize;
        block.sign(
            &self.validator_keypairs[proposer_index].sk,
            &state.fork(),
            state.genesis_validators_root(),
            &self.spec,
        )
    }

    #[allow(clippy::too_many_arguments)]
    pub fn produce_single_attestation_for_block(
        &self,
        slot: Slot,
        index: CommitteeIndex,
        beacon_block_root: Hash256,
        mut state: Cow>,
        state_root: Hash256,
        aggregation_bit_index: usize,
        validator_index: usize,
    ) -> Result {
        let epoch = slot.epoch(E::slots_per_epoch());
        if state.slot() > slot {
            return Err(BeaconChainError::CannotAttestToFutureState);
        } else if state.current_epoch() < epoch {
            // Advance the state (and rebuild its committee cache) up to the attestation epoch.
            let mut_state = state.to_mut();
            complete_state_advance(
                mut_state,
                Some(state_root),
                epoch.start_slot(E::slots_per_epoch()),
                &self.spec,
            )?;
            mut_state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;
        }
        let committee_len = state.get_beacon_committee(slot, index)?.committee.len();
        let target_slot = epoch.start_slot(E::slots_per_epoch());
        // Target root is the block root at the epoch boundary, or the attested block itself if
        // the state has not progressed past that boundary.
        let target_root = if state.slot() <= target_slot {
            beacon_block_root
        } else {
            *state.get_block_root(target_slot)?
}; let attestation: Attestation = Attestation::empty_for_signing( index, committee_len, slot, beacon_block_root, state.current_justified_checkpoint(), Checkpoint { epoch, root: target_root, }, &self.spec, )?; let attestation = match attestation { Attestation::Electra(mut attn) => { attn.aggregation_bits .set(aggregation_bit_index, true) .unwrap(); Attestation::Electra(attn) } Attestation::Base(mut attn) => { attn.aggregation_bits .set(aggregation_bit_index, true) .unwrap(); Attestation::Base(attn) } }; let aggregation_bits = attestation.get_aggregation_bits(); if aggregation_bits.len() != 1 { panic!("Must be an unaggregated attestation") } let aggregation_bit = *aggregation_bits.first().unwrap(); let committee = state.get_beacon_committee(slot, index).unwrap(); let attester_index = committee .committee .iter() .enumerate() .find_map(|(i, &index)| { if aggregation_bit as usize == i { return Some(index); } None }) .unwrap(); let single_attestation = attestation.to_single_attestation_with_attester_index(attester_index as u64)?; let fork_name = self.spec.fork_name_at_slot::(attestation.data().slot); let attestation: Attestation = single_attestation_to_attestation(&single_attestation, committee.committee, fork_name) .unwrap(); assert_eq!( single_attestation.committee_index, attestation.committee_index().unwrap() ); assert_eq!(single_attestation.attester_index, validator_index as u64); Ok(single_attestation) } /// Produces an "unaggregated" attestation for the given `slot` and `index` that attests to /// `beacon_block_root`. The provided `state` should match the `block.state_root` for the /// `block` identified by `beacon_block_root`. /// /// The attestation doesn't _really_ have anything about it that makes it unaggregated per say, /// however this function is only required in the context of forming an unaggregated /// attestation. It would be an (undetectable) violation of the protocol to create a /// `SignedAggregateAndProof` based upon the output of this function. 
///
/// This function will produce attestations to optimistic blocks, which is against the
/// specification but useful during testing.
pub fn produce_unaggregated_attestation_for_block( &self, slot: Slot, index: CommitteeIndex, beacon_block_root: Hash256, mut state: Cow>, state_root: Hash256, ) -> Result, BeaconChainError> { let epoch = slot.epoch(E::slots_per_epoch());
// Refuse to attest with a state ahead of `slot`; advance (copy-on-write) when the state
// is behind the attestation epoch, rebuilding the committee cache afterwards.
if state.slot() > slot { return Err(BeaconChainError::CannotAttestToFutureState); } else if state.current_epoch() < epoch { let mut_state = state.to_mut(); complete_state_advance( mut_state, Some(state_root), epoch.start_slot(E::slots_per_epoch()), &self.spec, )?; mut_state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; } let committee_len = state.get_beacon_committee(slot, index)?.committee.len();
// Target root is the attested block until the state passes the epoch start slot.
let target_slot = epoch.start_slot(E::slots_per_epoch()); let target_root = if state.slot() <= target_slot { beacon_block_root } else { *state.get_block_root(target_slot)? };
// The returned attestation is unsigned and has no aggregation bits set; callers fill both.
Ok(Attestation::empty_for_signing( index, committee_len, slot, beacon_block_root, state.current_justified_checkpoint(), Checkpoint { epoch, root: target_root, }, &self.spec, )?) }
/// A list of attestations for each committee for the given slot.
///
/// The first layer of the Vec is organised per committee. For example, if the return value is
/// called `all_attestations`, then all attestations in `all_attestations[0]` will be for
/// committee 0, whilst all in `all_attestations[1]` will be for committee 1.
pub fn make_single_attestations( &self, attesting_validators: &[usize], state: &BeaconState, state_root: Hash256, head_block_root: SignedBeaconBlockHash, attestation_slot: Slot, ) -> Vec { let fork = self .spec .fork_at_epoch(attestation_slot.epoch(E::slots_per_epoch()));
// Delegate to the `_with_opts` variant with no limit on the attester count.
self.make_single_attestations_with_opts( attesting_validators, state, state_root, head_block_root, attestation_slot, MakeAttestationOptions { limit: None, fork }, ) .0 }
/// A list of attestations for each committee for the given slot.
///
/// The first layer of the Vec is organised per committee. For example, if the return value is
/// called `all_attestations`, then all attestations in `all_attestations[0]` will be for
/// committee 0, whilst all in `all_attestations[1]` will be for committee 1.
pub fn make_unaggregated_attestations( &self, attesting_validators: &[usize], state: &BeaconState, state_root: Hash256, head_block_root: SignedBeaconBlockHash, attestation_slot: Slot, ) -> Vec> { let fork = self .spec .fork_at_epoch(attestation_slot.epoch(E::slots_per_epoch()));
// Delegate to the `_with_opts` variant with no limit on the attester count.
self.make_unaggregated_attestations_with_opts( attesting_validators, state, state_root, head_block_root, attestation_slot, MakeAttestationOptions { limit: None, fork }, ) .0 }
/// As `make_single_attestations`, but honours `opts` (optional attester `limit` plus `fork`).
/// Returns the per-committee `(attestation, subnet_id)` pairs together with the flattened
/// list of validator indices that actually attested.
pub fn make_single_attestations_with_opts( &self, attesting_validators: &[usize], state: &BeaconState, state_root: Hash256, head_block_root: SignedBeaconBlockHash, attestation_slot: Slot, opts: MakeAttestationOptions, ) -> (Vec, Vec) { let MakeAttestationOptions { limit, fork } = opts; let committee_count = state.get_committee_count_at_slot(state.slot()).unwrap();
// Counter shared across the rayon workers below; see the comment at its use site.
let num_attesters = AtomicUsize::new(0); let (attestations, split_attesters) = state .get_beacon_committees_at_slot(attestation_slot) .expect("should get committees") .iter() .map(|bc| { bc.committee .par_iter() .enumerate() .filter_map(|(i, validator_index)| { if !attesting_validators.contains(validator_index) { return None; } if let Some(limit) = limit {
// This atomics stuff is necessary because we're under a par_iter,
// and Rayon will deadlock if we use a mutex.
if num_attesters.fetch_add(1, Ordering::Relaxed) >= limit { num_attesters.fetch_sub(1, Ordering::Relaxed); return None; } } let mut attestation = self .produce_single_attestation_for_block( attestation_slot, bc.index, head_block_root.into(), Cow::Borrowed(state), state_root, i, *validator_index, ) .unwrap();
// Sign the attestation data with the attester's key.
attestation.signature = { let domain = self.spec.get_domain( attestation.data.target.epoch, Domain::BeaconAttester, &fork, state.genesis_validators_root(), ); let message = attestation.data.signing_root(domain); let mut agg_sig = AggregateSignature::infinity(); agg_sig.add_assign( &self.validator_keypairs[*validator_index].sk.sign(message), ); agg_sig }; let subnet_id = SubnetId::compute_subnet_for_single_attestation::( &attestation, committee_count, &self.chain.spec, ) .unwrap(); Some(((attestation, subnet_id), validator_index)) }) .unzip::<_, _, Vec<_>, Vec<_>>() }) .unzip::<_, _, Vec<_>, Vec<_>>();
// Flatten attesters.
let attesters = split_attesters.into_iter().flatten().collect::>(); if let Some(limit) = limit { assert_eq!(limit, num_attesters.load(Ordering::Relaxed)); assert_eq!( limit, attesters.len(), "failed to generate `limit` attestations" ); } (attestations, attesters) }
/// As `make_unaggregated_attestations`, but honours `opts` (optional attester `limit` plus
/// `fork`). Returns the per-committee `(attestation, subnet_id)` pairs together with the
/// flattened list of validator indices that actually attested.
pub fn make_unaggregated_attestations_with_opts( &self, attesting_validators: &[usize], state: &BeaconState, state_root: Hash256, head_block_root: SignedBeaconBlockHash, attestation_slot: Slot, opts: MakeAttestationOptions, ) -> (Vec>, Vec) { let MakeAttestationOptions { limit, fork } = opts; let committee_count = state.get_committee_count_at_slot(state.slot()).unwrap(); let num_attesters = AtomicUsize::new(0); let (attestations, split_attesters) = state .get_beacon_committees_at_slot(attestation_slot) .expect("should get committees") .iter() .map(|bc| { bc.committee .par_iter() .enumerate() .filter_map(|(i, validator_index)| { if !attesting_validators.contains(validator_index) { return None; } if let Some(limit) = limit {
// This atomics stuff is necessary because we're under a par_iter,
// and Rayon will deadlock if we use a mutex.
if num_attesters.fetch_add(1, Ordering::Relaxed) >= limit { num_attesters.fetch_sub(1, Ordering::Relaxed); return None; } } let mut attestation = self .produce_unaggregated_attestation_for_block( attestation_slot, bc.index, head_block_root.into(), Cow::Borrowed(state), state_root, ) .unwrap();
// Mark this validator's position in the committee.
match attestation { Attestation::Base(ref mut att) => { att.aggregation_bits.set(i, true).unwrap() } Attestation::Electra(ref mut att) => { att.aggregation_bits.set(i, true).unwrap() } }
// Sign the attestation data with the attester's key.
*attestation.signature_mut() = { let domain = self.spec.get_domain( attestation.data().target.epoch, Domain::BeaconAttester, &fork, state.genesis_validators_root(), ); let message = attestation.data().signing_root(domain); let mut agg_sig = AggregateSignature::infinity(); agg_sig.add_assign( &self.validator_keypairs[*validator_index].sk.sign(message), ); agg_sig }; let subnet_id = SubnetId::compute_subnet_for_attestation::( attestation.to_ref(), committee_count, &self.chain.spec, ) .unwrap(); Some(((attestation, subnet_id), validator_index)) }) .unzip::<_, _, Vec<_>, Vec<_>>() }) .unzip::<_, _, Vec<_>, Vec<_>>();
// Flatten attesters.
let attesters = split_attesters.into_iter().flatten().collect::>(); if let Some(limit) = limit { assert_eq!(limit, num_attesters.load(Ordering::Relaxed)); assert_eq!( limit, attesters.len(), "failed to generate `limit` attestations" ); } (attestations, attesters) }
/// A list of sync messages for the given state.
/// Produces one signed `SyncCommitteeMessage` per member of the chosen sync committee
/// (current or next, per `relative_sync_committee`), attesting to `head_block_root` at
/// `message_slot`. The outer Vec is grouped per sync subcommittee; each entry pairs the
/// message with the member's position inside its subcommittee.
///
/// Panics if `state` is pre-Altair (no sync committee) or a committee pubkey is unknown
/// to the chain.
pub fn make_sync_committee_messages( &self, state: &BeaconState, head_block_root: Hash256, message_slot: Slot, relative_sync_committee: RelativeSyncCommittee, ) -> Vec> { let sync_committee: Arc> = match relative_sync_committee { RelativeSyncCommittee::Current => state .current_sync_committee() .expect("should be called on altair beacon state") .clone(), RelativeSyncCommittee::Next => state .next_sync_committee() .expect("should be called on altair beacon state") .clone(), }; let fork = self .spec .fork_at_epoch(message_slot.epoch(E::slots_per_epoch()));
// Split the committee pubkeys into subcommittees and sign one message per member.
sync_committee .pubkeys .as_ref() .chunks(E::sync_subcommittee_size()) .map(|subcommittee| { subcommittee .iter() .enumerate() .map(|(subcommittee_position, pubkey)| { let validator_index = self .chain .validator_index(pubkey) .expect("should find validator index") .expect("pubkey should exist in the beacon chain"); let sync_message = SyncCommitteeMessage::new::( message_slot, head_block_root, validator_index as u64, &self.validator_keypairs[validator_index].sk, &fork, state.genesis_validators_root(), &self.spec, ); (sync_message, subcommittee_position) }) .collect() }) .collect() }
/// A list of attestations for each committee for the given slot.
///
/// The first layer of the Vec is organised per committee. For example, if the return value is
/// called `all_attestations`, then all attestations in `all_attestations[0]` will be for
/// committee 0, whilst all in `all_attestations[1]` will be for committee 1.
pub fn get_unaggregated_attestations( &self, attestation_strategy: &AttestationStrategy, state: &BeaconState, state_root: Hash256, head_block_root: Hash256, attestation_slot: Slot, ) -> Vec, SubnetId)>> {
// Resolve the strategy into a concrete list of validator indices, then delegate.
let validators: Vec = match attestation_strategy { AttestationStrategy::AllValidators => self.get_all_validators(), AttestationStrategy::SomeValidators(vals) => vals.clone(), }; self.make_unaggregated_attestations( &validators, state, state_root, head_block_root.into(), attestation_slot, ) }
/// A list of attestations for each committee for the given slot.
///
/// The first layer of the Vec is organised per committee. For example, if the return value is
/// called `all_attestations`, then all attestations in `all_attestations[0]` will be for
/// committee 0, whilst all in `all_attestations[1]` will be for committee 1.
pub fn get_single_attestations( &self, attestation_strategy: &AttestationStrategy, state: &BeaconState, state_root: Hash256, head_block_root: Hash256, attestation_slot: Slot, ) -> Vec> {
// Resolve the strategy into a concrete list of validator indices, then delegate.
let validators: Vec = match attestation_strategy { AttestationStrategy::AllValidators => self.get_all_validators(), AttestationStrategy::SomeValidators(vals) => vals.clone(), }; self.make_single_attestations( &validators, state, state_root, head_block_root.into(), attestation_slot, ) }
/// Produces unaggregated attestations plus signed aggregates for `attesting_validators`
/// at `slot`, with no limit on the attester count.
pub fn make_attestations( &self, attesting_validators: &[usize], state: &BeaconState, state_root: Hash256, block_hash: SignedBeaconBlockHash, slot: Slot, ) -> HarnessAttestations { self.make_attestations_with_limit( attesting_validators, state, state_root, block_hash, slot, None, ) .0 }
/// Produce exactly `limit` attestations.
///
/// Return attestations and vec of validator indices that attested.
pub fn make_attestations_with_limit( &self, attesting_validators: &[usize], state: &BeaconState, state_root: Hash256, block_hash: SignedBeaconBlockHash, slot: Slot, limit: Option, ) -> (HarnessAttestations, Vec) { let fork = self.spec.fork_at_epoch(slot.epoch(E::slots_per_epoch())); self.make_attestations_with_opts( attesting_validators, state, state_root, block_hash, slot, MakeAttestationOptions { limit, fork }, ) } pub fn make_attestations_with_opts( &self, attesting_validators: &[usize], state: &BeaconState, state_root: Hash256, block_hash: SignedBeaconBlockHash, slot: Slot, opts: MakeAttestationOptions, ) -> (HarnessAttestations, Vec) { let MakeAttestationOptions { fork, .. } = opts; let (unaggregated_attestations, attesters) = self.make_unaggregated_attestations_with_opts( attesting_validators, state, state_root, block_hash, slot, opts, ); let aggregated_attestations: Vec>> = unaggregated_attestations .iter() .map(|committee_attestations| { // If there are any attestations in this committee, create an aggregate. if let Some((attestation, _)) = committee_attestations.first() { let bc = state .get_beacon_committee( attestation.data().slot, attestation.committee_index().unwrap(), ) .unwrap(); // Find an aggregator if one exists. Return `None` if there are no // aggregators. 
let aggregator_index = bc .committee .iter() .find(|&validator_index| { if !attesters.contains(validator_index) { return false; } let selection_proof = SelectionProof::new::( slot, &self.validator_keypairs[*validator_index].sk, &fork, state.genesis_validators_root(), &self.spec, ); selection_proof .is_aggregator(bc.committee.len(), &self.spec) .unwrap_or(false) }) .copied()?; let fork_name = self.spec.fork_name_at_slot::(slot); let aggregate = if fork_name.electra_enabled() { self.chain.get_aggregated_attestation_electra( slot, &attestation.data().tree_hash_root(), bc.index, ) } else { self.chain .get_aggregated_attestation_base(attestation.data()) } .unwrap() .unwrap_or_else(|| { committee_attestations.iter().skip(1).fold( attestation.clone(), |mut agg, (att, _)| { agg.aggregate(att.to_ref()); agg }, ) }); // If the chain is able to produce an aggregate, use that. Otherwise, build an // aggregate locally. let signed_aggregate = SignedAggregateAndProof::from_aggregate( aggregator_index as u64, aggregate.to_ref(), None, &self.validator_keypairs[aggregator_index].sk, &fork, state.genesis_validators_root(), &self.spec, ); Some(signed_aggregate) } else { None } }) .collect(); ( unaggregated_attestations .into_iter() .zip(aggregated_attestations) .collect(), attesters, ) } pub fn make_sync_contributions( &self, state: &BeaconState, block_hash: Hash256, slot: Slot, relative_sync_committee: RelativeSyncCommittee, ) -> HarnessSyncContributions { let sync_messages = self.make_sync_committee_messages(state, block_hash, slot, relative_sync_committee); let sync_contributions: Vec>> = sync_messages .iter() .enumerate() .map(|(subnet_id, committee_messages)| { // If there are any sync messages in this committee, create an aggregate. 
if let Some((sync_message, subcommittee_position)) = committee_messages.first() { let sync_committee: Arc> = state .current_sync_committee() .expect("should be called on altair beacon state") .clone(); let aggregator_index = sync_committee .get_subcommittee_pubkeys(subnet_id) .unwrap() .iter() .find_map(|pubkey| { let validator_index = self .chain .validator_index(pubkey) .expect("should find validator index") .expect("pubkey should exist in the beacon chain"); let selection_proof = SyncSelectionProof::new::( slot, subnet_id as u64, &self.validator_keypairs[validator_index].sk, &state.fork(), state.genesis_validators_root(), &self.spec, ); selection_proof .is_aggregator::() .expect("should determine aggregator") .then_some(validator_index) })?; let default = SyncCommitteeContribution::from_message( sync_message, subnet_id as u64, *subcommittee_position, ) .expect("should derive sync contribution"); let aggregate = committee_messages.iter().skip(1).fold( default, |mut agg, (sig, position)| { let contribution = SyncCommitteeContribution::from_message( sig, subnet_id as u64, *position, ) .expect("should derive sync contribution"); agg.aggregate(&contribution); agg }, ); let signed_aggregate = SignedContributionAndProof::from_aggregate( aggregator_index as u64, aggregate, None, &self.validator_keypairs[aggregator_index].sk, &state.fork(), state.genesis_validators_root(), &self.spec, ); Some(signed_aggregate) } else { None } }) .collect(); sync_messages.into_iter().zip(sync_contributions).collect() } pub fn make_attester_slashing(&self, validator_indices: Vec) -> AttesterSlashing { self.make_attester_slashing_with_epochs(validator_indices, None, None, None, None) } pub fn make_attester_slashing_with_epochs( &self, validator_indices: Vec, source1: Option, target1: Option, source2: Option, target2: Option, ) -> AttesterSlashing { let fork = self.chain.canonical_head.cached_head().head_fork(); let fork_name = self.spec.fork_name_at_slot::(Slot::new(0)); let data = 
AttestationData { slot: Slot::new(0), index: 0, beacon_block_root: Hash256::zero(), target: Checkpoint { root: Hash256::zero(), epoch: target1.unwrap_or(fork.epoch), }, source: Checkpoint { root: Hash256::zero(), epoch: source1.unwrap_or(Epoch::new(0)), }, }; let mut attestation_1 = if fork_name.electra_enabled() { IndexedAttestation::Electra(IndexedAttestationElectra { attesting_indices: VariableList::new(validator_indices).unwrap(), data, signature: AggregateSignature::infinity(), }) } else { IndexedAttestation::Base(IndexedAttestationBase { attesting_indices: VariableList::new(validator_indices).unwrap(), data, signature: AggregateSignature::infinity(), }) }; let mut attestation_2 = attestation_1.clone(); attestation_2.data_mut().index += 1; attestation_2.data_mut().source.epoch = source2.unwrap_or(Epoch::new(0)); attestation_2.data_mut().target.epoch = target2.unwrap_or(fork.epoch); for attestation in &mut [&mut attestation_1, &mut attestation_2] { match attestation { IndexedAttestation::Base(attestation) => { for i in attestation.attesting_indices.iter() { let sk = &self.validator_keypairs[*i as usize].sk; let genesis_validators_root = self.chain.genesis_validators_root; let domain = self.chain.spec.get_domain( attestation.data.target.epoch, Domain::BeaconAttester, &fork, genesis_validators_root, ); let message = attestation.data.signing_root(domain); attestation.signature.add_assign(&sk.sign(message)); } } IndexedAttestation::Electra(attestation) => { for i in attestation.attesting_indices.iter() { let sk = &self.validator_keypairs[*i as usize].sk; let genesis_validators_root = self.chain.genesis_validators_root; let domain = self.chain.spec.get_domain( attestation.data.target.epoch, Domain::BeaconAttester, &fork, genesis_validators_root, ); let message = attestation.data.signing_root(domain); attestation.signature.add_assign(&sk.sign(message)); } } } } if fork_name.electra_enabled() { AttesterSlashing::Electra(AttesterSlashingElectra { attestation_1: 
attestation_1.as_electra().unwrap().clone(), attestation_2: attestation_2.as_electra().unwrap().clone(), }) } else { AttesterSlashing::Base(AttesterSlashingBase { attestation_1: attestation_1.as_base().unwrap().clone(), attestation_2: attestation_2.as_base().unwrap().clone(), }) } } pub fn make_attester_slashing_different_indices( &self, validator_indices_1: Vec, validator_indices_2: Vec, ) -> AttesterSlashing { let fork_name = self.spec.fork_name_at_slot::(Slot::new(0)); let data = AttestationData { slot: Slot::new(0), index: 0, beacon_block_root: Hash256::zero(), target: Checkpoint { root: Hash256::zero(), epoch: Epoch::new(0), }, source: Checkpoint { root: Hash256::zero(), epoch: Epoch::new(0), }, }; let (mut attestation_1, mut attestation_2) = if fork_name.electra_enabled() { let attestation_1 = IndexedAttestationElectra { attesting_indices: VariableList::new(validator_indices_1).unwrap(), data: data.clone(), signature: AggregateSignature::infinity(), }; let attestation_2 = IndexedAttestationElectra { attesting_indices: VariableList::new(validator_indices_2).unwrap(), data, signature: AggregateSignature::infinity(), }; ( IndexedAttestation::Electra(attestation_1), IndexedAttestation::Electra(attestation_2), ) } else { let attestation_1 = IndexedAttestationBase { attesting_indices: VariableList::new(validator_indices_1).unwrap(), data: data.clone(), signature: AggregateSignature::infinity(), }; let attestation_2 = IndexedAttestationBase { attesting_indices: VariableList::new(validator_indices_2).unwrap(), data, signature: AggregateSignature::infinity(), }; ( IndexedAttestation::Base(attestation_1), IndexedAttestation::Base(attestation_2), ) }; attestation_2.data_mut().index += 1; let fork = self.chain.canonical_head.cached_head().head_fork(); for attestation in &mut [&mut attestation_1, &mut attestation_2] { match attestation { IndexedAttestation::Base(attestation) => { for i in attestation.attesting_indices.iter() { let sk = &self.validator_keypairs[*i as 
usize].sk; let genesis_validators_root = self.chain.genesis_validators_root; let domain = self.chain.spec.get_domain( attestation.data.target.epoch, Domain::BeaconAttester, &fork, genesis_validators_root, ); let message = attestation.data.signing_root(domain); attestation.signature.add_assign(&sk.sign(message)); } } IndexedAttestation::Electra(attestation) => { for i in attestation.attesting_indices.iter() { let sk = &self.validator_keypairs[*i as usize].sk; let genesis_validators_root = self.chain.genesis_validators_root; let domain = self.chain.spec.get_domain( attestation.data.target.epoch, Domain::BeaconAttester, &fork, genesis_validators_root, ); let message = attestation.data.signing_root(domain); attestation.signature.add_assign(&sk.sign(message)); } } } } if fork_name.electra_enabled() { AttesterSlashing::Electra(AttesterSlashingElectra { attestation_1: attestation_1.as_electra().unwrap().clone(), attestation_2: attestation_2.as_electra().unwrap().clone(), }) } else { AttesterSlashing::Base(AttesterSlashingBase { attestation_1: attestation_1.as_base().unwrap().clone(), attestation_2: attestation_2.as_base().unwrap().clone(), }) } } pub fn make_proposer_slashing(&self, validator_index: u64) -> ProposerSlashing { self.make_proposer_slashing_at_slot(validator_index, None) } pub fn make_proposer_slashing_at_slot( &self, validator_index: u64, slot_override: Option, ) -> ProposerSlashing { let mut block_header_1 = self.chain.head_beacon_block().message().block_header(); block_header_1.proposer_index = validator_index; if let Some(slot) = slot_override { block_header_1.slot = slot; } let mut block_header_2 = block_header_1.clone(); block_header_2.state_root = Hash256::zero(); let sk = &self.validator_keypairs[validator_index as usize].sk; let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; let mut signed_block_headers = vec![block_header_1, block_header_2] .into_iter() 
.map(|block_header| { block_header.sign::(sk, &fork, genesis_validators_root, &self.chain.spec) }) .collect::>(); ProposerSlashing { signed_header_2: signed_block_headers.remove(1), signed_header_1: signed_block_headers.remove(0), } } pub fn make_voluntary_exit(&self, validator_index: u64, epoch: Epoch) -> SignedVoluntaryExit { let sk = &self.validator_keypairs[validator_index as usize].sk; let genesis_validators_root = self.chain.genesis_validators_root; VoluntaryExit { epoch, validator_index, } .sign(sk, genesis_validators_root, &self.chain.spec) } pub fn add_proposer_slashing(&self, validator_index: u64) -> Result<(), String> { let propposer_slashing = self.make_proposer_slashing(validator_index); if let ObservationOutcome::New(verified_proposer_slashing) = self .chain .verify_proposer_slashing_for_gossip(propposer_slashing) .expect("should verify proposer slashing for gossip") { self.chain .import_proposer_slashing(verified_proposer_slashing); Ok(()) } else { Err("should observe new proposer slashing".to_string()) } } pub fn add_attester_slashing(&self, validator_indices: Vec) -> Result<(), String> { let attester_slashing = self.make_attester_slashing(validator_indices); if let ObservationOutcome::New(verified_attester_slashing) = self .chain .verify_attester_slashing_for_gossip(attester_slashing) .expect("should verify attester slashing for gossip") { self.chain .import_attester_slashing(verified_attester_slashing); Ok(()) } else { Err("should observe new attester slashing".to_string()) } } pub fn add_bls_to_execution_change( &self, validator_index: u64, address: Address, ) -> Result<(), String> { let signed_bls_change = self.make_bls_to_execution_change(validator_index, address); if let ObservationOutcome::New(verified_bls_change) = self .chain .verify_bls_to_execution_change_for_gossip(signed_bls_change) .expect("should verify BLS to execution change for gossip") { self.chain .import_bls_to_execution_change(verified_bls_change, ReceivedPreCapella::No) 
.then_some(()) .ok_or("should import BLS to execution change to the op pool".to_string()) } else { Err("should observe new BLS to execution change".to_string()) } } pub fn make_bls_to_execution_change( &self, validator_index: u64, address: Address, ) -> SignedBlsToExecutionChange { let keypair = self.get_withdrawal_keypair(validator_index); self.make_bls_to_execution_change_with_keys( validator_index, address, &keypair.pk, &keypair.sk, ) } pub fn make_bls_to_execution_change_with_keys( &self, validator_index: u64, address: Address, pubkey: &PublicKey, secret_key: &SecretKey, ) -> SignedBlsToExecutionChange { let genesis_validators_root = self.chain.genesis_validators_root; BlsToExecutionChange { validator_index, from_bls_pubkey: pubkey.compress(), to_execution_address: address, } .sign(secret_key, genesis_validators_root, &self.chain.spec) } pub fn get_withdrawal_keypair(&self, validator_index: u64) -> &Keypair { self.withdrawal_keypairs .get(validator_index as usize) .expect("BLS withdrawal key missing from harness") .as_ref() .expect("no withdrawal key for validator") } pub fn add_voluntary_exit( &self, block: &mut BeaconBlock, validator_index: u64, epoch: Epoch, ) { let exit = self.make_voluntary_exit(validator_index, epoch); block.body_mut().voluntary_exits_mut().push(exit).unwrap(); } /// Create a new block, apply `block_modifier` to it, sign it and return it. /// /// The state returned is a pre-block state at the same slot as the produced block. 
pub async fn make_block_with_modifier( &self, state: BeaconState, slot: Slot, block_modifier: impl FnOnce(&mut BeaconBlock), ) -> (SignedBlockContentsTuple, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); let ((block, blobs), state) = self.make_block_return_pre_state(state, slot).await;
// Strip the original signature, mutate the block, then re-sign as the proposer.
let (mut block, _) = (*block).clone().deconstruct(); block_modifier(&mut block); let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); let signed_block = block.sign( &self.validator_keypairs[proposer_index].sk, &state.fork(), state.genesis_validators_root(), &self.spec, ); ((Arc::new(signed_block), blobs), state) }
/// As `make_block_with_modifier`, but applies `blob_modifier` to the produced blobs list
/// instead of mutating the block. Panics if the block was produced without blobs.
pub async fn make_blob_with_modifier( &self, state: BeaconState, slot: Slot, blob_modifier: impl FnOnce(&mut BlobsList), ) -> (SignedBlockContentsTuple, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); let ((block, mut blobs), state) = self.make_block_return_pre_state(state, slot).await; let (block, _) = (*block).clone().deconstruct(); blob_modifier(&mut blobs.as_mut().unwrap().1); let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); let signed_block = block.sign( &self.validator_keypairs[proposer_index].sk, &state.fork(), state.genesis_validators_root(), &self.spec, ); ((Arc::new(signed_block), blobs), state) }
/// Generates `num_deposits` random deposits with Merkle proofs and wires the resulting
/// deposit root/count into `state`'s eth1 data. `invalid_pubkey` / `invalid_signature`,
/// when set, overwrite every deposit's pubkey/signature to produce invalid deposits.
pub fn make_deposits<'a>( &self, state: &'a mut BeaconState, num_deposits: usize, invalid_pubkey: Option, invalid_signature: Option, ) -> (Vec, &'a mut BeaconState) { let mut datas = vec![]; for _ in 0..num_deposits { let keypair = Keypair::random(); let pubkeybytes = PublicKeyBytes::from(keypair.pk.clone()); let mut data = DepositData { pubkey: pubkeybytes, withdrawal_credentials: Hash256::from_slice( &get_withdrawal_credentials(&keypair.pk, self.spec.bls_withdrawal_prefix_byte) [..], ), amount: self.spec.min_deposit_amount, signature: SignatureBytes::empty(), }; data.signature = data.create_signature(&keypair.sk, &self.spec); if let Some(invalid_pubkey) = invalid_pubkey { data.pubkey = invalid_pubkey; } if let Some(invalid_signature) = invalid_signature.clone() { data.signature = invalid_signature; } datas.push(data); }
// Vector containing all leaves
let leaves = datas .iter() .map(|data| data.tree_hash_root()) .collect::>();
// Building a VarList from leaves
let deposit_data_list = VariableList::<_, U4294967296>::from(leaves.clone());
// Setting the deposit_root to be the tree_hash_root of the VarList
state.eth1_data_mut().deposit_root = deposit_data_list.tree_hash_root(); state.eth1_data_mut().deposit_count = num_deposits as u64; *state.eth1_deposit_index_mut() = 0;
// Building the merkle tree used for generating proofs
let tree = MerkleTree::create(&leaves[..], self.spec.deposit_contract_tree_depth as usize);
// Building proofs; the deposit count is appended as the final (mix-in) proof element.
let mut proofs = vec![]; for i in 0..leaves.len() { let (_, mut proof) = tree .generate_proof(i, self.spec.deposit_contract_tree_depth as usize) .expect("should generate proof"); proof.push(Hash256::from_slice(&int_to_bytes32(leaves.len() as u64))); proofs.push(proof); }
// Building deposits
let deposits = datas .into_par_iter() .zip(proofs.into_par_iter()) .map(|(data, proof)| (data, proof.into())) .map(|(data, proof)| Deposit { proof, data }) .collect::>();
// Pushing deposits to block body
(deposits, state) }
/// Advances the slot clock to `slot` and imports `block_contents` into the chain,
/// recomputing the head afterwards. Panics if the block's blobs are not available.
pub async fn process_block( &self, slot: Slot, block_root: Hash256, block_contents: SignedBlockContentsTuple, ) -> Result { self.set_current_slot(slot); let (block, blob_items) = block_contents; let rpc_block = self.build_rpc_block_from_blobs(block_root, block, blob_items)?; let block_hash: SignedBeaconBlockHash = self .chain .process_block( block_root, rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::RangeSync, || Ok(()), ) .await? .try_into() .expect("block blobs are available"); self.chain.recompute_head_at_current_slot().await; Ok(block_hash) }
/// As `process_block`, but without touching the slot clock; the block root is derived
/// from the block itself.
pub async fn process_block_result( &self, block_contents: SignedBlockContentsTuple, ) -> Result { let (block, blob_items) = block_contents; let block_root = block.canonical_root(); let rpc_block = self.build_rpc_block_from_blobs(block_root, block, blob_items)?; let block_hash: SignedBeaconBlockHash = self .chain .process_block( block_root, rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::RangeSync, || Ok(()), ) .await? .try_into() .expect("block blobs are available"); self.chain.recompute_head_at_current_slot().await; Ok(block_hash) }
/// Builds an `Rpc` block from a `SignedBeaconBlock` and blobs or data columns retrieved from
/// the database.
pub fn build_rpc_block_from_store_blobs( &self, block_root: Option, block: Arc>, ) -> RpcBlock { let block_root = block_root.unwrap_or_else(|| get_block_root(&block)); let has_blobs = block .message() .body() .blob_kzg_commitments() .is_ok_and(|c| !c.is_empty()); if !has_blobs { return RpcBlock::new_without_blobs(Some(block_root), block); }
// Blobs are stored as data columns from Fulu (PeerDAS)
if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) { let columns = self.chain.get_data_columns(&block_root).unwrap().unwrap(); let custody_columns = columns .into_iter() .map(CustodyDataColumn::from_asserted_custody) .collect::>(); RpcBlock::new_with_custody_columns(Some(block_root), block, custody_columns, &self.spec) .unwrap() } else { let blobs = self.chain.get_blobs(&block_root).unwrap().blobs(); RpcBlock::new(Some(block_root), block, blobs).unwrap() } }
/// Builds an `RpcBlock` from a `SignedBeaconBlock` and `BlobsList`.
fn build_rpc_block_from_blobs( &self, block_root: Hash256, block: Arc>>, blob_items: Option<(KzgProofs, BlobsList)>, ) -> Result, BlockError> { Ok(if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) { let sampling_column_count = self.get_sampling_column_count(); if blob_items.is_some_and(|(_, blobs)| !blobs.is_empty()) { // Note: this method ignores the actual custody columns and just take the first // `sampling_column_count` for testing purpose only, because the chain does not // currently have any knowledge of the columns being custodied. let columns = generate_data_column_sidecars_from_block(&block, &self.spec) .into_iter() .take(sampling_column_count) .map(CustodyDataColumn::from_asserted_custody) .collect::>(); RpcBlock::new_with_custody_columns(Some(block_root), block, columns, &self.spec)? } else { RpcBlock::new_without_blobs(Some(block_root), block) } } else { let blobs = blob_items .map(|(proofs, blobs)| { BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec) }) .transpose() .unwrap(); RpcBlock::new(Some(block_root), block, blobs)? }) } pub fn process_attestations( &self, attestations: HarnessAttestations, state: &BeaconState, ) { let num_validators = self.validator_keypairs.len(); let mut unaggregated = Vec::with_capacity(num_validators); // This is an over-allocation, but it should be fine. It won't be *that* memory hungry and // it's nice to have fast tests. 
    let mut aggregated = Vec::with_capacity(num_validators);
    for (unaggregated_attestations, maybe_signed_aggregate) in attestations.iter() {
        for (attn, subnet) in unaggregated_attestations {
            let aggregation_bits = attn.get_aggregation_bits();
            // Harness attestations carry exactly one set bit before aggregation.
            if aggregation_bits.len() != 1 {
                panic!("Must be an unaggregated attestation")
            }
            let aggregation_bit = *aggregation_bits.first().unwrap();
            let committee = state
                .get_beacon_committee(attn.data().slot, attn.committee_index().unwrap())
                .unwrap();
            // Map the single set aggregation bit back to the validator index within
            // the committee.
            let attester_index = committee
                .committee
                .iter()
                .enumerate()
                .find_map(|(i, &index)| {
                    if aggregation_bit as usize == i {
                        return Some(index);
                    }
                    None
                })
                .unwrap();
            let single_attestation = attn
                .to_single_attestation_with_attester_index(attester_index as u64)
                .unwrap();
            unaggregated.push((single_attestation, Some(*subnet)));
        }
        if let Some(a) = maybe_signed_aggregate {
            aggregated.push(a)
        }
    }
    // Gossip-verify all unaggregated attestations in one batch, then feed the naive
    // aggregation pool.
    for result in self
        .chain
        .batch_verify_unaggregated_attestations_for_gossip(
            unaggregated.iter().map(|(attn, subnet)| (attn, *subnet)),
        )
        .unwrap()
    {
        let verified = result.unwrap();
        self.chain.add_to_naive_aggregation_pool(&verified).unwrap();
    }
    // Aggregates additionally update fork choice and the block-inclusion pool.
    for result in self
        .chain
        .batch_verify_aggregated_attestations_for_gossip(aggregated.into_iter())
        .unwrap()
    {
        let verified = result.unwrap();
        self.chain
            .apply_attestation_to_fork_choice(&verified)
            .unwrap();
        self.chain.add_to_block_inclusion_pool(verified).unwrap();
    }
}

/// Sets the testing slot clock to `slot`.
///
/// Panics if `slot` lands in an earlier epoch than the current one, since moving the
/// harness clock backwards across epochs is not well defined.
pub fn set_current_slot(&self, slot: Slot) {
    let current_slot = self.chain.slot().unwrap();
    let current_epoch = current_slot.epoch(E::slots_per_epoch());
    let epoch = slot.epoch(E::slots_per_epoch());
    assert!(
        epoch >= current_epoch,
        "Jumping backwards to an earlier epoch isn't well defined. \
         Please generate test blocks epoch-by-epoch instead."
    );
    self.chain.slot_clock.set_slot(slot.into());
}

/// Advances the clock to `slot`, produces a block on top of `state` and imports it.
///
/// Returns the imported block's hash, the full block contents and the post-state.
pub async fn add_block_at_slot(
    &self,
    slot: Slot,
    state: BeaconState,
) -> Result<
    (
        SignedBeaconBlockHash,
        SignedBlockContentsTuple,
        BeaconState,
    ),
    BlockError,
> {
    self.set_current_slot(slot);
    let (block_contents, new_state) = self.make_block(state, slot).await;
    let block_hash = self
        .process_block(
            slot,
            block_contents.0.canonical_root(),
            block_contents.clone(),
        )
        .await?;
    Ok((block_hash, block_contents, new_state))
}

/// Produces attestations to `block` from `validators` and processes them.
pub fn attest_block(
    &self,
    state: &BeaconState,
    state_root: Hash256,
    block_hash: SignedBeaconBlockHash,
    block: &SignedBeaconBlock,
    validators: &[usize],
) {
    let attestations =
        self.make_attestations(validators, state, state_root, block_hash, block.slot());
    self.process_attestations(attestations, state);
}

/// Produces and processes sync committee contributions for `block_hash` at `slot`.
pub fn sync_committee_sign_block(
    &self,
    state: &BeaconState,
    block_hash: Hash256,
    slot: Slot,
    relative_sync_committee: RelativeSyncCommittee,
) {
    let sync_contributions =
        self.make_sync_contributions(state, block_hash, slot, relative_sync_committee);
    self.process_sync_contributions(sync_contributions).unwrap()
}

/// As `add_attested_block_at_slot_with_sync` with no sync committee participation.
pub async fn add_attested_block_at_slot(
    &self,
    slot: Slot,
    state: BeaconState,
    state_root: Hash256,
    validators: &[usize],
) -> Result<(SignedBeaconBlockHash, BeaconState), BlockError> {
    self.add_attested_block_at_slot_with_sync(
        slot,
        state,
        state_root,
        validators,
        SyncCommitteeStrategy::NoValidators,
    )
    .await
}

/// Adds a block at `slot`, attests to it with `validators` and — when requested and a
/// sync committee exists — signs it with the sync committee as well.
pub async fn add_attested_block_at_slot_with_sync(
    &self,
    slot: Slot,
    state: BeaconState,
    state_root: Hash256,
    validators: &[usize],
    sync_committee_strategy: SyncCommitteeStrategy,
) -> Result<(SignedBeaconBlockHash, BeaconState), BlockError> {
    let (block_hash, block, state) = self.add_block_at_slot(slot, state).await?;
    self.attest_block(&state, state_root, block_hash, &block.0, validators);
    if sync_committee_strategy == SyncCommitteeStrategy::AllValidators
        && state.current_sync_committee().is_ok()
    {
        self.sync_committee_sign_block(
            &state,
            block_hash.into(),
            slot,
            // Use the *next* committee when the following slot crosses a sync
            // committee period boundary.
            if (slot +
            1).epoch(E::slots_per_epoch()) % self.spec.epochs_per_sync_committee_period == 0
            {
                RelativeSyncCommittee::Next
            } else {
                RelativeSyncCommittee::Current
            },
        );
    }
    Ok((block_hash, state))
}

/// As `add_attested_blocks_at_slots_with_sync` with no sync committee participation.
pub async fn add_attested_blocks_at_slots(
    &self,
    state: BeaconState,
    state_root: Hash256,
    slots: &[Slot],
    validators: &[usize],
) -> AddBlocksResult {
    self.add_attested_blocks_at_slots_with_sync(
        state,
        state_root,
        slots,
        validators,
        SyncCommitteeStrategy::NoValidators,
    )
    .await
}

/// Adds attested blocks at each of `slots`, threading the post-state through.
pub async fn add_attested_blocks_at_slots_with_sync(
    &self,
    state: BeaconState,
    state_root: Hash256,
    slots: &[Slot],
    validators: &[usize],
    sync_committee_strategy: SyncCommitteeStrategy,
) -> AddBlocksResult {
    assert!(!slots.is_empty());
    self.add_attested_blocks_at_slots_given_lbh(
        state,
        state_root,
        slots,
        validators,
        None,
        sync_committee_strategy,
    )
    .await
}

/// Feeds sync contributions for `block_root` into the op pool and recomputes the
/// light client server cache. No-op before Altair (no sync committees).
fn update_light_client_server_cache(
    &self,
    state: &BeaconState,
    slot: Slot,
    block_root: Hash256,
) {
    let fork_name = state.fork_name(&self.spec).unwrap();
    if !fork_name.altair_enabled() {
        return;
    }
    let contributions =
        self.make_sync_contributions(state, block_root, slot, RelativeSyncCommittee::Current);
    for (_, contribution_and_proof) in contributions {
        let Some(contribution_and_proof) = contribution_and_proof else {
            continue;
        };
        let contribution = contribution_and_proof.message.contribution;
        // NOTE(review): the same contribution is inserted twice in a row — presumably
        // to exercise the op pool's aggregation path; confirm before "deduplicating".
        self.chain
            .op_pool
            .insert_sync_contribution(contribution.clone())
            .unwrap();
        self.chain
            .op_pool
            .insert_sync_contribution(contribution)
            .unwrap();
    }
    let Some(sync_aggregate) = self.chain.op_pool.get_sync_aggregate(state).unwrap() else {
        return;
    };
    // Errors are deliberately ignored: cache recomputation is best-effort in tests.
    let _ = self
        .chain
        .light_client_server_cache
        .recompute_and_cache_updates(
            self.chain.store.clone(),
            slot,
            &block_root,
            &sync_aggregate,
            &self.spec,
        );
}

/// As `add_attested_blocks_at_slots_given_lbh`, additionally populating light client
/// data for every block that is added.
pub async fn add_attested_blocks_at_slots_with_lc_data(
    &self,
    mut state: BeaconState,
    state_root: Hash256,
    slots: &[Slot],
    validators: &[usize],
    mut latest_block_hash: Option,
    sync_committee_strategy: SyncCommitteeStrategy,
) -> AddBlocksResult {
    assert!(slots.is_sorted(), "Slots have to be in ascending order");
    let mut block_hash_from_slot: HashMap = HashMap::new();
    let mut state_hash_from_slot: HashMap = HashMap::new();
    for slot in slots {
        let (block_hash, new_state) = self
            .add_attested_block_at_slot_with_sync(
                *slot,
                state,
                state_root,
                validators,
                sync_committee_strategy,
            )
            .await
            .unwrap();
        state = new_state;
        // Populate light client data for the block that was just added.
        self.update_light_client_server_cache(&state, *slot, block_hash.into());
        block_hash_from_slot.insert(*slot, block_hash);
        state_hash_from_slot.insert(*slot, state.canonical_root().unwrap().into());
        latest_block_hash = Some(block_hash);
    }
    (
        block_hash_from_slot,
        state_hash_from_slot,
        latest_block_hash.unwrap(),
        state,
    )
}

/// Adds attested blocks at `slots`, returning maps from slot to block/state hash plus
/// the latest block hash and the final state ("lbh" = latest block hash).
async fn add_attested_blocks_at_slots_given_lbh(
    &self,
    mut state: BeaconState,
    state_root: Hash256,
    slots: &[Slot],
    validators: &[usize],
    mut latest_block_hash: Option,
    sync_committee_strategy: SyncCommitteeStrategy,
) -> AddBlocksResult {
    assert!(slots.is_sorted(), "Slots have to be in ascending order");
    let mut block_hash_from_slot: HashMap = HashMap::new();
    let mut state_hash_from_slot: HashMap = HashMap::new();
    for slot in slots {
        // Using a `Box::pin` to reduce the stack size. Clippy was raising a lints.
        let (block_hash, new_state) = Box::pin(self.add_attested_block_at_slot_with_sync(
            *slot,
            state,
            state_root,
            validators,
            sync_committee_strategy,
        ))
        .await
        .unwrap();
        state = new_state;
        block_hash_from_slot.insert(*slot, block_hash);
        state_hash_from_slot.insert(*slot, state.canonical_root().unwrap().into());
        latest_block_hash = Some(block_hash);
    }
    (
        block_hash_from_slot,
        state_hash_from_slot,
        latest_block_hash.unwrap(),
        state,
    )
}

/// A monstrosity of great usefulness.
///
/// Calls `add_attested_blocks_at_slots` for each of the chains in `chains`,
/// taking care to batch blocks by epoch so that the slot clock gets advanced one
/// epoch at a time.
///
/// Chains is a vec of `(state, slots, validators)` tuples.
pub async fn add_blocks_on_multiple_chains(
    &self,
    chains: Vec<(BeaconState, Vec, Vec)>,
) -> Vec> {
    let slots_per_epoch = E::slots_per_epoch();
    // Overall epoch range spanned by all chains combined.
    let min_epoch = chains
        .iter()
        .map(|(_, slots, _)| slots.iter().min().unwrap())
        .min()
        .unwrap()
        .epoch(slots_per_epoch);
    let max_epoch = chains
        .iter()
        .map(|(_, slots, _)| slots.iter().max().unwrap())
        .max()
        .unwrap()
        .epoch(slots_per_epoch);
    // Augment each chain with accumulators: block hashes, state hashes and the
    // current head block (initially the zero hash).
    let mut chains = chains
        .into_iter()
        .map(|(state, slots, validators)| {
            (
                state,
                slots,
                validators,
                HashMap::new(),
                HashMap::new(),
                SignedBeaconBlockHash::from(Hash256::zero()),
            )
        })
        .collect::>();
    // Advance one epoch at a time, extending every chain within that epoch before
    // moving on, so the shared slot clock only ever moves forward.
    for epoch in min_epoch.as_u64()..=max_epoch.as_u64() {
        let mut new_chains = vec![];
        for (
            mut head_state,
            slots,
            validators,
            mut block_hashes,
            mut state_hashes,
            head_block,
        ) in chains
        {
            let epoch_slots = slots
                .iter()
                .filter(|s| s.epoch(slots_per_epoch).as_u64() == epoch)
                .copied()
                .collect::>();
            let head_state_root = head_state.update_tree_hash_cache().unwrap();
            let (new_block_hashes, new_state_hashes, new_head_block, new_head_state) = self
                .add_attested_blocks_at_slots_given_lbh(
                    head_state,
                    head_state_root,
                    &epoch_slots,
                    &validators,
                    Some(head_block),
                    SyncCommitteeStrategy::NoValidators, // for backwards compat
                )
                .await;
            block_hashes.extend(new_block_hashes);
            state_hashes.extend(new_state_hashes);
            new_chains.push((
                new_head_state,
                slots,
                validators,
                block_hashes,
                state_hashes,
                new_head_block,
            ));
        }
        chains = new_chains;
    }
    // Strip the working slots/validators, returning only the accumulated results.
    chains
        .into_iter()
        .map(|(state, _, _, block_hashes, state_hashes, head_block)| {
            (block_hashes, state_hashes, head_block, state)
        })
        .collect()
}

/// Returns the set of non-zero finalized checkpoint roots observed in a full chain dump.
pub fn get_finalized_checkpoints(&self) -> HashSet {
    let chain_dump = self.chain.chain_dump().unwrap();
    chain_dump
        .iter()
        .cloned()
        .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root)
        // The zero root is a placeholder (nothing finalized yet).
        .filter(|block_hash| *block_hash != Hash256::zero())
        .map(|hash| hash.into())
        .collect()
}

/// Advance the slot of the `BeaconChain`.
///
/// Does not produce blocks or attestations.
pub fn advance_slot(&self) {
    // A single tick of the testing slot clock.
    self.chain.slot_clock.advance_slot();
}

/// Advance the clock to `lookahead` before the start of `slot`.
pub fn advance_to_slot_lookahead(&self, slot: Slot, lookahead: Duration) {
    let slot_start = self.chain.slot_clock.start_of(slot).unwrap();
    self.chain.slot_clock.set_current_time(slot_start - lookahead);
}

/// Uses `Self::extend_chain` to build the chain out to the `target_slot`.
pub async fn extend_to_slot(&self, target_slot: Slot) -> Hash256 {
    // If the clock still sits on the head's slot, tick it forward so there is a
    // fresh slot available for block production.
    if self.chain.canonical_head.cached_head().head_slot() == self.chain.slot().unwrap() {
        self.advance_slot();
    }
    // Inclusive distance from the (possibly just advanced) current slot to the target.
    let current = self.chain.slot().unwrap().as_usize();
    let num_slots = target_slot
        .as_usize()
        .checked_sub(current)
        .expect("target_slot must be >= current_slot")
        .checked_add(1)
        .unwrap();
    self.extend_slots(num_slots).await
}

/// Uses `Self::extend_chain` to `num_slots` blocks.
///
/// Utilizes:
///
/// - BlockStrategy::OnCanonicalHead,
/// - AttestationStrategy::AllValidators,
pub async fn extend_slots(&self, num_slots: usize) -> Hash256 {
    // Ensure the clock has moved past the current head before producing blocks.
    if self.chain.canonical_head.cached_head().head_slot() == self.chain.slot().unwrap() {
        self.advance_slot();
    }
    self.extend_chain(
        num_slots,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    )
    .await
}

/// Uses `Self::extend_chain` to `num_slots` blocks.
///
/// Utilizes:
///
/// - BlockStrategy::OnCanonicalHead,
/// - AttestationStrategy::SomeValidators(validators),
pub async fn extend_slots_some_validators(
    &self,
    num_slots: usize,
    validators: Vec<usize>,
) -> Hash256 {
    // Ensure the clock has moved past the current head before producing blocks.
    if self.chain.canonical_head.cached_head().head_slot() == self.chain.slot().unwrap() {
        self.advance_slot();
    }
    self.extend_chain(
        num_slots,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::SomeValidators(validators),
    )
    .await
}

/// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the
/// last-produced block (the head of the chain).
///
/// Chain will be extended by `num_blocks` blocks.
///
/// The `block_strategy` dictates where the new blocks will be placed.
///
/// The `attestation_strategy` dictates which validators will attest to the newly created
/// blocks.
pub async fn extend_chain(
    &self,
    num_blocks: usize,
    block_strategy: BlockStrategy,
    attestation_strategy: AttestationStrategy,
) -> Hash256 {
    self.extend_chain_with_sync(
        num_blocks,
        block_strategy,
        attestation_strategy,
        SyncCommitteeStrategy::NoValidators,
        LightClientStrategy::Disabled,
    )
    .await
}

/// As `extend_chain`, additionally producing light client data.
pub async fn extend_chain_with_light_client_data(
    &self,
    num_blocks: usize,
    block_strategy: BlockStrategy,
    attestation_strategy: AttestationStrategy,
) -> Hash256 {
    self.extend_chain_with_sync(
        num_blocks,
        block_strategy,
        attestation_strategy,
        SyncCommitteeStrategy::NoValidators,
        LightClientStrategy::Enabled,
    )
    .await
}

/// Fully-parameterised chain extension: block placement, attesters, sync committee
/// participation and light client data production are all configurable.
pub async fn extend_chain_with_sync(
    &self,
    num_blocks: usize,
    block_strategy: BlockStrategy,
    attestation_strategy: AttestationStrategy,
    sync_committee_strategy: SyncCommitteeStrategy,
    light_client_strategy: LightClientStrategy,
) -> Hash256 {
    // Determine the starting state and the concrete slots to fill.
    let (mut state, slots) = match block_strategy {
        BlockStrategy::OnCanonicalHead => {
            let current_slot: u64 = self.get_current_slot().into();
            let slots: Vec = (current_slot..(current_slot + (num_blocks as u64)))
                .map(Slot::new)
                .collect();
            let state = self.get_current_state();
            (state, slots)
        }
        BlockStrategy::ForkCanonicalChainAt {
            previous_slot,
            first_slot,
        } => {
            let first_slot_: u64 = first_slot.into();
            let slots: Vec = (first_slot_..(first_slot_ + (num_blocks as u64)))
                .map(Slot::new)
                .collect();
            // Fork off from the canonical chain's state at `previous_slot`.
            let state = self
                .chain
                .state_at_slot(previous_slot, StateSkipConfig::WithStateRoots)
                .unwrap();
            (state, slots)
        }
    };
    let validators = match attestation_strategy {
        AttestationStrategy::AllValidators => self.get_all_validators(),
        AttestationStrategy::SomeValidators(vals) => vals,
    };
    let state_root = state.update_tree_hash_cache().unwrap();
    let (_, _, last_produced_block_hash, _) = match light_client_strategy {
        LightClientStrategy::Enabled => {
            self.add_attested_blocks_at_slots_with_lc_data(
                state,
                state_root,
                &slots,
                &validators,
                None,
                sync_committee_strategy,
            )
            .await
        }
        LightClientStrategy::Disabled => {
            self.add_attested_blocks_at_slots_with_sync(
                state,
                state_root,
                &slots,
                &validators,
                sync_committee_strategy,
            )
            .await
        }
    };
    last_produced_block_hash.into()
}

/// Deprecated: Use add_attested_blocks_at_slots() instead
///
/// Creates two forks:
///
/// - The "honest" fork: created by the `honest_validators` who have built `honest_fork_blocks`
///   on the head
/// - The "faulty" fork: created by the `faulty_validators` who skipped a slot and
///   then built `faulty_fork_blocks`.
///
/// Returns `(honest_head, faulty_head)`, the roots of the blocks at the top of each chain.
pub async fn generate_two_forks_by_skipping_a_block(
    &self,
    honest_validators: &[usize],
    faulty_validators: &[usize],
    honest_fork_blocks: usize,
    faulty_fork_blocks: usize,
) -> (Hash256, Hash256) {
    let initial_head_slot = self.chain.head_snapshot().beacon_block.slot();
    // Move to the next slot so we may produce some more blocks on the head.
    self.advance_slot();
    // Extend the chain with blocks where only honest validators agree.
    let honest_head = self
        .extend_chain(
            honest_fork_blocks,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::SomeValidators(honest_validators.to_vec()),
        )
        .await;
    // Go back to the last block where all agreed, and build blocks upon it where only faulty nodes
    // agree.
    let faulty_head = self
        .extend_chain(
            faulty_fork_blocks,
            BlockStrategy::ForkCanonicalChainAt {
                previous_slot: initial_head_slot,
                // `initial_head_slot + 2` means one slot is skipped.
                first_slot: initial_head_slot + 2,
            },
            AttestationStrategy::SomeValidators(faulty_validators.to_vec()),
        )
        .await;
    assert_ne!(honest_head, faulty_head, "forks should be distinct");
    (honest_head, faulty_head)
}

/// Gossip-verifies every sync contribution, then adds them all to the block inclusion
/// pool. Fails fast on the first verification error.
pub fn process_sync_contributions(
    &self,
    sync_contributions: HarnessSyncContributions,
) -> Result<(), SyncCommitteeError> {
    let mut verified_contributions = Vec::with_capacity(sync_contributions.len());
    for (_, contribution_and_proof) in sync_contributions {
        let signed_contribution_and_proof = contribution_and_proof.unwrap();
        let verified_contribution = self
            .chain
            .verify_sync_contribution_for_gossip(signed_contribution_and_proof)?;
        verified_contributions.push(verified_contribution);
    }
    for verified_contribution in verified_contributions {
        self.chain
            .add_contribution_to_block_inclusion_pool(verified_contribution)?;
    }
    Ok(())
}

/// Simulate some of the blobs / data columns being seen on gossip.
/// Converts the blobs to data columns if the slot is Fulu or later.
pub async fn process_gossip_blobs_or_columns<'a>(
    &self,
    block: &SignedBeaconBlock,
    blobs: impl Iterator>,
    proofs: impl Iterator,
    custody_columns_opt: Option>,
) {
    let is_peerdas_enabled = self.chain.spec.is_peer_das_enabled_for_epoch(block.epoch());
    if is_peerdas_enabled {
        // Default to custodying the first `sampling_column_count` column indices.
        let custody_columns = custody_columns_opt.unwrap_or_else(|| {
            let sampling_column_count = self.get_sampling_column_count() as u64;
            (0..sampling_column_count).collect()
        });
        let verified_columns = generate_data_column_sidecars_from_block(block, &self.spec)
            .into_iter()
            .filter(|c| custody_columns.contains(&c.index))
            .map(|sidecar| {
                let column_index = sidecar.index;
                self.chain
                    .verify_data_column_sidecar_for_gossip(sidecar, column_index)
            })
            .collect::, _>>()
            .unwrap();
        if !verified_columns.is_empty() {
            self.chain
                .process_gossip_data_columns(verified_columns, || Ok(()))
                .await
                .unwrap();
        }
    } else {
        // Pre-Fulu: verify and import each blob sidecar individually.
        for (i, (kzg_proof, blob)) in proofs.into_iter().zip(blobs).enumerate() {
            let sidecar =
                Arc::new(BlobSidecar::new(i, blob.clone(), block,
*kzg_proof).unwrap());
            let gossip_blob = GossipVerifiedBlob::new(sidecar, i as u64, &self.chain)
                .expect("should obtain gossip verified blob");
            self.chain
                .process_gossip_blob(gossip_blob)
                .await
                .expect("should import valid gossip verified blob");
        }
    }
}
}

// Junk `Debug` impl to satisfy certain trait bounds during testing.
impl fmt::Debug for BeaconChainHarness {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "BeaconChainHarness")
    }
}

/// Options controlling attestation production helpers.
pub struct MakeAttestationOptions {
    /// Produce exactly `limit` attestations.
    pub limit: Option,
    /// Fork to use for signing attestations.
    pub fork: Fork,
}

/// How many blobs to attach to a randomly generated block.
pub enum NumBlobs {
    /// A random count between 1 and the per-block maximum.
    Random,
    /// Exactly this many blobs.
    Number(usize),
    /// No blobs at all.
    None,
}

/// Generates a random signed block for `fork_name` plus matching blob sidecars.
///
/// For forks without blobs (pre-Deneb) the sidecar list is empty.
pub fn generate_rand_block_and_blobs(
    fork_name: ForkName,
    num_blobs: NumBlobs,
    rng: &mut impl Rng,
    spec: &ChainSpec,
) -> (SignedBeaconBlock>, Vec>) {
    let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng));
    let mut block = SignedBeaconBlock::from_block(inner, types::Signature::random_for_test(rng));
    let max_blobs = spec.max_blobs_per_block(block.epoch()) as usize;
    let mut blob_sidecars = vec![];
    // Per-fork: generate blob transactions, splice them into the execution payload and
    // record the KZG commitments bundle for sidecar construction below.
    let bundle = match block {
        SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb {
            ref mut message, ..
        }) => {
            // Get either zero blobs or a random number of blobs between 1 and Max Blobs.
            let payload: &mut FullPayloadDeneb = &mut message.body.execution_payload;
            let num_blobs = match num_blobs {
                NumBlobs::Random => rng.gen_range(1..=max_blobs),
                NumBlobs::Number(n) => n,
                NumBlobs::None => 0,
            };
            let (bundle, transactions) =
                execution_layer::test_utils::generate_blobs::(num_blobs, fork_name).unwrap();
            // Replace the payload's transactions with the freshly generated blob txs.
            payload.execution_payload.transactions = <_>::default();
            for tx in Vec::from(transactions) {
                payload.execution_payload.transactions.push(tx).unwrap();
            }
            message.body.blob_kzg_commitments = bundle.commitments.clone();
            bundle
        }
        SignedBeaconBlock::Electra(SignedBeaconBlockElectra {
            ref mut message, ..
        }) => {
            // Get either zero blobs or a random number of blobs between 1 and Max Blobs.
            let payload: &mut FullPayloadElectra = &mut message.body.execution_payload;
            let num_blobs = match num_blobs {
                NumBlobs::Random => rng.gen_range(1..=max_blobs),
                NumBlobs::Number(n) => n,
                NumBlobs::None => 0,
            };
            let (bundle, transactions) =
                execution_layer::test_utils::generate_blobs::(num_blobs, fork_name).unwrap();
            // Replace the payload's transactions with the freshly generated blob txs.
            payload.execution_payload.transactions = <_>::default();
            for tx in Vec::from(transactions) {
                payload.execution_payload.transactions.push(tx).unwrap();
            }
            message.body.blob_kzg_commitments = bundle.commitments.clone();
            bundle
        }
        SignedBeaconBlock::Fulu(SignedBeaconBlockFulu {
            ref mut message, ..
        }) => {
            // Get either zero blobs or a random number of blobs between 1 and Max Blobs.
            let payload: &mut FullPayloadFulu = &mut message.body.execution_payload;
            let num_blobs = match num_blobs {
                NumBlobs::Random => rng.gen_range(1..=max_blobs),
                NumBlobs::Number(n) => n,
                NumBlobs::None => 0,
            };
            let (bundle, transactions) =
                execution_layer::test_utils::generate_blobs::(num_blobs, fork_name).unwrap();
            // Replace the payload's transactions with the freshly generated blob txs.
            payload.execution_payload.transactions = <_>::default();
            for tx in Vec::from(transactions) {
                payload.execution_payload.transactions.push(tx).unwrap();
            }
            message.body.blob_kzg_commitments = bundle.commitments.clone();
            bundle
        }
        // Pre-Deneb forks carry no blobs: return the block with an empty sidecar list.
        _ => return (block, blob_sidecars),
    };
    let eth2::types::BlobsBundle {
        commitments,
        proofs,
        blobs,
    } = bundle;
    // Build one sidecar per blob, pairing each with its commitment and proof.
    for (index, ((blob, kzg_commitment), kzg_proof)) in blobs
        .into_iter()
        .zip(commitments.into_iter())
        .zip(proofs.into_iter())
        .enumerate()
    {
        blob_sidecars.push(BlobSidecar {
            index: index as u64,
            blob: blob.clone(),
            kzg_commitment,
            kzg_proof,
            signed_block_header: block.signed_block_header(),
            kzg_commitment_inclusion_proof: block
                .message()
                .body()
                .kzg_commitment_merkle_proof(index)
                .unwrap(),
        });
    }
    (block, blob_sidecars)
}

/// Generates a random signed block for `fork_name` plus matching data column sidecars.
pub fn generate_rand_block_and_data_columns(
    fork_name: ForkName,
    num_blobs: NumBlobs,
    rng: &mut impl Rng,
    spec: &ChainSpec,
) -> (
    SignedBeaconBlock>,
    DataColumnSidecarList,
) {
    let (block, _blobs) =
        generate_rand_block_and_blobs(fork_name, num_blobs, rng, spec);
    let data_columns = generate_data_column_sidecars_from_block(&block, spec);
    (block, data_columns)
}

/// Generate data column sidecars from pre-computed cells and proofs.
fn generate_data_column_sidecars_from_block(
    block: &SignedBeaconBlock,
    spec: &ChainSpec,
) -> DataColumnSidecarList {
    let kzg_commitments = block.message().body().blob_kzg_commitments().unwrap();
    // No commitments means no columns to build.
    if kzg_commitments.is_empty() {
        return vec![];
    }
    let kzg_commitments_inclusion_proof = block
        .message()
        .body()
        .kzg_commitments_merkle_proof()
        .unwrap();
    let signed_block_header = block.signed_block_header();
    // load the precomputed column sidecar to avoid computing them for every block in the tests.
    let template_data_columns = RuntimeVariableList::>::from_ssz_bytes(
        TEST_DATA_COLUMN_SIDECARS_SSZ,
        spec.number_of_columns as usize,
    )
    .unwrap();
    // Extract the single (cell, proof) pair from each template column.
    let (cells, proofs) = template_data_columns
        .into_iter()
        .map(|sidecar| {
            let DataColumnSidecar {
                column, kzg_proofs, ..
            } = sidecar;
            // There's only one cell per column for a single blob
            let cell_bytes: Vec = column.into_iter().next().unwrap().into();
            let kzg_cell = cell_bytes.try_into().unwrap();
            let kzg_proof = kzg_proofs.into_iter().next().unwrap();
            (kzg_cell, kzg_proof)
        })
        .collect::<(Vec<_>, Vec<_>)>();
    // Repeat the cells and proofs for every blob
    let blob_cells_and_proofs_vec =
        vec![(cells.try_into().unwrap(), proofs.try_into().unwrap()); kzg_commitments.len()];
    build_data_column_sidecars(
        kzg_commitments.clone(),
        kzg_commitments_inclusion_proof,
        signed_block_header,
        blob_cells_and_proofs_vec,
        spec,
    )
    .unwrap()
}