diff --git a/Cargo.lock b/Cargo.lock index f196d29e5c..be6e2bf3f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -808,7 +808,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "7.0.0-beta.0" +version = "7.0.0-beta.4" dependencies = [ "account_utils", "beacon_chain", @@ -1046,7 +1046,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "7.0.0-beta.0" +version = "7.0.0-beta.4" dependencies = [ "beacon_node", "bytes", @@ -2570,7 +2570,7 @@ dependencies = [ "bls", "hex", "num-bigint-dig", - "ring 0.16.20", + "ring", "sha2 0.9.9", "zeroize", ] @@ -2741,7 +2741,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c853bd72c9e5787f8aafc3df2907c2ed03cff3150c3acd94e2e53a98ab70a8ab" dependencies = [ "cpufeatures", - "ring 0.17.14", + "ring", "sha2 0.10.8", ] @@ -4592,7 +4592,7 @@ dependencies = [ "base64 0.22.1", "js-sys", "pem", - "ring 0.17.14", + "ring", "serde", "serde_json", "simple_asn1", @@ -4679,7 +4679,7 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" dependencies = [ - "spin 0.9.8", + "spin", ] [[package]] @@ -4690,7 +4690,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "7.0.0-beta.0" +version = "7.0.0-beta.4" dependencies = [ "account_utils", "beacon_chain", @@ -5062,7 +5062,7 @@ dependencies = [ "libp2p-tls", "quinn", "rand 0.8.5", - "ring 0.17.14", + "ring", "rustls 0.23.23", "socket2", "thiserror 2.0.12", @@ -5132,7 +5132,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "rcgen", - "ring 0.17.14", + "ring", "rustls 0.23.23", "rustls-webpki 0.101.7", "thiserror 2.0.12", @@ -5252,7 +5252,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "7.0.0-beta.0" +version = "7.0.0-beta.4" dependencies = [ "account_manager", "account_utils", @@ -6753,7 +6753,6 @@ dependencies = [ "lazy_static", "memchr", "parking_lot 
0.12.3", - "protobuf", "thiserror 1.0.69", ] @@ -6824,12 +6823,6 @@ dependencies = [ "types", ] -[[package]] -name = "protobuf" -version = "2.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" - [[package]] name = "psutil" version = "3.3.0" @@ -6926,7 +6919,7 @@ dependencies = [ "bytes", "getrandom 0.2.15", "rand 0.8.5", - "ring 0.17.14", + "ring", "rustc-hash 2.1.1", "rustls 0.23.23", "rustls-pki-types", @@ -7090,7 +7083,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75e669e5202259b5314d1ea5397316ad400819437857b90861765f24c4cf80a2" dependencies = [ "pem", - "ring 0.17.14", + "ring", "rustls-pki-types", "time", "yasna", @@ -7271,21 +7264,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin 0.5.2", - "untrusted 0.7.1", - "web-sys", - "winapi", -] - [[package]] name = "ring" version = "0.17.14" @@ -7296,7 +7274,7 @@ dependencies = [ "cfg-if", "getrandom 0.2.15", "libc", - "untrusted 0.9.0", + "untrusted", "windows-sys 0.52.0", ] @@ -7517,7 +7495,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", - "ring 0.17.14", + "ring", "rustls-webpki 0.101.7", "sct", ] @@ -7529,7 +7507,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ "log", - "ring 0.17.14", + "ring", "rustls-pki-types", "rustls-webpki 0.102.8", "subtle", @@ -7543,7 +7521,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" 
dependencies = [ "once_cell", - "ring 0.17.14", + "ring", "rustls-pki-types", "rustls-webpki 0.102.8", "subtle", @@ -7583,8 +7561,8 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.14", - "untrusted 0.9.0", + "ring", + "untrusted", ] [[package]] @@ -7593,9 +7571,9 @@ version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ - "ring 0.17.14", + "ring", "rustls-pki-types", - "untrusted 0.9.0", + "untrusted", ] [[package]] @@ -7727,8 +7705,8 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.14", - "untrusted 0.9.0", + "ring", + "untrusted", ] [[package]] @@ -8179,7 +8157,7 @@ dependencies = [ "chacha20poly1305", "curve25519-dalek", "rand_core 0.6.4", - "ring 0.17.14", + "ring", "rustc_version 0.4.1", "sha2 0.10.8", "subtle", @@ -8195,12 +8173,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - [[package]] name = "spin" version = "0.9.8" @@ -9219,12 +9191,6 @@ dependencies = [ "tokio-util", ] -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - [[package]] name = "untrusted" version = "0.9.0" diff --git a/Cargo.toml b/Cargo.toml index 37ecd28eba..9d9141c922 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -159,7 +159,7 @@ mockito = "1.5.0" num_cpus = "1" parking_lot = "0.12" paste = "1" -prometheus = "0.13" +prometheus = { version = "0.13", 
default-features = false } quickcheck = "1" quickcheck_macros = "1" quote = "1" @@ -174,7 +174,7 @@ reqwest = { version = "0.11", default-features = false, features = [ "rustls-tls", "native-tls-vendored", ] } -ring = "0.16" +ring = "0.17" rpds = "0.11" rusqlite = { version = "0.28", features = ["bundled"] } serde = { version = "1", features = ["derive"] } diff --git a/Makefile b/Makefile index 3282e4fa0e..f621f38a63 100644 --- a/Makefile +++ b/Makefile @@ -250,7 +250,7 @@ install-audit: cargo install --force cargo-audit audit-CI: - cargo audit --ignore RUSTSEC-2025-0009 --ignore RUSTSEC-2024-0437 + cargo audit # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. vendor: diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index e30705719e..a537a1722c 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "7.0.0-beta.0" +version = "7.0.0-beta.4" authors = [ "Paul Hauner ", "Age Manning BeaconChain { .state_root_at_slot(state_slot)? .ok_or(BeaconChainError::NoStateForSlot(state_slot))?; + // This branch is reached from the HTTP API. We assume the user wants + // to cache states so that future calls are faster. let state = self - .get_state(&state_root, Some(state_slot))? + .get_state(&state_root, Some(state_slot), true)? 
.ok_or(BeaconChainError::MissingBeaconState(state_root))?; if state.fork_name_unchecked().altair_enabled() { diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index baacd93c45..6f1174c1ba 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -1126,6 +1126,12 @@ fn verify_head_block_is_known( } } + if !verify_attestation_is_finalized_checkpoint_or_descendant(attestation.data(), chain) { + return Err(Error::HeadBlockFinalized { + beacon_block_root: attestation.data().beacon_block_root, + }); + } + Ok(block) } else if chain.is_pre_finalization_block(attestation.data().beacon_block_root)? { Err(Error::HeadBlockFinalized { @@ -1359,6 +1365,29 @@ pub fn verify_committee_index(attestation: AttestationRef) -> Res Ok(()) } +fn verify_attestation_is_finalized_checkpoint_or_descendant( + attestation_data: &AttestationData, + chain: &BeaconChain, +) -> bool { + // If we have a split block newer than finalization then we also ban attestations which are not + // descended from that split block. It's important not to try checking `is_descendant` if + // finality is ahead of the split and the split block has been pruned, as `is_descendant` will + // return `false` in this case. + let fork_choice = chain.canonical_head.fork_choice_read_lock(); + let attestation_block_root = attestation_data.beacon_block_root; + let finalized_slot = fork_choice + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let split = chain.store.get_split_info(); + let is_descendant_from_split_block = split.slot == 0 + || split.slot <= finalized_slot + || fork_choice.is_descendant(split.block_root, attestation_block_root); + + fork_choice.is_finalized_checkpoint_or_descendant(attestation_block_root) + && is_descendant_from_split_block +} + /// Assists in readability. 
type CommitteesPerSlot = u64; diff --git a/beacon_node/beacon_chain/src/attester_cache.rs b/beacon_node/beacon_chain/src/attester_cache.rs index 7f356bd621..ae715afcd0 100644 --- a/beacon_node/beacon_chain/src/attester_cache.rs +++ b/beacon_node/beacon_chain/src/attester_cache.rs @@ -325,8 +325,10 @@ impl AttesterCache { return Ok(value); } + // We use `cache_state = true` here because if we are attesting to the state it's likely + // to be recent and useful for other things. let mut state: BeaconState = chain - .get_state(&state_root, None)? + .get_state(&state_root, None, true)? .ok_or(Error::MissingBeaconState(state_root))?; if state.slot() > slot { diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0defbecf35..0ae9c77001 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -42,7 +42,7 @@ use crate::light_client_optimistic_update_verification::{ Error as LightClientOptimisticUpdateError, VerifiedLightClientOptimisticUpdate, }; use crate::light_client_server_cache::LightClientServerCache; -use crate::migrate::BackgroundMigrator; +use crate::migrate::{BackgroundMigrator, ManualFinalizationNotification}; use crate::naive_aggregation_pool::{ AggregatedAttestationMap, Error as NaiveAggregationError, NaiveAggregationPool, SyncContributionAggregateMap, @@ -118,8 +118,8 @@ use std::sync::Arc; use std::time::Duration; use store::iter::{BlockRootsIterator, ParentRootBlockIterator, StateRootsIterator}; use store::{ - BlobSidecarListFromRoot, DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore, - KeyValueStoreOp, StoreItem, StoreOp, + BlobSidecarListFromRoot, DatabaseBlock, Error as DBError, HotColdDB, HotStateSummary, + KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; use task_executor::{ShutdownReason, TaskExecutor}; use tokio::sync::oneshot; @@ -812,8 +812,10 @@ impl BeaconChain { let block = self .get_blinded_block(&block_root)? 
.ok_or(Error::MissingBeaconBlock(block_root))?; + // This method is only used in tests, so we may as well cache states to make CI go brr. + // TODO(release-v7) move this method out of beacon chain and into `store_tests`` or something equivalent. let state = self - .get_state(&block.state_root(), Some(block.slot()))? + .get_state(&block.state_root(), Some(block.slot()), true)? .ok_or_else(|| Error::MissingBeaconState(block.state_root()))?; let iter = BlockRootsIterator::owned(&self.store, state); Ok(std::iter::once(Ok((block_root, block.slot()))) @@ -1339,8 +1341,9 @@ impl BeaconChain { &self, state_root: &Hash256, slot: Option, + update_cache: bool, ) -> Result>, Error> { - Ok(self.store.get_state(state_root, slot)?) + Ok(self.store.get_state(state_root, slot, update_cache)?) } /// Return the sync committee at `slot + 1` from the canonical chain. @@ -1512,8 +1515,14 @@ impl BeaconChain { })? .ok_or(Error::NoStateForSlot(slot))?; + // This branch is mostly reached from the HTTP API when doing analysis, or in niche + // situations when producing a block. In the HTTP API case we assume the user wants + // to cache states so that future calls are faster, and that if the cache is + // struggling due to non-finality that they will dial down inessential calls. In the + // block proposal case we want to cache the state so that we can process the block + // quickly after it has been signed. Ok(self - .get_state(&state_root, Some(slot))? + .get_state(&state_root, Some(slot), true)? .ok_or(Error::NoStateForSlot(slot))?) } } @@ -1695,6 +1704,45 @@ impl BeaconChain { } } + pub fn manually_compact_database(&self) { + self.store_migrator.process_manual_compaction(); + } + + pub fn manually_finalize_state( + &self, + state_root: Hash256, + checkpoint: Checkpoint, + ) -> Result<(), Error> { + let HotStateSummary { + slot, + latest_block_root, + .. + } = self + .store + .load_hot_state_summary(&state_root) + .map_err(BeaconChainError::DBError)? 
+ .ok_or(BeaconChainError::MissingHotStateSummary(state_root))?; + + if slot != checkpoint.epoch.start_slot(T::EthSpec::slots_per_epoch()) + || latest_block_root != *checkpoint.root + { + return Err(BeaconChainError::InvalidCheckpoint { + state_root, + checkpoint, + }); + } + + let notif = ManualFinalizationNotification { + state_root: state_root.into(), + checkpoint, + head_tracker: self.head_tracker.clone(), + genesis_block_root: self.genesis_block_root, + }; + + self.store_migrator.process_manual_finalization(notif); + Ok(()) + } + /// Returns an aggregated `Attestation`, if any, that has a matching `attestation.data`. /// /// The attestation will be obtained from `self.naive_aggregation_pool`. @@ -2839,6 +2887,15 @@ impl BeaconChain { chain_segment: Vec>, notify_execution_layer: NotifyExecutionLayer, ) -> ChainSegmentResult { + for block in chain_segment.iter() { + if let Err(error) = self.check_invalid_block_roots(block.block_root()) { + return ChainSegmentResult::Failed { + imported_blocks: vec![], + error, + }; + } + } + let mut imported_blocks = vec![]; // Filter uninteresting blocks from the chain segment in a blocking task. @@ -3330,6 +3387,15 @@ impl BeaconChain { self.remove_notified(&block_root, r) } + /// Check for known and configured invalid block roots before processing. + pub fn check_invalid_block_roots(&self, block_root: Hash256) -> Result<(), BlockError> { + if self.config.invalid_block_roots.contains(&block_root) { + Err(BlockError::KnownInvalidExecutionPayload(block_root)) + } else { + Ok(()) + } + } + /// Returns `Ok(block_root)` if the given `unverified_block` was successfully verified and /// imported into the chain. /// @@ -6775,9 +6841,11 @@ impl BeaconChain { })?; let beacon_state_root = beacon_block.state_root(); + // This branch is reached from the HTTP API. We assume the user wants + // to cache states so that future calls are faster. let mut beacon_state = self .store - .get_state(&beacon_state_root, Some(beacon_block.slot()))? 
+ .get_state(&beacon_state_root, Some(beacon_block.slot()), true)? .ok_or_else(|| { Error::DBInconsistent(format!("Missing state {:?}", beacon_state_root)) })?; @@ -6929,8 +6997,10 @@ impl BeaconChain { if signed_beacon_block.slot() % T::EthSpec::slots_per_epoch() == 0 { let block = self.get_blinded_block(&block_hash).unwrap().unwrap(); + // This branch is reached from the HTTP API. We assume the user wants + // to cache states so that future calls are faster. let state = self - .get_state(&block.state_root(), Some(block.slot())) + .get_state(&block.state_root(), Some(block.slot()), true) .unwrap() .unwrap(); finalized_blocks.insert(state.finalized_checkpoint().root); diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 88df48d0e9..599004d8bf 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -282,6 +282,9 @@ pub enum BlockError { /// problems to worry about than losing peers, and we're doing the network a favour by /// disconnecting. ParentExecutionPayloadInvalid { parent_root: Hash256 }, + /// This is a known invalid block that was listed in Lighthouses configuration. + /// At the moment this error is only relevant as part of the Holesky network recovery efforts. + KnownInvalidExecutionPayload(Hash256), /// The block is a slashable equivocation from the proposer. /// /// ## Peer scoring @@ -862,6 +865,9 @@ impl GossipVerifiedBlock { return Err(BlockError::DuplicateFullyImported(block_root)); } + // Do not process a block that is known to be invalid. + chain.check_invalid_block_roots(block_root)?; + // Do not process a block that doesn't descend from the finalized root. // // We check this *before* we load the parent so that we can return a more detailed error. 
@@ -1080,6 +1086,9 @@ impl SignatureVerifiedBlock { .fork_name(&chain.spec) .map_err(BlockError::InconsistentFork)?; + // Check whether the block is a banned block prior to loading the parent. + chain.check_invalid_block_roots(block_root)?; + let (mut parent, block) = load_parent(block, chain)?; let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( @@ -1746,7 +1755,22 @@ pub fn check_block_is_finalized_checkpoint_or_descendant< fork_choice: &BeaconForkChoice, block: B, ) -> Result { - if fork_choice.is_finalized_checkpoint_or_descendant(block.parent_root()) { + // If we have a split block newer than finalization then we also ban blocks which are not + // descended from that split block. It's important not to try checking `is_descendant` if + // finality is ahead of the split and the split block has been pruned, as `is_descendant` will + // return `false` in this case. + let finalized_slot = fork_choice + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let split = chain.store.get_split_info(); + let is_descendant_from_split_block = split.slot == 0 + || split.slot <= finalized_slot + || fork_choice.is_descendant(split.block_root, block.parent_root()); + + if fork_choice.is_finalized_checkpoint_or_descendant(block.parent_root()) + && is_descendant_from_split_block + { Ok(block) } else { // If fork choice does *not* consider the parent to be a descendant of the finalized block, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 78216770e5..010190bfbc 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -282,8 +282,13 @@ where .get_blinded_block(&chain.genesis_block_root) .map_err(|e| descriptive_db_error("genesis block", &e))? .ok_or("Genesis block not found in store")?; + // We're resuming from some state in the db so it makes sense to cache it. 
let genesis_state = store - .get_state(&genesis_block.state_root(), Some(genesis_block.slot())) + .get_state( + &genesis_block.state_root(), + Some(genesis_block.slot()), + true, + ) .map_err(|e| descriptive_db_error("genesis state", &e))? .ok_or("Genesis state not found in store")?; diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index bac47f5da7..d99c6038d3 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -773,6 +773,12 @@ impl BeaconChain { .execution_status .is_optimistic_or_invalid(); + // Update the state cache so it doesn't mistakenly prune the new head. + self.store + .state_cache + .lock() + .update_head_block_root(new_cached_head.head_block_root()); + // Detect and potentially report any re-orgs. let reorg_distance = detect_reorg( &old_snapshot.beacon_state, diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index b881438c1c..808c96d965 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -1,7 +1,8 @@ pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use serde::{Deserialize, Serialize}; -use std::time::Duration; -use types::{Checkpoint, Epoch}; +use std::str::FromStr; +use std::{collections::HashSet, sync::LazyLock, time::Duration}; +use types::{Checkpoint, Epoch, Hash256}; pub const DEFAULT_RE_ORG_HEAD_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20); pub const DEFAULT_RE_ORG_PARENT_THRESHOLD: ReOrgThreshold = ReOrgThreshold(160); @@ -19,6 +20,12 @@ pub const FORK_CHOICE_LOOKAHEAD_FACTOR: u32 = 24; /// Default sync tolerance epochs. pub const DEFAULT_SYNC_TOLERANCE_EPOCHS: u64 = 2; +/// Invalid block root to be banned from processing and importing on Holesky network by default. 
+pub static INVALID_HOLESKY_BLOCK_ROOT: LazyLock = LazyLock::new(|| { + Hash256::from_str("2db899881ed8546476d0b92c6aa9110bea9a4cd0dbeb5519eb0ea69575f1f359") + .expect("valid block root") +}); + #[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)] pub struct ChainConfig { /// Maximum number of slots to skip when importing an attestation. @@ -104,6 +111,11 @@ pub struct ChainConfig { pub block_publishing_delay: Option, /// Artificial delay for data column publishing. For PeerDAS testing only. pub data_column_publishing_delay: Option, + /// Block roots of "banned" blocks which Lighthouse will refuse to import. + /// + /// On Holesky there is a block which is added to this set by default but which can be removed + /// by using `--invalid-block-roots ""`. + pub invalid_block_roots: HashSet, } impl Default for ChainConfig { @@ -142,6 +154,7 @@ impl Default for ChainConfig { sync_tolerance_epochs: DEFAULT_SYNC_TOLERANCE_EPOCHS, block_publishing_delay: None, data_column_publishing_delay: None, + invalid_block_roots: HashSet::new(), } } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 2e13ab4090..8509c52c8a 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -61,6 +61,7 @@ pub enum BeaconChainError { ForkChoiceStoreError(ForkChoiceStoreError), MissingBeaconBlock(Hash256), MissingBeaconState(Hash256), + MissingHotStateSummary(Hash256), SlotProcessingError(SlotProcessingError), EpochProcessingError(EpochProcessingError), StateAdvanceError(StateAdvanceError), @@ -181,9 +182,9 @@ pub enum BeaconChainError { execution_block_hash: Option, }, ForkchoiceUpdate(execution_layer::Error), - FinalizedCheckpointMismatch { - head_state: Checkpoint, - fork_choice: Hash256, + InvalidCheckpoint { + state_root: Hash256, + checkpoint: Checkpoint, }, InvalidSlot(Slot), HeadBlockNotFullyVerified { diff --git a/beacon_node/beacon_chain/src/fork_revert.rs 
b/beacon_node/beacon_chain/src/fork_revert.rs index c500e1b4b6..cde2950c89 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -113,8 +113,9 @@ pub fn reset_fork_choice_to_finalization, Cold: It // Advance finalized state to finalized epoch (to handle skipped slots). let finalized_state_root = finalized_block.state_root(); + // The enshrined finalized state should be in the state cache. let mut finalized_state = store - .get_state(&finalized_state_root, Some(finalized_block.slot())) + .get_state(&finalized_state_root, Some(finalized_block.slot()), true) .map_err(|e| format!("Error loading finalized state: {:?}", e))? .ok_or_else(|| { format!( diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index c9173dc0d7..8e29be9732 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -317,8 +317,11 @@ impl LightClientServerCache { metrics::inc_counter(&metrics::LIGHT_CLIENT_SERVER_CACHE_PREV_BLOCK_CACHE_MISS); // Compute the value, handling potential errors. + // This state should already be cached. By electing not to cache it here + // we remove any chance of the light client server from affecting the state cache. + // We'd like the light client server to be as minimally invasive as possible. let mut state = store - .get_state(block_state_root, Some(block_slot))? + .get_state(block_state_root, Some(block_slot), false)? 
.ok_or_else(|| { BeaconChainError::DBInconsistent(format!("Missing state {:?}", block_state_root)) })?; diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index b64da00e76..cda5b34103 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -123,14 +123,23 @@ pub enum Notification { Finalization(FinalizationNotification), Reconstruction, PruneBlobs(Epoch), + ManualFinalization(ManualFinalizationNotification), + ManualCompaction, +} + +pub struct ManualFinalizationNotification { + pub state_root: BeaconStateHash, + pub checkpoint: Checkpoint, + pub head_tracker: Arc, + pub genesis_block_root: Hash256, } pub struct FinalizationNotification { - finalized_state_root: BeaconStateHash, - finalized_checkpoint: Checkpoint, - head_tracker: Arc, - prev_migration: Arc>, - genesis_block_root: Hash256, + pub finalized_state_root: BeaconStateHash, + pub finalized_checkpoint: Checkpoint, + pub head_tracker: Arc, + pub prev_migration: Arc>, + pub genesis_block_root: Hash256, } impl, Cold: ItemStore> BackgroundMigrator { @@ -187,6 +196,22 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator>, + notif: ManualFinalizationNotification, + ) { + // We create a "dummy" prev migration + let prev_migration = PrevMigration { + epoch: Epoch::new(1), + epochs_per_migration: 2, + }; + let notif = FinalizationNotification { + finalized_state_root: notif.state_root, + finalized_checkpoint: notif.checkpoint, + head_tracker: notif.head_tracker, + prev_migration: Arc::new(prev_migration.into()), + genesis_block_root: notif.genesis_block_root, + }; + Self::run_migration(db, notif); + } + /// Perform the actual work of `process_finalization`. fn run_migration(db: Arc>, notif: FinalizationNotification) { // Do not run too frequently. 
@@ -303,7 +347,8 @@ impl, Cold: ItemStore> BackgroundMigrator state, other => { error!( @@ -382,6 +427,15 @@ impl, Cold: ItemStore> BackgroundMigrator>) { + debug!("Running manual compaction"); + if let Err(error) = db.compact() { + warn!(?error, "Database compaction failed"); + } else { + debug!("Manual compaction completed"); + } + } + /// Spawn a new child thread to run the migration process. /// /// Return a channel handle for sending requests to the thread. @@ -394,16 +448,30 @@ impl, Cold: ItemStore> BackgroundMigrator reconstruction_notif = Some(notif), Notification::Finalization(fin) => finalization_notif = Some(fin), + Notification::ManualFinalization(fin) => manual_finalization_notif = Some(fin), Notification::PruneBlobs(dab) => prune_blobs_notif = Some(dab), + Notification::ManualCompaction => manual_compaction_notif = Some(notif), } // Read the rest of the messages in the channel, taking the best of each type. for notif in rx.try_iter() { match notif { Notification::Reconstruction => reconstruction_notif = Some(notif), + Notification::ManualCompaction => manual_compaction_notif = Some(notif), + Notification::ManualFinalization(fin) => { + if let Some(current) = manual_finalization_notif.as_mut() { + if fin.checkpoint.epoch > current.checkpoint.epoch { + *current = fin; + } + } else { + manual_finalization_notif = Some(fin); + } + } Notification::Finalization(fin) => { if let Some(current) = finalization_notif.as_mut() { if fin.finalized_checkpoint.epoch @@ -426,12 +494,18 @@ impl, Cold: ItemStore> BackgroundMigrator Option> { self.chain .store - .load_hot_state(&state_hash.into()) + .load_hot_state(&state_hash.into(), true) .unwrap() .map(|(state, _)| state) } diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index dcc63ddf62..30eec539fc 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs 
@@ -36,6 +36,9 @@ pub const VALIDATOR_COUNT: usize = 256; pub const CAPELLA_FORK_EPOCH: usize = 1; +// When set to true, cache any states fetched from the db. +pub const CACHE_STATE_IN_TESTS: bool = true; + /// A cached set of keys. static KEYPAIRS: LazyLock> = LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); @@ -1225,7 +1228,11 @@ async fn attestation_that_skips_epochs() { let mut state = harness .chain - .get_state(&earlier_block.state_root(), Some(earlier_slot)) + .get_state( + &earlier_block.state_root(), + Some(earlier_slot), + CACHE_STATE_IN_TESTS, + ) .expect("should not error getting state") .expect("should find state"); @@ -1329,9 +1336,14 @@ async fn attestation_validator_receive_proposer_reward_and_withdrawals() { .await; let current_slot = harness.get_current_slot(); + let mut state = harness .chain - .get_state(&earlier_block.state_root(), Some(earlier_slot)) + .get_state( + &earlier_block.state_root(), + Some(earlier_slot), + CACHE_STATE_IN_TESTS, + ) .expect("should not error getting state") .expect("should find state"); @@ -1399,7 +1411,11 @@ async fn attestation_to_finalized_block() { let mut state = harness .chain - .get_state(&earlier_block.state_root(), Some(earlier_slot)) + .get_state( + &earlier_block.state_root(), + Some(earlier_slot), + CACHE_STATE_IN_TESTS, + ) .expect("should not error getting state") .expect("should find state"); diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index b75b06caff..6226ed39cb 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -18,6 +18,9 @@ use types::{ChainSpec, ForkName, Slot}; pub const VALIDATOR_COUNT: usize = 64; +// When set to true, cache any states fetched from the db. 
+pub const CACHE_STATE_IN_TESTS: bool = true; + type E = MinimalEthSpec; static KEYPAIRS: LazyLock> = @@ -114,8 +117,13 @@ async fn test_sync_committee_rewards() { .get_blinded_block(&block.parent_root()) .unwrap() .unwrap(); + let parent_state = chain - .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .get_state( + &parent_block.state_root(), + Some(parent_block.slot()), + CACHE_STATE_IN_TESTS, + ) .unwrap() .unwrap(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 9212ed998d..38ff87d0c8 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -39,6 +39,9 @@ use types::*; pub const LOW_VALIDATOR_COUNT: usize = 24; pub const HIGH_VALIDATOR_COUNT: usize = 64; +// When set to true, cache any states fetched from the db. +pub const CACHE_STATE_IN_TESTS: bool = true; + /// A cached set of keys. static KEYPAIRS: LazyLock> = LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(HIGH_VALIDATOR_COUNT)); @@ -756,6 +759,7 @@ async fn delete_blocks_and_states() { .get_state( &faulty_head_block.state_root(), Some(faulty_head_block.slot()), + CACHE_STATE_IN_TESTS, ) .expect("no db error") .expect("faulty head state exists"); @@ -769,7 +773,12 @@ async fn delete_blocks_and_states() { break; } store.delete_state(&state_root, slot).unwrap(); - assert_eq!(store.get_state(&state_root, Some(slot)).unwrap(), None); + assert_eq!( + store + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) + .unwrap(), + None + ); } // Double-deleting should also be OK (deleting non-existent things is fine) @@ -1053,7 +1062,11 @@ fn get_state_for_block(harness: &TestHarness, block_root: Hash256) -> BeaconStat .unwrap(); harness .chain - .get_state(&head_block.state_root(), Some(head_block.slot())) + .get_state( + &head_block.state_root(), + Some(head_block.slot()), + CACHE_STATE_IN_TESTS, + ) .unwrap() .unwrap() } @@ -1890,7 +1903,10 @@ fn 
check_all_states_exist<'a>( states: impl Iterator, ) { for &state_hash in states { - let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); + let state = harness + .chain + .get_state(&state_hash.into(), None, CACHE_STATE_IN_TESTS) + .unwrap(); assert!( state.is_some(), "expected state {:?} to be in DB", @@ -1908,7 +1924,7 @@ fn check_no_states_exist<'a>( assert!( harness .chain - .get_state(&state_root.into(), None) + .get_state(&state_root.into(), None, CACHE_STATE_IN_TESTS) .unwrap() .is_none(), "state {:?} should not be in the DB", @@ -2342,7 +2358,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .get_or_reconstruct_blobs(&wss_block_root) .unwrap(); let wss_state = full_store - .get_state(&wss_state_root, Some(checkpoint_slot)) + .get_state(&wss_state_root, Some(checkpoint_slot), CACHE_STATE_IN_TESTS) .unwrap() .unwrap(); @@ -2454,7 +2470,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { // Check that the new block's state can be loaded correctly. 
let mut state = beacon_chain .store - .get_state(&state_root, Some(slot)) + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) .unwrap() .unwrap(); assert_eq!(state.update_tree_hash_cache().unwrap(), state_root); @@ -2584,7 +2600,10 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .unwrap() .map(Result::unwrap) { - let mut state = store.get_state(&state_root, Some(slot)).unwrap().unwrap(); + let mut state = store + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) + .unwrap() + .unwrap(); assert_eq!(state.slot(), slot); assert_eq!(state.canonical_root().unwrap(), state_root); } @@ -3410,9 +3429,10 @@ async fn prune_historic_states() { let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let genesis_state_root = harness.chain.genesis_state_root; + let genesis_state = harness .chain - .get_state(&genesis_state_root, None) + .get_state(&genesis_state_root, None, CACHE_STATE_IN_TESTS) .unwrap() .unwrap(); @@ -3433,7 +3453,10 @@ async fn prune_historic_states() { .map(Result::unwrap) .collect::>(); for &(state_root, slot) in &first_epoch_state_roots { - assert!(store.get_state(&state_root, Some(slot)).unwrap().is_some()); + assert!(store + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) + .unwrap() + .is_some()); } store @@ -3448,7 +3471,10 @@ async fn prune_historic_states() { // Ensure all epoch 0 states other than the genesis have been pruned. 
for &(state_root, slot) in &first_epoch_state_roots { assert_eq!( - store.get_state(&state_root, Some(slot)).unwrap().is_some(), + store + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) + .unwrap() + .is_some(), slot == 0 ); } @@ -3574,7 +3600,7 @@ fn check_chain_dump(harness: &TestHarness, expected_len: u64) { harness .chain .store - .get_state(&checkpoint.beacon_state_root(), None) + .get_state(&checkpoint.beacon_state_root(), None, CACHE_STATE_IN_TESTS) .expect("no error") .expect("state exists") .slot(), @@ -3636,7 +3662,7 @@ fn check_iterators(harness: &TestHarness) { harness .chain .store - .get_state(&state_root, Some(slot)) + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) .unwrap() .is_some(), "state {:?} from canonical chain should be in DB", diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 6d30b8a4e3..c8bbcce20d 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -21,6 +21,9 @@ pub type E = MainnetEthSpec; pub const VALIDATOR_COUNT: usize = 256; +// When set to true, cache any states fetched from the db. +pub const CACHE_STATE_IN_TESTS: bool = true; + /// A cached set of keys. static KEYPAIRS: LazyLock> = LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); @@ -755,7 +758,10 @@ async fn unaggregated_gossip_verification() { // Load the block and state for the given root. let block = chain.get_block(&root).await.unwrap().unwrap(); - let mut state = chain.get_state(&block.state_root(), None).unwrap().unwrap(); + let mut state = chain + .get_state(&block.state_root(), None, CACHE_STATE_IN_TESTS) + .unwrap() + .unwrap(); // Advance the state to simulate a pre-state for block production. 
let slot = valid_sync_committee_message.slot + 1; diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index c641f32b82..c801361fd5 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -12,10 +12,12 @@ use operation_pool::PersistedOperationPool; use state_processing::{per_slot_processing, per_slot_processing::Error as SlotProcessingError}; use std::sync::LazyLock; use types::{ - BeaconState, BeaconStateError, BlockImportSource, EthSpec, Hash256, Keypair, MinimalEthSpec, - RelativeEpoch, Slot, + BeaconState, BeaconStateError, BlockImportSource, Checkpoint, EthSpec, Hash256, Keypair, + MinimalEthSpec, RelativeEpoch, Slot, }; +type E = MinimalEthSpec; + // Should ideally be divisible by 3. pub const VALIDATOR_COUNT: usize = 48; @@ -24,12 +26,22 @@ static KEYPAIRS: LazyLock> = LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); fn get_harness(validator_count: usize) -> BeaconChainHarness> { + get_harness_with_config( + validator_count, + ChainConfig { + reconstruct_historic_states: true, + ..Default::default() + }, + ) +} + +fn get_harness_with_config( + validator_count: usize, + chain_config: ChainConfig, +) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() - .chain_config(ChainConfig { - reconstruct_historic_states: true, - ..ChainConfig::default() - }) + .chain_config(chain_config) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_ephemeral_store() .mock_execution_layer() @@ -869,3 +881,165 @@ async fn block_roots_skip_slot_behaviour() { "WhenSlotSkipped::Prev should return None on a future slot" ); } + +async fn pseudo_finalize_test_generic( + epochs_per_migration: u64, + expect_true_finalization_migration: bool, +) { + // This test ensures that after pseudo finalization, we can still finalize the chain without issues + let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; + + 
let chain_config = ChainConfig { + reconstruct_historic_states: true, + epochs_per_migration, + ..Default::default() + }; + let harness = get_harness_with_config(VALIDATOR_COUNT, chain_config); + + let one_third = VALIDATOR_COUNT / 3; + let attesters = (0..one_third).collect(); + + // extend the chain, but don't finalize + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(attesters), + ) + .await; + + harness.advance_slot(); + + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + let split = harness.chain.store.get_split_info(); + + assert_eq!( + state.slot(), + num_blocks_produced, + "head should be at the current slot" + ); + assert_eq!( + state.current_epoch(), + num_blocks_produced / MinimalEthSpec::slots_per_epoch(), + "head should be at the expected epoch" + ); + assert_eq!( + state.current_justified_checkpoint().epoch, + 0, + "There should be no justified checkpoint" + ); + assert_eq!( + state.finalized_checkpoint().epoch, + 0, + "There should be no finalized checkpoint" + ); + assert_eq!(split.slot, 0, "Our split point should be unset"); + + let checkpoint = Checkpoint { + epoch: head.beacon_state.current_epoch(), + root: head.beacon_block_root, + }; + + // pseudo finalize + harness + .chain + .manually_finalize_state(head.beacon_state_root(), checkpoint) + .unwrap(); + + let split = harness.chain.store.get_split_info(); + let pseudo_finalized_slot = split.slot; + + assert_eq!( + state.current_justified_checkpoint().epoch, + 0, + "We pseudo finalized, but our justified checkpoint should still be unset" + ); + assert_eq!( + state.finalized_checkpoint().epoch, + 0, + "We pseudo finalized, but our finalized checkpoint should still be unset" + ); + assert_eq!( + split.slot, + head.beacon_state.slot(), + "We pseudo finalized, our split point should be at the current head slot" + ); + + // finalize the chain + harness + .extend_chain( + num_blocks_produced as 
usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + harness.advance_slot(); + + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + let split = harness.chain.store.get_split_info(); + + assert_eq!( + state.slot(), + num_blocks_produced * 2, + "head should be at the current slot" + ); + assert_eq!( + state.current_epoch(), + (num_blocks_produced * 2) / MinimalEthSpec::slots_per_epoch(), + "head should be at the expected epoch" + ); + assert_eq!( + state.current_justified_checkpoint().epoch, + state.current_epoch() - 1, + "the head should be justified one behind the current epoch" + ); + let finalized_epoch = state.finalized_checkpoint().epoch; + assert_eq!( + finalized_epoch, + state.current_epoch() - 2, + "the head should be finalized two behind the current epoch" + ); + + let expected_split_slot = if pseudo_finalized_slot.epoch(E::slots_per_epoch()) + + epochs_per_migration + > finalized_epoch + { + pseudo_finalized_slot + } else { + finalized_epoch.start_slot(E::slots_per_epoch()) + }; + assert_eq!( + split.slot, expected_split_slot, + "We finalized, our split point should be updated according to epochs_per_migration" + ); + + // In the case that we did not process the true finalization migration (due to + // epochs_per_migration), check that the chain finalized *despite* the absence of the split + // block in fork choice. 
+ // This is a regression test for https://github.com/sigp/lighthouse/pull/7105 + if !expect_true_finalization_migration { + assert_eq!(expected_split_slot, pseudo_finalized_slot); + assert!(!harness + .chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&split.block_root)); + } +} + +#[tokio::test] +async fn pseudo_finalize_basic() { + let epochs_per_migration = 0; + let expect_true_migration = true; + pseudo_finalize_test_generic(epochs_per_migration, expect_true_migration).await; +} + +#[tokio::test] +async fn pseudo_finalize_with_lagging_split_update() { + let epochs_per_migration = 10; + let expect_true_migration = false; + pseudo_finalize_test_generic(epochs_per_migration, expect_true_migration).await; +} diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index 2f3f340445..23ab5e3752 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -126,8 +126,11 @@ pub fn get_attestation_performance( // Load state for block replay. let state_root = prior_block.state_root(); + + // This branch is reached from the HTTP API. We assume the user wants + // to cache states so that future calls are faster. let state = chain - .get_state(&state_root, Some(prior_slot)) + .get_state(&state_root, Some(prior_slot), true) .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root))) .map_err(unhandled_error)?; diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index 431547f10b..249a6732dc 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -285,8 +285,10 @@ pub fn get_block_packing_efficiency( // Load state for block replay. let starting_state_root = first_block.state_root(); + // This branch is reached from the HTTP API. 
We assume the user wants + // to cache states so that future calls are faster. let starting_state = chain - .get_state(&starting_state_root, Some(prior_slot)) + .get_state(&starting_state_root, Some(prior_slot), true) .and_then(|maybe_state| { maybe_state.ok_or(BeaconChainError::MissingBeaconState(starting_state_root)) }) diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index fbb16e9540..29b23e89a7 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -42,8 +42,10 @@ pub fn get_block_rewards( .map_err(unhandled_error)? .ok_or_else(|| custom_bad_request(format!("prior state at slot {} unknown", prior_slot)))?; + // This branch is reached from the HTTP API. We assume the user wants + // to cache states so that future calls are faster. let mut state = chain - .get_state(&state_root, Some(prior_slot)) + .get_state(&state_root, Some(prior_slot), true) .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root))) .map_err(unhandled_error)?; @@ -124,8 +126,10 @@ pub fn compute_block_rewards( )) })?; + // This branch is reached from the HTTP API. We assume the user wants + // to cache states so that future calls are faster. let parent_state = chain - .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .get_state(&parent_block.state_root(), Some(parent_block.slot()), true) .map_err(unhandled_error)? 
.ok_or_else(|| { custom_bad_request(format!( diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 1b8cdc5605..368faae612 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -87,11 +87,11 @@ use tokio_stream::{ use tracing::{debug, error, info, warn}; use types::{ fork_versioned_response::EmptyMetadata, Attestation, AttestationData, AttestationShufflingId, - AttesterSlashing, BeaconStateError, ChainSpec, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, - ForkName, ForkVersionedResponse, Hash256, ProposerPreparationData, ProposerSlashing, - RelativeEpoch, SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, - SyncCommitteeMessage, SyncContributionData, + AttesterSlashing, BeaconStateError, ChainSpec, Checkpoint, CommitteeCache, ConfigAndPreset, + Epoch, EthSpec, ForkName, ForkVersionedResponse, Hash256, ProposerPreparationData, + ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, SignedBlindedBeaconBlock, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedValidatorRegistrationData, + SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, }; use validator::pubkey_to_validator_index; use version::{ @@ -1078,6 +1078,72 @@ pub fn serve( }, ); + // GET beacon/states/{state_id}/pending_deposits + let get_beacon_state_pending_deposits = beacon_states_path + .clone() + .and(warp::path("pending_deposits")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(deposits) = state.pending_deposits() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending deposits not 
found".to_string(), + )); + }; + + Ok((deposits.clone(), execution_optimistic, finalized)) + }, + )?; + + Ok(api_types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ); + + // GET beacon/states/{state_id}/pending_partial_withdrawals + let get_beacon_state_pending_partial_withdrawals = beacon_states_path + .clone() + .and(warp::path("pending_partial_withdrawals")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(withdrawals) = state.pending_partial_withdrawals() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending withdrawals not found".to_string(), + )); + }; + + Ok((withdrawals.clone(), execution_optimistic, finalized)) + }, + )?; + + Ok(api_types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ); + // GET beacon/headers // // Note: this endpoint only returns information about blocks in the canonical chain. 
Given that @@ -3903,6 +3969,52 @@ pub fn serve( }, ); + // POST lighthouse/finalize + let post_lighthouse_finalize = warp::path("lighthouse") + .and(warp::path("finalize")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |request_data: api_types::ManualFinalizationRequestData, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let checkpoint = Checkpoint { + epoch: request_data.epoch, + root: request_data.block_root, + }; + + chain + .manually_finalize_state(request_data.state_root, checkpoint) + .map(|_| api_types::GenericResponse::from(request_data)) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "Failed to finalize state due to error: {e:?}" + )) + }) + }) + }, + ); + + // POST lighthouse/compaction + let post_lighthouse_compaction = warp::path("lighthouse") + .and(warp::path("compaction")) + .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + chain.manually_compact_database(); + Ok(api_types::GenericResponse::from(String::from( + "Triggered manual compaction", + ))) + }) + }, + ); + // POST lighthouse/liveness let post_lighthouse_liveness = warp::path("lighthouse") .and(warp::path("liveness")) @@ -4557,6 +4669,8 @@ pub fn serve( .uor(get_beacon_state_committees) .uor(get_beacon_state_sync_committees) .uor(get_beacon_state_randao) + .uor(get_beacon_state_pending_deposits) + .uor(get_beacon_state_pending_partial_withdrawals) .uor(get_beacon_headers) .uor(get_beacon_headers_block_id) .uor(get_beacon_block) @@ -4658,6 +4772,8 @@ pub fn serve( .uor(post_lighthouse_block_rewards) .uor(post_lighthouse_ui_validator_metrics) .uor(post_lighthouse_ui_validator_info) + .uor(post_lighthouse_finalize) + .uor(post_lighthouse_compaction) 
.recover(warp_utils::reject::handle_rejection), ), ) diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 353390cdad..a9f66de467 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -189,8 +189,10 @@ impl StateId { _ => (self.root(chain)?, None), }; + // This branch is reached from the HTTP API. We assume the user wants + // to cache states so that future calls are faster. let state = chain - .get_state(&state_root, slot_opt) + .get_state(&state_root, slot_opt, true) .map_err(warp_utils::reject::unhandled_error) .and_then(|opt| { opt.ok_or_else(|| { diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs index d7e8a1f9ef..9bc1f6ead4 100644 --- a/beacon_node/http_api/src/sync_committee_rewards.rs +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -56,8 +56,10 @@ pub fn get_state_before_applying_block( }) .map_err(|e| custom_not_found(format!("Parent block is not available! {:?}", e)))?; + // We are about to apply a new block to the chain. Its parent state + // is a useful/recent state, so we elect to cache it.
let parent_state = chain - .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .get_state(&parent_block.state_root(), Some(parent_block.slot()), true) .and_then(|maybe_state| { maybe_state .ok_or_else(|| BeaconChainError::MissingBeaconState(parent_block.state_root())) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index ddeda8b66d..768441533f 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1187,6 +1187,60 @@ impl ApiTester { self } + pub async fn test_beacon_states_pending_deposits(self) -> Self { + for state_id in self.interesting_state_ids() { + let mut state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic, _finalized)| state); + + let result = self + .client + .get_beacon_states_pending_deposits(state_id.0) + .await + .unwrap() + .map(|res| res.data); + + if result.is_none() && state_opt.is_none() { + continue; + } + + let state = state_opt.as_mut().expect("result should be none"); + let expected = state.pending_deposits().unwrap(); + + assert_eq!(result.unwrap(), expected.to_vec()); + } + + self + } + + pub async fn test_beacon_states_pending_partial_withdrawals(self) -> Self { + for state_id in self.interesting_state_ids() { + let mut state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic, _finalized)| state); + + let result = self + .client + .get_beacon_states_pending_partial_withdrawals(state_id.0) + .await + .unwrap() + .map(|res| res.data); + + if result.is_none() && state_opt.is_none() { + continue; + } + + let state = state_opt.as_mut().expect("result should be none"); + let expected = state.pending_partial_withdrawals().unwrap(); + + assert_eq!(result.unwrap(), expected.to_vec()); + } + + self + } + pub async fn test_beacon_headers_all_slots(self) -> Self { for slot in 0..CHAIN_LENGTH { let slot = Slot::from(slot); @@ -6298,6 +6352,22 @@ async fn beacon_get_state_info() { .await; } 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_get_state_info_electra() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + config.spec.electra_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_beacon_states_pending_deposits() + .await + .test_beacon_states_pending_partial_withdrawals() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn beacon_get_blocks() { ApiTester::new() diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index afab2d178c..34dca4f100 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1403,6 +1403,7 @@ impl NetworkBeaconProcessor { | Err(e @ BlockError::InconsistentFork(_)) | Err(e @ BlockError::ExecutionPayloadError(_)) | Err(e @ BlockError::ParentExecutionPayloadInvalid { .. }) + | Err(e @ BlockError::KnownInvalidExecutionPayload(_)) | Err(e @ BlockError::GenesisBlock) => { warn!(error = %e, "Could not verify block for gossip. 
Rejecting the block"); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index da8e595ddc..7beadffc06 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use tokio_stream::StreamExt; use tracing::{debug, error, warn}; use types::blob_sidecar::BlobIdentifier; -use types::{Epoch, EthSpec, FixedBytesExtended, Hash256, Slot}; +use types::{Epoch, EthSpec, Hash256, Slot}; impl NetworkBeaconProcessor { /* Auxiliary functions */ @@ -93,20 +93,42 @@ impl NetworkBeaconProcessor { // current slot. This could be because they are using a different genesis time, or that // their or our system's clock is incorrect. Some("Different system clocks or genesis time".to_string()) - } else if remote.finalized_epoch <= local.finalized_epoch - && remote.finalized_root != Hash256::zero() - && local.finalized_root != Hash256::zero() - && self - .chain - .block_root_at_slot(start_slot(remote.finalized_epoch), WhenSlotSkipped::Prev) - .map(|root_opt| root_opt != Some(remote.finalized_root))? + } else if (remote.finalized_epoch == local.finalized_epoch + && remote.finalized_root == local.finalized_root) + || remote.finalized_root.is_zero() + || local.finalized_root.is_zero() + || remote.finalized_epoch > local.finalized_epoch { - // The remote's finalized epoch is less than or equal to ours, but the block root is - // different to the one in our chain. Therefore, the node is on a different chain and we - // should not communicate with them. - Some("Different finalized chain".to_string()) - } else { + // Fast path. Remote finalized checkpoint is either identical, or genesis, or we are at + // genesis, or they are ahead. 
In all cases, we should allow this peer to connect to us + // so we can sync from them. None + } else { + // Remote finalized epoch is less than ours. + let remote_finalized_slot = start_slot(remote.finalized_epoch); + if remote_finalized_slot < self.chain.store.get_oldest_block_slot() { + // Peer's finalized checkpoint is older than anything in our DB. We are unlikely + // to be able to help them sync. + Some("Old finality out of range".to_string()) + } else if remote_finalized_slot < self.chain.store.get_split_slot() { + // Peer's finalized slot is in range for a quick block root check in our freezer DB. + // If that block root check fails, reject them as they're on a different finalized + // chain. + if self + .chain + .block_root_at_slot(remote_finalized_slot, WhenSlotSkipped::Prev) + .map(|root_opt| root_opt != Some(remote.finalized_root))? + { + Some("Different finalized chain".to_string()) + } else { + None + } + } else { + // Peer's finality is older than ours, but newer than our split point, making a + // block root check infeasible. This case shouldn't happen particularly often so + // we give the peer the benefit of the doubt and let them connect to us. 
+ None + } }; Ok(irrelevant_reason) diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 65097da0c6..a803e4d3eb 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -758,6 +758,18 @@ impl NetworkBeaconProcessor { debug!("Finalized or earlier block processed"); Ok(()) } + BlockError::NotFinalizedDescendant { block_parent_root } => { + debug!( + "Not syncing to a chain that conflicts with the canonical or manual finalized checkpoint" + ); + Err(ChainSegmentFailed { + message: format!( + "Block with parent_root {} conflicts with our checkpoint state", + block_parent_root + ), + peer_action: Some(PeerAction::Fatal), + }) + } BlockError::GenesisBlock => { debug!("Genesis block was processed"); Ok(()) @@ -817,6 +829,17 @@ impl NetworkBeaconProcessor { peer_action: Some(PeerAction::LowToleranceError), }) } + // Penalise peers for sending us banned blocks. 
+ BlockError::KnownInvalidExecutionPayload(block_root) => { + warn!( + ?block_root, + "Received block known to be invalid", + ); + Err(ChainSegmentFailed { + message: format!("Banned block: {block_root:?}"), + peer_action: Some(PeerAction::Fatal), + }) + } other => { debug!( msg = "peer sent invalid block", diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 9934d34493..7d086dcc32 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -776,6 +776,15 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) + .arg( + Arg::new("state-cache-headroom") + .long("state-cache-headroom") + .value_name("N") + .help("Minimum number of states to cull from the state cache when it gets full") + .default_value("1") + .action(ArgAction::Set) + .display_order(0) + ) .arg( Arg::new("block-cache-size") .long("block-cache-size") @@ -812,7 +821,7 @@ pub fn cli_app() -> Command { .long("state-cache-size") .value_name("STATE_CACHE_SIZE") .help("Specifies the size of the state cache") - .default_value("128") + .default_value("32") .action(ArgAction::Set) .display_order(0) ) @@ -1009,7 +1018,7 @@ pub fn cli_app() -> Command { database when they are older than the data availability boundary \ relative to the current epoch.") .action(ArgAction::Set) - .default_value("1") + .default_value("256") .display_order(0) ) .arg( @@ -1646,5 +1655,13 @@ pub fn cli_app() -> Command { .hide(true) .display_order(0) ) + .arg( + Arg::new("invalid-block-roots") + .long("invalid-block-roots") + .value_name("FILE") + .help("Path to a comma separated file containing block roots that should be treated as invalid during block verification.") + .action(ArgAction::Set) + .hide(true) + ) .group(ArgGroup::new("enable_http").args(["http", "gui", "staking"]).multiple(true)) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index bcebc06c9c..7b97d36ab2 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -2,7 +2,7 @@ use 
account_utils::{read_input_from_user, STDIN_INPUTS_FLAG}; use beacon_chain::chain_config::{ DisallowedReOrgOffsets, ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, DEFAULT_RE_ORG_HEAD_THRESHOLD, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, - DEFAULT_RE_ORG_PARENT_THRESHOLD, + DEFAULT_RE_ORG_PARENT_THRESHOLD, INVALID_HOLESKY_BLOCK_ROOT, }; use beacon_chain::graffiti_calculator::GraffitiOrigin; use beacon_chain::TrustedSetup; @@ -19,9 +19,10 @@ use lighthouse_network::ListenAddress; use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use sensitive_url::SensitiveUrl; use std::cmp::max; +use std::collections::HashSet; use std::fmt::Debug; use std::fs; -use std::io::IsTerminal; +use std::io::{IsTerminal, Read}; use std::net::Ipv6Addr; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; use std::num::NonZeroU16; @@ -449,6 +450,12 @@ pub fn get_config( client_config.chain.epochs_per_migration = epochs_per_migration; } + if let Some(state_cache_headroom) = + clap_utils::parse_optional(cli_args, "state-cache-headroom")? + { + client_config.store.state_cache_headroom = state_cache_headroom; + } + if let Some(prune_blobs) = clap_utils::parse_optional(cli_args, "prune-blobs")? { client_config.store.prune_blobs = prune_blobs; } @@ -897,6 +904,39 @@ pub fn get_config( client_config.chain.data_column_publishing_delay = Some(Duration::from_secs_f64(delay)); } + if let Some(invalid_block_roots_file_path) = + clap_utils::parse_optional::(cli_args, "invalid-block-roots")? 
+ { + let mut file = std::fs::File::open(invalid_block_roots_file_path) + .map_err(|e| format!("Failed to open invalid-block-roots file: {}", e))?; + let mut contents = String::new(); + file.read_to_string(&mut contents) + .map_err(|e| format!("Failed to read invalid-block-roots file {}", e))?; + let invalid_block_roots: HashSet = contents + .split(',') + .filter_map( + |s| match Hash256::from_str(s.strip_prefix("0x").unwrap_or(s).trim()) { + Ok(block_root) => Some(block_root), + Err(error) => { + warn!( + block_root = s, + ?error, + "Unable to parse invalid block root", + ); + None + } + }, + ) + .collect(); + client_config.chain.invalid_block_roots = invalid_block_roots; + } else if spec + .config_name + .as_ref() + .is_some_and(|network_name| network_name == "holesky") + { + client_config.chain.invalid_block_roots = HashSet::from([*INVALID_HOLESKY_BLOCK_ROOT]); + } + Ok(client_config) } diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 64765fd66a..a84573eb40 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -21,6 +21,7 @@ pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; pub const DEFAULT_EPOCHS_PER_STATE_DIFF: u64 = 8; pub const DEFAULT_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(64); pub const DEFAULT_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(128); +pub const DEFAULT_STATE_CACHE_HEADROOM: NonZeroUsize = new_non_zero_usize(1); pub const DEFAULT_COMPRESSION_LEVEL: i32 = 1; pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(1); pub const DEFAULT_HDIFF_BUFFER_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(16); @@ -35,6 +36,8 @@ pub struct StoreConfig { pub block_cache_size: NonZeroUsize, /// Maximum number of states to store in the in-memory state cache. pub state_cache_size: NonZeroUsize, + /// Minimum number of states to cull from the state cache upon fullness. 
+ pub state_cache_headroom: NonZeroUsize, /// Compression level for blocks, state diffs and other compressed values. pub compression_level: i32, /// Maximum number of historic states to store in the in-memory historic state cache. @@ -107,6 +110,7 @@ impl Default for StoreConfig { Self { block_cache_size: DEFAULT_BLOCK_CACHE_SIZE, state_cache_size: DEFAULT_STATE_CACHE_SIZE, + state_cache_headroom: DEFAULT_STATE_CACHE_HEADROOM, historic_state_cache_size: DEFAULT_HISTORIC_STATE_CACHE_SIZE, hdiff_buffer_cache_size: DEFAULT_HDIFF_BUFFER_CACHE_SIZE, compression_level: DEFAULT_COMPRESSION_LEVEL, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 0a545529ca..6a30d8a428 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -73,7 +73,7 @@ pub struct HotColdDB, Cold: ItemStore> { /// Cache of beacon states. /// /// LOCK ORDERING: this lock must always be locked *after* the `split` if both are required. - state_cache: Mutex>, + pub state_cache: Mutex>, /// Cache of historic states and hierarchical diff buffers. /// /// This cache is never pruned. 
It is only populated in response to historical queries from the @@ -215,7 +215,10 @@ impl HotColdDB, MemoryStore> { blobs_db: MemoryStore::open(), hot_db: MemoryStore::open(), block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), - state_cache: Mutex::new(StateCache::new(config.state_cache_size)), + state_cache: Mutex::new(StateCache::new( + config.state_cache_size, + config.state_cache_headroom, + )), historic_state_cache: Mutex::new(HistoricStateCache::new( config.hdiff_buffer_cache_size, config.historic_state_cache_size, @@ -259,7 +262,10 @@ impl HotColdDB, BeaconNodeBackend> { cold_db: BeaconNodeBackend::open(&config, cold_path)?, hot_db, block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), - state_cache: Mutex::new(StateCache::new(config.state_cache_size)), + state_cache: Mutex::new(StateCache::new( + config.state_cache_size, + config.state_cache_headroom, + )), historic_state_cache: Mutex::new(HistoricStateCache::new( config.hdiff_buffer_cache_size, config.historic_state_cache_size, @@ -934,6 +940,7 @@ impl, Cold: ItemStore> HotColdDB &self, state_root: &Hash256, slot: Option, + update_cache: bool, ) -> Result>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_GET_COUNT); @@ -945,10 +952,10 @@ impl, Cold: ItemStore> HotColdDB // chain. This way we avoid returning a state that doesn't match `state_root`. self.load_cold_state(state_root) } else { - self.get_hot_state(state_root) + self.get_hot_state(state_root, update_cache) } } else { - match self.get_hot_state(state_root)? { + match self.get_hot_state(state_root, update_cache)? { Some(state) => Ok(Some(state)), None => self.load_cold_state(state_root), } @@ -998,21 +1005,27 @@ impl, Cold: ItemStore> HotColdDB } else { state_root }; + // It's a bit redundant but we elect to cache the state here and down below. let mut opt_state = self - .load_hot_state(&state_root)? + .load_hot_state(&state_root, true)? 
.map(|(state, _block_root)| (state_root, state)); if let Some((state_root, state)) = opt_state.as_mut() { state.update_tree_hash_cache()?; state.build_all_caches(&self.spec)?; - self.state_cache - .lock() - .put_state(*state_root, block_root, state)?; - debug!( - ?state_root, - slot = %state.slot(), - "Cached state" - ); + if let PutStateOutcome::New(deleted_states) = + self.state_cache + .lock() + .put_state(*state_root, block_root, state)? + { + debug!( + ?state_root, + state_slot = %state.slot(), + ?deleted_states, + location = "get_advanced_hot_state", + "Cached state", + ); + } } drop(split); Ok(opt_state) @@ -1109,6 +1122,8 @@ impl, Cold: ItemStore> HotColdDB /// Load an epoch boundary state by using the hot state summary look-up. /// /// Will fall back to the cold DB if a hot state summary is not found. + /// + /// NOTE: only used in tests at the moment pub fn load_epoch_boundary_state( &self, state_root: &Hash256, @@ -1119,9 +1134,11 @@ impl, Cold: ItemStore> HotColdDB }) = self.load_hot_state_summary(state_root)? { // NOTE: minor inefficiency here because we load an unnecessary hot state summary - let (state, _) = self.load_hot_state(&epoch_boundary_state_root)?.ok_or( - HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root), - )?; + let (state, _) = self + .load_hot_state(&epoch_boundary_state_root, true)? + .ok_or(HotColdDBError::MissingEpochBoundaryState( + epoch_boundary_state_root, + ))?; Ok(Some(state)) } else { // Try the cold DB @@ -1445,23 +1462,32 @@ impl, Cold: ItemStore> HotColdDB state: &BeaconState, ops: &mut Vec, ) -> Result<(), Error> { - // Put the state in the cache. - let block_root = state.get_latest_block_root(*state_root); - // Avoid storing states in the database if they already exist in the state cache. // The exception to this is the finalized state, which must exist in the cache before it // is stored on disk. 
- if let PutStateOutcome::Duplicate = - self.state_cache - .lock() - .put_state(*state_root, block_root, state)? - { - debug!( - slot = %state.slot(), - ?state_root, - "Skipping storage of cached state" - ); - return Ok(()); + match self.state_cache.lock().put_state( + *state_root, + state.get_latest_block_root(*state_root), + state, + )? { + PutStateOutcome::New(deleted_states) => { + debug!( + ?state_root, + state_slot = %state.slot(), + ?deleted_states, + location = "store_hot_state", + "Cached state", + ); + } + PutStateOutcome::Duplicate => { + debug!( + ?state_root, + state_slot = %state.slot(), + "State already exists in state cache", + ); + return Ok(()); + } + PutStateOutcome::Finalized => {} // Continue to store. } // On the epoch boundary, store the full state. @@ -1485,7 +1511,11 @@ impl, Cold: ItemStore> HotColdDB } /// Get a post-finalization state from the database or store. - pub fn get_hot_state(&self, state_root: &Hash256) -> Result>, Error> { + pub fn get_hot_state( + &self, + state_root: &Hash256, + update_cache: bool, + ) -> Result>, Error> { if let Some(state) = self.state_cache.lock().get_by_state_root(*state_root) { return Ok(Some(state)); } @@ -1495,19 +1525,33 @@ impl, Cold: ItemStore> HotColdDB warn!(?state_root, "State cache missed"); } - let state_from_disk = self.load_hot_state(state_root)?; + let state_from_disk = self.load_hot_state(state_root, update_cache)?; if let Some((mut state, block_root)) = state_from_disk { state.update_tree_hash_cache()?; state.build_all_caches(&self.spec)?; - self.state_cache - .lock() - .put_state(*state_root, block_root, &state)?; - debug!( - ?state_root, - slot = %state.slot(), - "Cached state" - ); + if update_cache { + if let PutStateOutcome::New(deleted_states) = + self.state_cache + .lock() + .put_state(*state_root, block_root, &state)? 
+ { + debug!( + ?state_root, + state_slot = %state.slot(), + ?deleted_states, + location = "get_hot_state", + "Cached state", + ); + } + } else { + debug!( + ?state_root, + state_slot = %state.slot(), + "Did not cache state", + ); + } + Ok(Some(state)) } else { Ok(None) @@ -1523,6 +1567,7 @@ impl, Cold: ItemStore> HotColdDB pub fn load_hot_state( &self, state_root: &Hash256, + update_cache: bool, ) -> Result, Hash256)>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT); @@ -1554,25 +1599,28 @@ impl, Cold: ItemStore> HotColdDB let mut state = if slot % E::slots_per_epoch() == 0 { boundary_state } else { - // Cache ALL intermediate states that are reached during block replay. We may want - // to restrict this in future to only cache epoch boundary states. At worst we will - // cache up to 32 states for each state loaded, which should not flush out the cache - // entirely. + // If replaying blocks, and `update_cache` is true, also cache the epoch boundary + // state that this state is based on. It may be useful as the basis of more states + // in the same epoch. let state_cache_hook = |state_root, state: &mut BeaconState| { + if !update_cache || state.slot() % E::slots_per_epoch() != 0 { + return Ok(()); + } // Ensure all caches are built before attempting to cache. state.update_tree_hash_cache()?; state.build_all_caches(&self.spec)?; let latest_block_root = state.get_latest_block_root(state_root); - if let PutStateOutcome::New = + if let PutStateOutcome::New(_) = self.state_cache .lock() .put_state(state_root, latest_block_root, state)? { debug!( ?state_root, - %slot, - "Cached ancestor state" + state_slot = %state.slot(), + descendant_slot = %slot, + "Cached ancestor state", ); } Ok(()) @@ -2619,10 +2667,15 @@ impl, Cold: ItemStore> HotColdDB return Ok(()); }; - // Load the split state so we can backtrack to find execution payloads. 
- let split_state = self.get_state(&split.state_root, Some(split.slot))?.ok_or( - HotColdDBError::MissingSplitState(split.state_root, split.slot), - )?; + // Load the split state so we can backtrack to find execution payloads. The split state + // should be in the state cache as the enshrined finalized state, so this should never + // cache miss. + let split_state = self + .get_state(&split.state_root, Some(split.slot), true)? + .ok_or(HotColdDBError::MissingSplitState( + split.state_root, + split.slot, + ))?; // The finalized block may or may not have its execution payload stored, depending on // whether it was at a skipped slot. However for a fully pruned database its parent @@ -3080,8 +3133,10 @@ pub fn migrate_database, Cold: ItemStore>( // Store slot -> state_root and state_root -> slot mappings. store.store_cold_state_summary(&state_root, slot, &mut cold_db_ops)?; } else { + // This is some state that we want to migrate to the freezer db. + // There is no reason to cache this state. let state: BeaconState = store - .get_hot_state(&state_root)? + .get_hot_state(&state_root, false)? .ok_or(HotColdDBError::MissingStateToFreeze(state_root))?; store.store_cold_state(&state_root, &state, &mut cold_db_ops)?; diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 0d12bbdd60..8419dde4a2 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -27,8 +27,10 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> &self, store: &'a HotColdDB, ) -> Option> { + // Ancestor roots and their states are probably in the cold db + // but we set `update_cache` to false just in case let state = store - .get_state(&self.message().state_root(), Some(self.slot())) + .get_state(&self.message().state_root(), Some(self.slot()), false) .ok()??; Some(BlockRootsIterator::owned(store, state)) @@ -189,8 +191,10 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> RootsIterator<'a, E, let block = store .get_blinded_block(&block_hash)? 
.ok_or_else(|| BeaconStateError::MissingBeaconBlock(block_hash.into()))?; + // We are querying some block from the database. It's not clear if the block's state is useful, + // we elect not to cache it. let state = store - .get_state(&block.state_root(), Some(block.slot()))? + .get_state(&block.state_root(), Some(block.slot()), false)? .ok_or_else(|| BeaconStateError::MissingBeaconState(block.state_root().into()))?; Ok(Self::owned(store, state)) } @@ -362,8 +366,9 @@ fn next_historical_root_backtrack_state, Cold: Ite if new_state_slot >= historic_state_upper_limit { let new_state_root = current_state.get_state_root(new_state_slot)?; + // We are backtracking through historical states, we don't want to cache these. Ok(store - .get_state(new_state_root, Some(new_state_slot))? + .get_state(new_state_root, Some(new_state_slot), false)? .ok_or_else(|| BeaconStateError::MissingBeaconState((*new_state_root).into()))?) } else { Err(Error::HistoryUnavailable) diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index 96e4de4639..281ecab152 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -33,26 +33,33 @@ pub struct SlotMap { #[derive(Debug)] pub struct StateCache { finalized_state: Option>, - states: LruCache>, + // Stores the tuple (state_root, state) as LruCache only returns the value on put and we need + // the state_root + states: LruCache)>, block_map: BlockMap, max_epoch: Epoch, + head_block_root: Hash256, + headroom: NonZeroUsize, } #[derive(Debug)] pub enum PutStateOutcome { Finalized, Duplicate, - New, + /// Includes deleted states as a result of this insertion + New(Vec), } #[allow(clippy::len_without_is_empty)] impl StateCache { - pub fn new(capacity: NonZeroUsize) -> Self { + pub fn new(capacity: NonZeroUsize, headroom: NonZeroUsize) -> Self { StateCache { finalized_state: None, states: LruCache::new(capacity), block_map: BlockMap::default(), max_epoch: Epoch::new(0), + 
head_block_root: Hash256::ZERO, + headroom, } } @@ -98,6 +105,13 @@ impl StateCache { Ok(()) } + /// Update the state cache's view of the enshrined head block. + /// + /// We never prune the unadvanced state for the head block. + pub fn update_head_block_root(&mut self, head_block_root: Hash256) { + self.head_block_root = head_block_root; + } + /// Rebase the given state on the finalized state in order to reduce its memory consumption. /// /// This function should only be called on states that are likely not to already share tree @@ -147,18 +161,26 @@ impl StateCache { self.max_epoch = std::cmp::max(state.current_epoch(), self.max_epoch); // If the cache is full, use the custom cull routine to make room. - if let Some(over_capacity) = self.len().checked_sub(self.capacity()) { - self.cull(over_capacity + 1); - } + let mut deleted_states = + if let Some(over_capacity) = self.len().checked_sub(self.capacity()) { + // The `over_capacity` should always be 0, but we add it here just in case. + self.cull(over_capacity + self.headroom.get()) + } else { + vec![] + }; // Insert the full state into the cache. - self.states.put(state_root, state.clone()); + if let Some((deleted_state_root, _)) = + self.states.put(state_root, (state_root, state.clone())) + { + deleted_states.push(deleted_state_root); + } // Record the connection from block root and slot to this state. let slot = state.slot(); self.block_map.insert(block_root, slot, state_root); - Ok(PutStateOutcome::New) + Ok(PutStateOutcome::New(deleted_states)) } pub fn get_by_state_root(&mut self, state_root: Hash256) -> Option> { @@ -167,7 +189,7 @@ impl StateCache { return Some(finalized_state.state.clone()); } } - self.states.get(&state_root).cloned() + self.states.get(&state_root).map(|(_, state)| state.clone()) } pub fn get_by_block_root( @@ -211,7 +233,7 @@ impl StateCache { /// - Mid-epoch unadvanced states. /// - Epoch-boundary states that are too old to be finalized. 
/// - Epoch-boundary states that could be finalized. - pub fn cull(&mut self, count: usize) { + pub fn cull(&mut self, count: usize) -> Vec { let cull_exempt = std::cmp::max( 1, self.len() * CULL_EXEMPT_NUMERATOR / CULL_EXEMPT_DENOMINATOR, @@ -222,7 +244,8 @@ impl StateCache { let mut mid_epoch_state_roots = vec![]; let mut old_boundary_state_roots = vec![]; let mut good_boundary_state_roots = vec![]; - for (&state_root, state) in self.states.iter().skip(cull_exempt) { + + for (&state_root, (_, state)) in self.states.iter().skip(cull_exempt) { let is_advanced = state.slot() > state.latest_block_header().slot; let is_boundary = state.slot() % E::slots_per_epoch() == 0; let could_finalize = @@ -236,7 +259,8 @@ impl StateCache { } } else if is_advanced { advanced_state_roots.push(state_root); - } else { + } else if state.get_latest_block_root(state_root) != self.head_block_root { + // Never prune the head state mid_epoch_state_roots.push(state_root); } @@ -248,15 +272,19 @@ impl StateCache { // Stage 2: delete. // This could probably be more efficient in how it interacts with the block map. - for state_root in advanced_state_roots - .iter() - .chain(mid_epoch_state_roots.iter()) - .chain(old_boundary_state_roots.iter()) - .chain(good_boundary_state_roots.iter()) + let state_roots_to_delete = advanced_state_roots + .into_iter() + .chain(old_boundary_state_roots) + .chain(mid_epoch_state_roots) + .chain(good_boundary_state_roots) .take(count) - { + .collect::>(); + + for state_root in &state_roots_to_delete { self.delete_state(state_root); } + + state_roots_to_delete } } diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 4c76647c0c..a99aae30b1 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -118,7 +118,7 @@ Options: --epochs-per-blob-prune The epoch interval with which to prune blobs from Lighthouse's database when they are older than the data availability boundary - relative to the current epoch. [default: 1] + relative to the current epoch. 
[default: 256] --epochs-per-migration The number of epochs to wait between running the migration of data from the hot DB to the cold DB. Less frequent runs can be useful for @@ -286,7 +286,7 @@ Options: monitoring-endpoint. Default: 60s --network Name of the Eth2 chain Lighthouse will sync and follow. [possible - values: mainnet, gnosis, chiado, sepolia, holesky] + values: mainnet, gnosis, chiado, sepolia, holesky, hoodi] --network-dir Data directory for network keys. Defaults to network/ inside the beacon node dir. @@ -381,8 +381,11 @@ Options: Number of validators per chunk stored on disk. --slots-per-restore-point DEPRECATED. This flag has no effect. + --state-cache-headroom + Minimum number of states to cull from the state cache when it gets + full [default: 1] --state-cache-size - Specifies the size of the state cache [default: 128] + Specifies the size of the state cache [default: 32] --suggested-fee-recipient Emergency fallback fee recipient for use in case the validator client does not have one configured. You should set this flag on the diff --git a/book/src/help_general.md b/book/src/help_general.md index 4d0d4104d4..9c449b2835 100644 --- a/book/src/help_general.md +++ b/book/src/help_general.md @@ -72,7 +72,7 @@ Options: set to 0, background file logging is disabled. [default: 200] --network Name of the Eth2 chain Lighthouse will sync and follow. [possible - values: mainnet, gnosis, chiado, sepolia, holesky] + values: mainnet, gnosis, chiado, sepolia, holesky, hoodi] -t, --testnet-dir Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 7fb655910f..fb2c8de876 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -115,7 +115,7 @@ Options: monitoring-endpoint. [default: 60] --network Name of the Eth2 chain Lighthouse will sync and follow. 
[possible - values: mainnet, gnosis, chiado, sepolia, holesky] + values: mainnet, gnosis, chiado, sepolia, holesky, hoodi] --proposer-nodes Comma-separated addresses to one or more beacon node HTTP APIs. These specify nodes that are used to send beacon block proposals. A failure diff --git a/book/src/help_vm.md b/book/src/help_vm.md index 0d9d2a2e4b..3907064696 100644 --- a/book/src/help_vm.md +++ b/book/src/help_vm.md @@ -69,7 +69,7 @@ Options: set to 0, background file logging is disabled. [default: 200] --network Name of the Eth2 chain Lighthouse will sync and follow. [possible - values: mainnet, gnosis, chiado, sepolia, holesky] + values: mainnet, gnosis, chiado, sepolia, holesky, hoodi] -t, --testnet-dir Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing diff --git a/book/src/help_vm_create.md b/book/src/help_vm_create.md index 4f3774df10..d83b1000b9 100644 --- a/book/src/help_vm_create.md +++ b/book/src/help_vm_create.md @@ -78,7 +78,7 @@ Options: If present, the mnemonic will be read in from this file. --network Name of the Eth2 chain Lighthouse will sync and follow. [possible - values: mainnet, gnosis, chiado, sepolia, holesky] + values: mainnet, gnosis, chiado, sepolia, holesky, hoodi] --output-path The path to a directory where the validator and (optionally) deposits files will be created. The directory will be created if it does not diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md index 28690d3a11..2dd7d5f70a 100644 --- a/book/src/help_vm_import.md +++ b/book/src/help_vm_import.md @@ -61,7 +61,7 @@ Options: set to 0, background file logging is disabled. [default: 200] --network Name of the Eth2 chain Lighthouse will sync and follow. [possible - values: mainnet, gnosis, chiado, sepolia, holesky] + values: mainnet, gnosis, chiado, sepolia, holesky, hoodi] --password Password of the keystore file. 
--prefer-builder-proposals diff --git a/book/src/help_vm_move.md b/book/src/help_vm_move.md index af4a1a4d6d..2f068a1f88 100644 --- a/book/src/help_vm_move.md +++ b/book/src/help_vm_move.md @@ -65,7 +65,7 @@ Options: set to 0, background file logging is disabled. [default: 200] --network Name of the Eth2 chain Lighthouse will sync and follow. [possible - values: mainnet, gnosis, chiado, sepolia, holesky] + values: mainnet, gnosis, chiado, sepolia, holesky, hoodi] --prefer-builder-proposals If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. [possible values: true, diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 362b598c9f..c9f0c04fca 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "7.0.0-beta.0" +version = "7.0.0-beta.4" authors = ["Sigma Prime "] edition = { workspace = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 358fe12a84..1c55220b50 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -781,6 +781,45 @@ impl BeaconNodeHttpClient { self.get_opt(path).await } + /// `GET beacon/states/{state_id}/pending_deposits` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_pending_deposits( + &self, + state_id: StateId, + ) -> Result>>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("pending_deposits"); + + self.get_opt(path).await + } + + /// `GET beacon/states/{state_id}/pending_partial_withdrawals` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_pending_partial_withdrawals( + &self, + state_id: StateId, + ) -> Result>>, Error> + { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("pending_partial_withdrawals"); + + self.get_opt(path).await + } + /// `GET beacon/light_client/updates` /// /// Returns `Ok(None)` on a 404 error. diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 5cdbf80b05..9839fcfda4 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1404,6 +1404,13 @@ pub struct StandardLivenessResponseData { pub is_live: bool, } +#[derive(Debug, Serialize, Deserialize)] +pub struct ManualFinalizationRequestData { + pub state_root: Hash256, + pub epoch: Epoch, + pub block_root: Hash256, +} + #[derive(Debug, Serialize, Deserialize)] pub struct LivenessRequestData { pub epoch: Epoch, diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index 50386feb8a..017bdf288d 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -35,6 +35,17 @@ const HOLESKY_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url genesis_state_root: "0x0ea3f6f9515823b59c863454675fefcd1d8b4f2dbe454db166206a41fda060a0", }; +const HOODI_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url { + urls: &[ + // This is an AWS S3 bucket hosted by Sigma Prime. See Paul Hauner for + // more details. + "https://sigp-public-genesis-states.s3.ap-southeast-2.amazonaws.com/hoodi/", + ], + checksum: "0x7f42257ef69e055496c964a753bb07e54001ccd57ab467ef72d67af086bcfce7", + genesis_validators_root: "0x212f13fc4df078b6cb7db228f1c8307566dcecf900867401a92023d7ba99cb5f", + genesis_state_root: "0x2683ebc120f91f740c7bed4c866672d01e1ba51b4cc360297138465ee5df40f0", +}; + const CHIADO_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url { // No default checkpoint sources are provided. urls: &[], @@ -328,5 +339,14 @@ define_hardcoded_nets!( "holesky", // Describes how the genesis state can be obtained. HOLESKY_GENESIS_STATE_SOURCE + ), + ( + // Network name (must be unique among all networks). 
+ hoodi, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "hoodi", + // Describes how the genesis state can be obtained. + HOODI_GENESIS_STATE_SOURCE ) ); diff --git a/common/eth2_network_config/built_in_network_configs/hoodi/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/hoodi/boot_enr.yaml new file mode 100644 index 0000000000..33eaa7e8a9 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/hoodi/boot_enr.yaml @@ -0,0 +1,13 @@ +# hoodi consensus layer bootnodes +# --------------------------------------- +# 1. Tag nodes with maintainer +# 2. Keep nodes updated +# 3. Review PRs: check ENR duplicates, fork-digest, connection. + +# EF +- enr:-Mq4QLkmuSwbGBUph1r7iHopzRpdqE-gcm5LNZfcE-6T37OCZbRHi22bXZkaqnZ6XdIyEDTelnkmMEQB8w6NbnJUt9GGAZWaowaYh2F0dG5ldHOIABgAAAAAAACEZXRoMpDS8Zl_YAAJEAAIAAAAAAAAgmlkgnY0gmlwhNEmfKCEcXVpY4IyyIlzZWNwMjU2azGhA0hGa4jZJZYQAS-z6ZFK-m4GCFnWS8wfjO0bpSQn6hyEiHN5bmNuZXRzAIN0Y3CCIyiDdWRwgiMo +- enr:-Ku4QLVumWTwyOUVS4ajqq8ZuZz2ik6t3Gtq0Ozxqecj0qNZWpMnudcvTs-4jrlwYRQMQwBS8Pvtmu4ZPP2Lx3i2t7YBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpBd9cEGEAAJEP__________gmlkgnY0gmlwhNEmfKCJc2VjcDI1NmsxoQLdRlI8aCa_ELwTJhVN8k7km7IDc3pYu-FMYBs5_FiigIN1ZHCCIyk +- enr:-LK4QAYuLujoiaqCAs0-qNWj9oFws1B4iy-Hff1bRB7wpQCYSS-IIMxLWCn7sWloTJzC1SiH8Y7lMQ5I36ynGV1ASj4Eh2F0dG5ldHOIYAAAAAAAAACEZXRoMpDS8Zl_YAAJEAAIAAAAAAAAgmlkgnY0gmlwhIbRilSJc2VjcDI1NmsxoQOmI5MlAu3f5WEThAYOqoygpS2wYn0XS5NV2aYq7T0a04N0Y3CCIyiDdWRwgiMo +- enr:-Ku4QIC89sMC0o-irosD4_23lJJ4qCGOvdUz7SmoShWx0k6AaxCFTKviEHa-sa7-EzsiXpDp0qP0xzX6nKdXJX3X-IQBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpBd9cEGEAAJEP__________gmlkgnY0gmlwhIbRilSJc2VjcDI1NmsxoQK_m0f1DzDc9Cjrspm36zuRa7072HSiMGYWLsKiVSbP34N1ZHCCIyk +- 
enr:-Ku4QNkWjw5tNzo8DtWqKm7CnDdIq_y7xppD6c1EZSwjB8rMOkSFA1wJPLoKrq5UvA7wcxIotH6Usx3PAugEN2JMncIBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpBd9cEGEAAJEP__________gmlkgnY0gmlwhIbHuBeJc2VjcDI1NmsxoQP3FwrhFYB60djwRjAoOjttq6du94DtkQuaN99wvgqaIYN1ZHCCIyk +- enr:-OS4QMJGE13xEROqvKN1xnnt7U-noc51VXyM6wFMuL9LMhQDfo1p1dF_zFdS4OsnXz_vIYk-nQWnqJMWRDKvkSK6_CwDh2F0dG5ldHOIAAAAADAAAACGY2xpZW502IpMaWdodGhvdXNljDcuMC4wLWJldGEuM4RldGgykNLxmX9gAAkQAAgAAAAAAACCaWSCdjSCaXCEhse4F4RxdWljgiMqiXNlY3AyNTZrMaECef77P8k5l3PC_raLw42OAzdXfxeQ-58BJriNaqiRGJSIc3luY25ldHMAg3RjcIIjKIN1ZHCCIyg diff --git a/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml b/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml new file mode 100644 index 0000000000..19d7797424 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml @@ -0,0 +1,165 @@ +# Extends the mainnet preset +PRESET_BASE: mainnet +CONFIG_NAME: hoodi + +# Genesis +# --------------------------------------------------------------- +# `2**14` (= 16,384) +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 +# 2025-Mar-17 12:00:00 PM UTC +MIN_GENESIS_TIME: 1742212800 +GENESIS_FORK_VERSION: 0x10000910 +GENESIS_DELAY: 600 + + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x20000910 +ALTAIR_FORK_EPOCH: 0 +# Merge +BELLATRIX_FORK_VERSION: 0x30000910 +BELLATRIX_FORK_EPOCH: 0 +TERMINAL_TOTAL_DIFFICULTY: 0 +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Capella +CAPELLA_FORK_VERSION: 0x40000910 +CAPELLA_FORK_EPOCH: 0 + +# DENEB +DENEB_FORK_VERSION: 0x50000910 +DENEB_FORK_EPOCH: 0 + +# Electra +ELECTRA_FORK_VERSION: 0x60000910 +ELECTRA_FORK_EPOCH: 2048 + +# Fulu +FULU_FORK_VERSION: 0x70000910 
+FULU_FORK_EPOCH: 18446744073709551615 + + +# Time parameters +# --------------------------------------------------------------- +# 12 seconds +SECONDS_PER_SLOT: 12 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 12 +# 2**8 (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs ~27 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 65536 +# [New in Deneb:EIP7514] 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 +# 20% +REORG_HEAD_WEIGHT_THRESHOLD: 20 +# 160% +REORG_PARENT_WEIGHT_THRESHOLD: 160 +# `2` epochs +REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + +# Deposit contract +# --------------------------------------------------------------- +DEPOSIT_CHAIN_ID: 560048 +DEPOSIT_NETWORK_ID: 560048 +DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa + +# Networking +# --------------------------------------------------------------- +# `10 * 2**20` (= 10485760, 10 MiB) +GOSSIP_MAX_SIZE: 10485760 +# `2**10` (= 1024) +MAX_REQUEST_BLOCKS: 1024 +# `2**8` (= 256) +EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 +# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +# `10 * 2**20` (=10485760, 10 MiB) +MAX_CHUNK_SIZE: 10485760 +# 5s +TTFB_TIMEOUT: 5 +# 10s +RESP_TIMEOUT: 10 +ATTESTATION_PROPAGATION_SLOT_RANGE: 32 +# 500ms +MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +# 2 subnets per node 
+SUBNETS_PER_NODE: 2 +# 2**6 (= 64) +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS +ATTESTATION_SUBNET_PREFIX_BITS: 6 + +# Deneb +# `2**7` (=128) +MAX_REQUEST_BLOCKS_DENEB: 128 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK +MAX_REQUEST_BLOB_SIDECARS: 768 +# `2**12` (= 4096 epochs, ~18 days) +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 +# `6` +BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 + +# Electra +# 2**7 * 10**9 (= 128,000,000,000) +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 +# `9` +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 +# `uint64(6)` +TARGET_BLOBS_PER_BLOCK_ELECTRA: 6 +# `uint64(9)` +MAX_BLOBS_PER_BLOCK_ELECTRA: 9 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA +MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 + +# Whisk +# `Epoch(2**8)` +WHISK_EPOCHS_PER_SHUFFLING_PHASE: 256 +# `Epoch(2)` +WHISK_PROPOSER_SELECTION_GAP: 2 + +# Fulu +NUMBER_OF_COLUMNS: 128 +NUMBER_OF_CUSTODY_GROUPS: 128 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 +SAMPLES_PER_SLOT: 8 +CUSTODY_REQUIREMENT: 4 +MAX_BLOBS_PER_BLOCK_FULU: 12 +MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 + +# EIP7732 +MAX_REQUEST_PAYLOADS: 128 diff --git a/common/eth2_network_config/built_in_network_configs/hoodi/deposit_contract_block.txt b/common/eth2_network_config/built_in_network_configs/hoodi/deposit_contract_block.txt new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/hoodi/deposit_contract_block.txt @@ -0,0 +1 @@ +0 diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index cfffdbbb09..1c62cd7b8a 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE:
using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v7.0.0-beta.0-", - fallback = "Lighthouse/v7.0.0-beta.0" + prefix = "Lighthouse/v7.0.0-beta.4-", + fallback = "Lighthouse/v7.0.0-beta.4" ); /// Returns the first eight characters of the latest commit hash for this build. @@ -54,7 +54,7 @@ pub fn version_with_platform() -> String { /// /// `1.5.1` pub fn version() -> &'static str { - "7.0.0-beta.0" + "7.0.0-beta.4" } /// Returns the name of the current client running. diff --git a/common/logging/src/sse_logging_components.rs b/common/logging/src/sse_logging_components.rs index e358fde6c6..a25b5be6c5 100644 --- a/common/logging/src/sse_logging_components.rs +++ b/common/logging/src/sse_logging_components.rs @@ -1,3 +1,4 @@ +// TODO(tracing) fix the comments below and remove reference of slog::Drain //! This module provides an implementation of `slog::Drain` that optionally writes to a channel if //! there are subscribers to a HTTP SSE stream. diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 28a3ecdd02..91b44c7af1 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1256,6 +1256,11 @@ where .is_finalized_checkpoint_or_descendant::(block_root) } + pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool { + self.proto_array + .is_descendant(ancestor_root, descendant_root) + } + /// Returns `Ok(true)` if `block_root` has been imported optimistically or deemed invalid. 
/// /// Returns `Ok(false)` if `block_root`'s execution payload has been elected as fully VALID, if diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index b224cde048..95bdee574d 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -25,6 +25,9 @@ pub type E = MainnetEthSpec; pub const VALIDATOR_COUNT: usize = 64; +// When set to true, cache any states fetched from the db. +pub const CACHE_STATE_IN_TESTS: bool = true; + /// Defines some delay between when an attestation is created and when it is mutated. pub enum MutationDelay { /// No delay between creation and mutation. @@ -373,7 +376,7 @@ impl ForkChoiceTest { let state = harness .chain .store - .get_state(&state_root, None) + .get_state(&state_root, None, CACHE_STATE_IN_TESTS) .unwrap() .unwrap(); let balances = state diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index c59449634a..34e9ff120d 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -22,6 +22,9 @@ pub const VALIDATOR_COUNT: usize = 64; pub const EPOCH_OFFSET: u64 = 4; pub const NUM_ATTESTATIONS: u64 = 1; +// When set to true, cache any states fetched from the db. +pub const CACHE_STATE_IN_TESTS: bool = true; + /// A cached set of keys. static KEYPAIRS: LazyLock> = LazyLock::new(|| generate_deterministic_keypairs(MAX_VALIDATOR_COUNT)); @@ -1114,9 +1117,10 @@ async fn block_replayer_peeking_state_roots() { .get_blinded_block(&parent_block_root) .unwrap() .unwrap(); + // Cache the state to make CI go brr. 
let parent_state = harness .chain - .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .get_state(&parent_block.state_root(), Some(parent_block.slot()), true) .unwrap() .unwrap(); diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 0dd91edc3c..e1fce47975 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -213,12 +213,16 @@ impl LightClientUpdate { .map_err(|_| Error::InconsistentFork)? { ForkName::Base => return Err(Error::AltairForkNotActive), - ForkName::Altair | ForkName::Bellatrix => { + fork_name @ ForkName::Altair | fork_name @ ForkName::Bellatrix => { let attested_header = LightClientHeaderAltair::block_to_light_client_header(attested_block)?; let finalized_header = if let Some(finalized_block) = finalized_block { - LightClientHeaderAltair::block_to_light_client_header(finalized_block)? + if finalized_block.fork_name_unchecked() == fork_name { + LightClientHeaderAltair::block_to_light_client_header(finalized_block)? + } else { + LightClientHeaderAltair::default() + } } else { LightClientHeaderAltair::default() }; @@ -233,12 +237,16 @@ impl LightClientUpdate { signature_slot: block_slot, }) } - ForkName::Capella => { + fork_name @ ForkName::Capella => { let attested_header = LightClientHeaderCapella::block_to_light_client_header(attested_block)?; let finalized_header = if let Some(finalized_block) = finalized_block { - LightClientHeaderCapella::block_to_light_client_header(finalized_block)? + if finalized_block.fork_name_unchecked() == fork_name { + LightClientHeaderCapella::block_to_light_client_header(finalized_block)? 
+ } else { + LightClientHeaderCapella::default() + } } else { LightClientHeaderCapella::default() }; @@ -253,12 +261,16 @@ impl LightClientUpdate { signature_slot: block_slot, }) } - ForkName::Deneb => { + fork_name @ ForkName::Deneb => { let attested_header = LightClientHeaderDeneb::block_to_light_client_header(attested_block)?; let finalized_header = if let Some(finalized_block) = finalized_block { - LightClientHeaderDeneb::block_to_light_client_header(finalized_block)? + if finalized_block.fork_name_unchecked() == fork_name { + LightClientHeaderDeneb::block_to_light_client_header(finalized_block)? + } else { + LightClientHeaderDeneb::default() + } } else { LightClientHeaderDeneb::default() }; @@ -273,12 +285,16 @@ impl LightClientUpdate { signature_slot: block_slot, }) } - ForkName::Electra => { + fork_name @ ForkName::Electra => { let attested_header = LightClientHeaderElectra::block_to_light_client_header(attested_block)?; let finalized_header = if let Some(finalized_block) = finalized_block { - LightClientHeaderElectra::block_to_light_client_header(finalized_block)? + if finalized_block.fork_name_unchecked() == fork_name { + LightClientHeaderElectra::block_to_light_client_header(finalized_block)? + } else { + LightClientHeaderElectra::default() + } } else { LightClientHeaderElectra::default() }; @@ -293,12 +309,16 @@ impl LightClientUpdate { signature_slot: block_slot, }) } - ForkName::Fulu => { + fork_name @ ForkName::Fulu => { let attested_header = LightClientHeaderFulu::block_to_light_client_header(attested_block)?; let finalized_header = if let Some(finalized_block) = finalized_block { - LightClientHeaderFulu::block_to_light_client_header(finalized_block)? + if finalized_block.fork_name_unchecked() == fork_name { + LightClientHeaderFulu::block_to_light_client_header(finalized_block)? 
+ } else { + LightClientHeaderFulu::default() + } } else { LightClientHeaderFulu::default() }; diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index b7c226f8cd..c4f4d1699c 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "7.0.0-beta.0" +version = "7.0.0-beta.4" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index d941293e91..b968440f44 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "7.0.0-beta.0" +version = "7.0.0-beta.4" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 96c1b5313a..28594af1da 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1873,7 +1873,7 @@ fn block_cache_size_flag() { fn state_cache_size_default() { CommandLineTest::new() .run_with_zero_port() - .with_config(|config| assert_eq!(config.store.state_cache_size, new_non_zero_usize(128))); + .with_config(|config| assert_eq!(config.store.state_cache_size, new_non_zero_usize(32))); } #[test] fn state_cache_size_flag() { @@ -1883,6 +1883,21 @@ fn state_cache_size_flag() { .with_config(|config| assert_eq!(config.store.state_cache_size, new_non_zero_usize(64))); } #[test] +fn state_cache_headroom_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.store.state_cache_headroom, new_non_zero_usize(1))); +} +#[test] +fn state_cache_headroom_flag() { + CommandLineTest::new() + .flag("state-cache-headroom", Some("16")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.store.state_cache_headroom, new_non_zero_usize(16)) + }); +} +#[test] fn historic_state_cache_size_flag() { CommandLineTest::new() .flag("historic-state-cache-size", Some("4")) @@ -1972,7 
+1987,7 @@ fn prune_blobs_on_startup_false() { fn epochs_per_blob_prune_default() { CommandLineTest::new() .run_with_zero_port() - .with_config(|config| assert!(config.store.epochs_per_blob_prune == 1)); + .with_config(|config| assert_eq!(config.store.epochs_per_blob_prune, 256)); } #[test] fn epochs_per_blob_prune_on_startup_five() { @@ -2796,4 +2811,41 @@ fn data_column_publishing_delay_for_testing() { Some(Duration::from_secs_f64(3.5f64)) ); }); +fn invalid_block_roots_flag() { + let dir = TempDir::new().expect("Unable to create temporary directory"); + let mut file = + File::create(dir.path().join("invalid-block-roots")).expect("Unable to create file"); + file.write_all(b"2db899881ed8546476d0b92c6aa9110bea9a4cd0dbeb5519eb0ea69575f1f359, 2db899881ed8546476d0b92c6aa9110bea9a4cd0dbeb5519eb0ea69575f1f358, 0x3db899881ed8546476d0b92c6aa9110bea9a4cd0dbeb5519eb0ea69575f1f358") + .expect("Unable to write to file"); + CommandLineTest::new() + .flag( + "invalid-block-roots", + dir.path().join("invalid-block-roots").as_os_str().to_str(), + ) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.invalid_block_roots.len(), 3)) +} + +#[test] +fn invalid_block_roots_default_holesky() { + use beacon_node::beacon_chain::chain_config::INVALID_HOLESKY_BLOCK_ROOT; + CommandLineTest::new() + .flag("network", Some("holesky")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.chain.invalid_block_roots.len(), 1); + assert!(config + .chain + .invalid_block_roots + .contains(&*INVALID_HOLESKY_BLOCK_ROOT)); + }) +} + +#[test] +fn invalid_block_roots_default_mainnet() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert!(config.chain.invalid_block_roots.is_empty()); + }) } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 2f97cdf5b9..01c87b40fc 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -29,6 +29,9 @@ 
use types::{ IndexedAttestation, KzgProof, ProposerPreparationData, SignedBeaconBlock, Slot, Uint256, }; +// When set to true, cache any states fetched from the db. +pub const CACHE_STATE_IN_TESTS: bool = true; + #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] #[serde(deny_unknown_fields)] pub struct PowBlock { @@ -545,10 +548,15 @@ impl Tester { .unwrap() { let parent_state_root = parent_block.state_root(); + let mut state = self .harness .chain - .get_state(&parent_state_root, Some(parent_block.slot())) + .get_state( + &parent_state_root, + Some(parent_block.slot()), + CACHE_STATE_IN_TESTS, + ) .unwrap() .unwrap(); diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 03cc17fab3..35c2508b53 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -264,6 +264,11 @@ pub(crate) async fn verify_light_client_updates( let slot = Slot::new(slot); let previous_slot = slot - 1; + let sync_committee_period = slot + .epoch(E::slots_per_epoch()) + .sync_committee_period(&E::default_spec()) + .unwrap(); + let previous_slot_block = client .get_beacon_blocks::(BlockId::Slot(previous_slot)) .await @@ -329,6 +334,20 @@ pub(crate) async fn verify_light_client_updates( "Existing finality update too old: signature slot {signature_slot}, current slot {slot:?}" )); } + + let light_client_updates = client + .get_beacon_light_client_updates::(sync_committee_period, 1) + .await + .map_err(|e| format!("Error while getting light client update: {:?}", e))? + .ok_or(format!("Light client update not found {slot:?}"))?; + + // Ensure we're only storing a single light client update for the given sync committee period + if light_client_updates.len() != 1 { + return Err(format!( + "{} light client updates was returned when only one was expected.", + light_client_updates.len() + )); + } } Ok(())