diff --git a/Cargo.lock b/Cargo.lock index 984e4da8d3..506ae6cfaf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -808,7 +808,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "7.0.0-beta.0" +version = "7.0.0-beta.4" dependencies = [ "account_utils", "beacon_chain", @@ -1046,7 +1046,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "7.0.0-beta.0" +version = "7.0.0-beta.4" dependencies = [ "beacon_node", "bytes", @@ -1915,7 +1915,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "18e4fdb82bd54a12e42fb58a800dcae6b9e13982238ce2296dc3570b92148e1f" dependencies = [ "data-encoding", - "syn 2.0.100", + "syn 1.0.109", ] [[package]] @@ -4691,7 +4691,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "7.0.0-beta.0" +version = "7.0.0-beta.4" dependencies = [ "account_utils", "beacon_chain", @@ -4762,7 +4762,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -4884,8 +4884,8 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.48.1" -source = "git+https://github.com/sigp/rust-libp2p.git?branch=sigp-gossipsub#3e24b1bbec5fae182595aee0958f823be87afaad" +version = "0.49.0" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=7a36e4c#7a36e4cde83041f1bd5f2078c4d3934ccb16777e" dependencies = [ "async-channel 2.3.1", "asynchronous-codec", @@ -5253,7 +5253,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "7.0.0-beta.0" +version = "7.0.0-beta.4" dependencies = [ "account_manager", "account_utils", @@ -7293,9 +7293,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.13" +version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"70ac5d832aa16abd7d1def883a8545280c20a60f523a370aa3a9617c2b8550ee" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", @@ -9470,7 +9470,6 @@ name = "validator_test_rig" version = "0.1.0" dependencies = [ "eth2", - "logging", "mockito", "regex", "sensitive_url", diff --git a/Cargo.toml b/Cargo.toml index 3d504141e6..d7b26cd03b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -144,6 +144,7 @@ fnv = "1" fs2 = "0.4" futures = "0.3" graffiti_file = { path = "validator_client/graffiti_file" } +gossipsub = { package = "libp2p-gossipsub", git = "https://github.com/sigp/rust-libp2p.git", rev = "7a36e4c" } hex = "0.4" hashlink = "0.9.0" hyper = "1" diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index e30705719e..a537a1722c 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "7.0.0-beta.0" +version = "7.0.0-beta.4" authors = [ "Paul Hauner ", "Age Manning BeaconChain { .state_root_at_slot(state_slot)? .ok_or(BeaconChainError::NoStateForSlot(state_slot))?; + // This branch is reached from the HTTP API. We assume the user wants + // to cache states so that future calls are faster. let state = self - .get_state(&state_root, Some(state_slot))? + .get_state(&state_root, Some(state_slot), true)? 
.ok_or(BeaconChainError::MissingBeaconState(state_root))?; if state.fork_name_unchecked().altair_enabled() { diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index baacd93c45..6f1174c1ba 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -1126,6 +1126,12 @@ fn verify_head_block_is_known( } } + if !verify_attestation_is_finalized_checkpoint_or_descendant(attestation.data(), chain) { + return Err(Error::HeadBlockFinalized { + beacon_block_root: attestation.data().beacon_block_root, + }); + } + Ok(block) } else if chain.is_pre_finalization_block(attestation.data().beacon_block_root)? { Err(Error::HeadBlockFinalized { @@ -1359,6 +1365,29 @@ pub fn verify_committee_index(attestation: AttestationRef) -> Res Ok(()) } +fn verify_attestation_is_finalized_checkpoint_or_descendant( + attestation_data: &AttestationData, + chain: &BeaconChain, +) -> bool { + // If we have a split block newer than finalization then we also ban attestations which are not + // descended from that split block. It's important not to try checking `is_descendant` if + // finality is ahead of the split and the split block has been pruned, as `is_descendant` will + // return `false` in this case. + let fork_choice = chain.canonical_head.fork_choice_read_lock(); + let attestation_block_root = attestation_data.beacon_block_root; + let finalized_slot = fork_choice + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let split = chain.store.get_split_info(); + let is_descendant_from_split_block = split.slot == 0 + || split.slot <= finalized_slot + || fork_choice.is_descendant(split.block_root, attestation_block_root); + + fork_choice.is_finalized_checkpoint_or_descendant(attestation_block_root) + && is_descendant_from_split_block +} + /// Assists in readability. 
type CommitteesPerSlot = u64; diff --git a/beacon_node/beacon_chain/src/attester_cache.rs b/beacon_node/beacon_chain/src/attester_cache.rs index 7f356bd621..ae715afcd0 100644 --- a/beacon_node/beacon_chain/src/attester_cache.rs +++ b/beacon_node/beacon_chain/src/attester_cache.rs @@ -325,8 +325,10 @@ impl AttesterCache { return Ok(value); } + // We use `cache_state = true` here because if we are attesting to the state it's likely + // to be recent and useful for other things. let mut state: BeaconState = chain - .get_state(&state_root, None)? + .get_state(&state_root, None, true)? .ok_or(Error::MissingBeaconState(state_root))?; if state.slot() > slot { diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0defbecf35..0ae9c77001 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -42,7 +42,7 @@ use crate::light_client_optimistic_update_verification::{ Error as LightClientOptimisticUpdateError, VerifiedLightClientOptimisticUpdate, }; use crate::light_client_server_cache::LightClientServerCache; -use crate::migrate::BackgroundMigrator; +use crate::migrate::{BackgroundMigrator, ManualFinalizationNotification}; use crate::naive_aggregation_pool::{ AggregatedAttestationMap, Error as NaiveAggregationError, NaiveAggregationPool, SyncContributionAggregateMap, @@ -118,8 +118,8 @@ use std::sync::Arc; use std::time::Duration; use store::iter::{BlockRootsIterator, ParentRootBlockIterator, StateRootsIterator}; use store::{ - BlobSidecarListFromRoot, DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore, - KeyValueStoreOp, StoreItem, StoreOp, + BlobSidecarListFromRoot, DatabaseBlock, Error as DBError, HotColdDB, HotStateSummary, + KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; use task_executor::{ShutdownReason, TaskExecutor}; use tokio::sync::oneshot; @@ -812,8 +812,10 @@ impl BeaconChain { let block = self .get_blinded_block(&block_root)? 
.ok_or(Error::MissingBeaconBlock(block_root))?; + // This method is only used in tests, so we may as well cache states to make CI go brr. + // TODO(release-v7) move this method out of beacon chain and into `store_tests`` or something equivalent. let state = self - .get_state(&block.state_root(), Some(block.slot()))? + .get_state(&block.state_root(), Some(block.slot()), true)? .ok_or_else(|| Error::MissingBeaconState(block.state_root()))?; let iter = BlockRootsIterator::owned(&self.store, state); Ok(std::iter::once(Ok((block_root, block.slot()))) @@ -1339,8 +1341,9 @@ impl BeaconChain { &self, state_root: &Hash256, slot: Option, + update_cache: bool, ) -> Result>, Error> { - Ok(self.store.get_state(state_root, slot)?) + Ok(self.store.get_state(state_root, slot, update_cache)?) } /// Return the sync committee at `slot + 1` from the canonical chain. @@ -1512,8 +1515,14 @@ impl BeaconChain { })? .ok_or(Error::NoStateForSlot(slot))?; + // This branch is mostly reached from the HTTP API when doing analysis, or in niche + // situations when producing a block. In the HTTP API case we assume the user wants + // to cache states so that future calls are faster, and that if the cache is + // struggling due to non-finality that they will dial down inessential calls. In the + // block proposal case we want to cache the state so that we can process the block + // quickly after it has been signed. Ok(self - .get_state(&state_root, Some(slot))? + .get_state(&state_root, Some(slot), true)? .ok_or(Error::NoStateForSlot(slot))?) } } @@ -1695,6 +1704,45 @@ impl BeaconChain { } } + pub fn manually_compact_database(&self) { + self.store_migrator.process_manual_compaction(); + } + + pub fn manually_finalize_state( + &self, + state_root: Hash256, + checkpoint: Checkpoint, + ) -> Result<(), Error> { + let HotStateSummary { + slot, + latest_block_root, + .. + } = self + .store + .load_hot_state_summary(&state_root) + .map_err(BeaconChainError::DBError)? 
+ .ok_or(BeaconChainError::MissingHotStateSummary(state_root))?; + + if slot != checkpoint.epoch.start_slot(T::EthSpec::slots_per_epoch()) + || latest_block_root != *checkpoint.root + { + return Err(BeaconChainError::InvalidCheckpoint { + state_root, + checkpoint, + }); + } + + let notif = ManualFinalizationNotification { + state_root: state_root.into(), + checkpoint, + head_tracker: self.head_tracker.clone(), + genesis_block_root: self.genesis_block_root, + }; + + self.store_migrator.process_manual_finalization(notif); + Ok(()) + } + /// Returns an aggregated `Attestation`, if any, that has a matching `attestation.data`. /// /// The attestation will be obtained from `self.naive_aggregation_pool`. @@ -2839,6 +2887,15 @@ impl BeaconChain { chain_segment: Vec>, notify_execution_layer: NotifyExecutionLayer, ) -> ChainSegmentResult { + for block in chain_segment.iter() { + if let Err(error) = self.check_invalid_block_roots(block.block_root()) { + return ChainSegmentResult::Failed { + imported_blocks: vec![], + error, + }; + } + } + let mut imported_blocks = vec![]; // Filter uninteresting blocks from the chain segment in a blocking task. @@ -3330,6 +3387,15 @@ impl BeaconChain { self.remove_notified(&block_root, r) } + /// Check for known and configured invalid block roots before processing. + pub fn check_invalid_block_roots(&self, block_root: Hash256) -> Result<(), BlockError> { + if self.config.invalid_block_roots.contains(&block_root) { + Err(BlockError::KnownInvalidExecutionPayload(block_root)) + } else { + Ok(()) + } + } + /// Returns `Ok(block_root)` if the given `unverified_block` was successfully verified and /// imported into the chain. /// @@ -6775,9 +6841,11 @@ impl BeaconChain { })?; let beacon_state_root = beacon_block.state_root(); + // This branch is reached from the HTTP API. We assume the user wants + // to cache states so that future calls are faster. let mut beacon_state = self .store - .get_state(&beacon_state_root, Some(beacon_block.slot()))? 
+ .get_state(&beacon_state_root, Some(beacon_block.slot()), true)? .ok_or_else(|| { Error::DBInconsistent(format!("Missing state {:?}", beacon_state_root)) })?; @@ -6929,8 +6997,10 @@ impl BeaconChain { if signed_beacon_block.slot() % T::EthSpec::slots_per_epoch() == 0 { let block = self.get_blinded_block(&block_hash).unwrap().unwrap(); + // This branch is reached from the HTTP API. We assume the user wants + // to cache states so that future calls are faster. let state = self - .get_state(&block.state_root(), Some(block.slot())) + .get_state(&block.state_root(), Some(block.slot()), true) .unwrap() .unwrap(); finalized_blocks.insert(state.finalized_checkpoint().root); diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 88df48d0e9..599004d8bf 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -282,6 +282,9 @@ pub enum BlockError { /// problems to worry about than losing peers, and we're doing the network a favour by /// disconnecting. ParentExecutionPayloadInvalid { parent_root: Hash256 }, + /// This is a known invalid block that was listed in Lighthouses configuration. + /// At the moment this error is only relevant as part of the Holesky network recovery efforts. + KnownInvalidExecutionPayload(Hash256), /// The block is a slashable equivocation from the proposer. /// /// ## Peer scoring @@ -862,6 +865,9 @@ impl GossipVerifiedBlock { return Err(BlockError::DuplicateFullyImported(block_root)); } + // Do not process a block that is known to be invalid. + chain.check_invalid_block_roots(block_root)?; + // Do not process a block that doesn't descend from the finalized root. // // We check this *before* we load the parent so that we can return a more detailed error. 
@@ -1080,6 +1086,9 @@ impl SignatureVerifiedBlock { .fork_name(&chain.spec) .map_err(BlockError::InconsistentFork)?; + // Check whether the block is a banned block prior to loading the parent. + chain.check_invalid_block_roots(block_root)?; + let (mut parent, block) = load_parent(block, chain)?; let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( @@ -1746,7 +1755,22 @@ pub fn check_block_is_finalized_checkpoint_or_descendant< fork_choice: &BeaconForkChoice, block: B, ) -> Result { - if fork_choice.is_finalized_checkpoint_or_descendant(block.parent_root()) { + // If we have a split block newer than finalization then we also ban blocks which are not + // descended from that split block. It's important not to try checking `is_descendant` if + // finality is ahead of the split and the split block has been pruned, as `is_descendant` will + // return `false` in this case. + let finalized_slot = fork_choice + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let split = chain.store.get_split_info(); + let is_descendant_from_split_block = split.slot == 0 + || split.slot <= finalized_slot + || fork_choice.is_descendant(split.block_root, block.parent_root()); + + if fork_choice.is_finalized_checkpoint_or_descendant(block.parent_root()) + && is_descendant_from_split_block + { Ok(block) } else { // If fork choice does *not* consider the parent to be a descendant of the finalized block, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 78216770e5..010190bfbc 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -282,8 +282,13 @@ where .get_blinded_block(&chain.genesis_block_root) .map_err(|e| descriptive_db_error("genesis block", &e))? .ok_or("Genesis block not found in store")?; + // We're resuming from some state in the db so it makes sense to cache it. 
let genesis_state = store - .get_state(&genesis_block.state_root(), Some(genesis_block.slot())) + .get_state( + &genesis_block.state_root(), + Some(genesis_block.slot()), + true, + ) .map_err(|e| descriptive_db_error("genesis state", &e))? .ok_or("Genesis state not found in store")?; diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index bac47f5da7..d99c6038d3 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -773,6 +773,12 @@ impl BeaconChain { .execution_status .is_optimistic_or_invalid(); + // Update the state cache so it doesn't mistakenly prune the new head. + self.store + .state_cache + .lock() + .update_head_block_root(new_cached_head.head_block_root()); + // Detect and potentially report any re-orgs. let reorg_distance = detect_reorg( &old_snapshot.beacon_state, diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index b881438c1c..808c96d965 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -1,7 +1,8 @@ pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use serde::{Deserialize, Serialize}; -use std::time::Duration; -use types::{Checkpoint, Epoch}; +use std::str::FromStr; +use std::{collections::HashSet, sync::LazyLock, time::Duration}; +use types::{Checkpoint, Epoch, Hash256}; pub const DEFAULT_RE_ORG_HEAD_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20); pub const DEFAULT_RE_ORG_PARENT_THRESHOLD: ReOrgThreshold = ReOrgThreshold(160); @@ -19,6 +20,12 @@ pub const FORK_CHOICE_LOOKAHEAD_FACTOR: u32 = 24; /// Default sync tolerance epochs. pub const DEFAULT_SYNC_TOLERANCE_EPOCHS: u64 = 2; +/// Invalid block root to be banned from processing and importing on Holesky network by default. 
+pub static INVALID_HOLESKY_BLOCK_ROOT: LazyLock = LazyLock::new(|| { + Hash256::from_str("2db899881ed8546476d0b92c6aa9110bea9a4cd0dbeb5519eb0ea69575f1f359") + .expect("valid block root") +}); + #[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)] pub struct ChainConfig { /// Maximum number of slots to skip when importing an attestation. @@ -104,6 +111,11 @@ pub struct ChainConfig { pub block_publishing_delay: Option, /// Artificial delay for data column publishing. For PeerDAS testing only. pub data_column_publishing_delay: Option, + /// Block roots of "banned" blocks which Lighthouse will refuse to import. + /// + /// On Holesky there is a block which is added to this set by default but which can be removed + /// by using `--invalid-block-roots ""`. + pub invalid_block_roots: HashSet, } impl Default for ChainConfig { @@ -142,6 +154,7 @@ impl Default for ChainConfig { sync_tolerance_epochs: DEFAULT_SYNC_TOLERANCE_EPOCHS, block_publishing_delay: None, data_column_publishing_delay: None, + invalid_block_roots: HashSet::new(), } } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 2e13ab4090..8509c52c8a 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -61,6 +61,7 @@ pub enum BeaconChainError { ForkChoiceStoreError(ForkChoiceStoreError), MissingBeaconBlock(Hash256), MissingBeaconState(Hash256), + MissingHotStateSummary(Hash256), SlotProcessingError(SlotProcessingError), EpochProcessingError(EpochProcessingError), StateAdvanceError(StateAdvanceError), @@ -181,9 +182,9 @@ pub enum BeaconChainError { execution_block_hash: Option, }, ForkchoiceUpdate(execution_layer::Error), - FinalizedCheckpointMismatch { - head_state: Checkpoint, - fork_choice: Hash256, + InvalidCheckpoint { + state_root: Hash256, + checkpoint: Checkpoint, }, InvalidSlot(Slot), HeadBlockNotFullyVerified { diff --git a/beacon_node/beacon_chain/src/fork_revert.rs 
b/beacon_node/beacon_chain/src/fork_revert.rs index c500e1b4b6..cde2950c89 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -113,8 +113,9 @@ pub fn reset_fork_choice_to_finalization, Cold: It // Advance finalized state to finalized epoch (to handle skipped slots). let finalized_state_root = finalized_block.state_root(); + // The enshrined finalized state should be in the state cache. let mut finalized_state = store - .get_state(&finalized_state_root, Some(finalized_block.slot())) + .get_state(&finalized_state_root, Some(finalized_block.slot()), true) .map_err(|e| format!("Error loading finalized state: {:?}", e))? .ok_or_else(|| { format!( diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index c9173dc0d7..8e29be9732 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -317,8 +317,11 @@ impl LightClientServerCache { metrics::inc_counter(&metrics::LIGHT_CLIENT_SERVER_CACHE_PREV_BLOCK_CACHE_MISS); // Compute the value, handling potential errors. + // This state should already be cached. By electing not to cache it here + // we remove any chance of the light client server from affecting the state cache. + // We'd like the light client server to be as minimally invasive as possible. let mut state = store - .get_state(block_state_root, Some(block_slot))? + .get_state(block_state_root, Some(block_slot), false)? 
.ok_or_else(|| { BeaconChainError::DBInconsistent(format!("Missing state {:?}", block_state_root)) })?; diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index b64da00e76..cda5b34103 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -123,14 +123,23 @@ pub enum Notification { Finalization(FinalizationNotification), Reconstruction, PruneBlobs(Epoch), + ManualFinalization(ManualFinalizationNotification), + ManualCompaction, +} + +pub struct ManualFinalizationNotification { + pub state_root: BeaconStateHash, + pub checkpoint: Checkpoint, + pub head_tracker: Arc, + pub genesis_block_root: Hash256, } pub struct FinalizationNotification { - finalized_state_root: BeaconStateHash, - finalized_checkpoint: Checkpoint, - head_tracker: Arc, - prev_migration: Arc>, - genesis_block_root: Hash256, + pub finalized_state_root: BeaconStateHash, + pub finalized_checkpoint: Checkpoint, + pub head_tracker: Arc, + pub prev_migration: Arc>, + pub genesis_block_root: Hash256, } impl, Cold: ItemStore> BackgroundMigrator { @@ -187,6 +196,22 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator>, + notif: ManualFinalizationNotification, + ) { + // We create a "dummy" prev migration + let prev_migration = PrevMigration { + epoch: Epoch::new(1), + epochs_per_migration: 2, + }; + let notif = FinalizationNotification { + finalized_state_root: notif.state_root, + finalized_checkpoint: notif.checkpoint, + head_tracker: notif.head_tracker, + prev_migration: Arc::new(prev_migration.into()), + genesis_block_root: notif.genesis_block_root, + }; + Self::run_migration(db, notif); + } + /// Perform the actual work of `process_finalization`. fn run_migration(db: Arc>, notif: FinalizationNotification) { // Do not run too frequently. 
@@ -303,7 +347,8 @@ impl, Cold: ItemStore> BackgroundMigrator state, other => { error!( @@ -382,6 +427,15 @@ impl, Cold: ItemStore> BackgroundMigrator>) { + debug!("Running manual compaction"); + if let Err(error) = db.compact() { + warn!(?error, "Database compaction failed"); + } else { + debug!("Manual compaction completed"); + } + } + /// Spawn a new child thread to run the migration process. /// /// Return a channel handle for sending requests to the thread. @@ -394,16 +448,30 @@ impl, Cold: ItemStore> BackgroundMigrator reconstruction_notif = Some(notif), Notification::Finalization(fin) => finalization_notif = Some(fin), + Notification::ManualFinalization(fin) => manual_finalization_notif = Some(fin), Notification::PruneBlobs(dab) => prune_blobs_notif = Some(dab), + Notification::ManualCompaction => manual_compaction_notif = Some(notif), } // Read the rest of the messages in the channel, taking the best of each type. for notif in rx.try_iter() { match notif { Notification::Reconstruction => reconstruction_notif = Some(notif), + Notification::ManualCompaction => manual_compaction_notif = Some(notif), + Notification::ManualFinalization(fin) => { + if let Some(current) = manual_finalization_notif.as_mut() { + if fin.checkpoint.epoch > current.checkpoint.epoch { + *current = fin; + } + } else { + manual_finalization_notif = Some(fin); + } + } Notification::Finalization(fin) => { if let Some(current) = finalization_notif.as_mut() { if fin.finalized_checkpoint.epoch @@ -426,12 +494,18 @@ impl, Cold: ItemStore> BackgroundMigrator Option> { self.chain .store - .load_hot_state(&state_hash.into()) + .load_hot_state(&state_hash.into(), true) .unwrap() .map(|(state, _)| state) } diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index dcc63ddf62..30eec539fc 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs 
@@ -36,6 +36,9 @@ pub const VALIDATOR_COUNT: usize = 256; pub const CAPELLA_FORK_EPOCH: usize = 1; +// When set to true, cache any states fetched from the db. +pub const CACHE_STATE_IN_TESTS: bool = true; + /// A cached set of keys. static KEYPAIRS: LazyLock> = LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); @@ -1225,7 +1228,11 @@ async fn attestation_that_skips_epochs() { let mut state = harness .chain - .get_state(&earlier_block.state_root(), Some(earlier_slot)) + .get_state( + &earlier_block.state_root(), + Some(earlier_slot), + CACHE_STATE_IN_TESTS, + ) .expect("should not error getting state") .expect("should find state"); @@ -1329,9 +1336,14 @@ async fn attestation_validator_receive_proposer_reward_and_withdrawals() { .await; let current_slot = harness.get_current_slot(); + let mut state = harness .chain - .get_state(&earlier_block.state_root(), Some(earlier_slot)) + .get_state( + &earlier_block.state_root(), + Some(earlier_slot), + CACHE_STATE_IN_TESTS, + ) .expect("should not error getting state") .expect("should find state"); @@ -1399,7 +1411,11 @@ async fn attestation_to_finalized_block() { let mut state = harness .chain - .get_state(&earlier_block.state_root(), Some(earlier_slot)) + .get_state( + &earlier_block.state_root(), + Some(earlier_slot), + CACHE_STATE_IN_TESTS, + ) .expect("should not error getting state") .expect("should find state"); diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index b75b06caff..6226ed39cb 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -18,6 +18,9 @@ use types::{ChainSpec, ForkName, Slot}; pub const VALIDATOR_COUNT: usize = 64; +// When set to true, cache any states fetched from the db. 
+pub const CACHE_STATE_IN_TESTS: bool = true; + type E = MinimalEthSpec; static KEYPAIRS: LazyLock> = @@ -114,8 +117,13 @@ async fn test_sync_committee_rewards() { .get_blinded_block(&block.parent_root()) .unwrap() .unwrap(); + let parent_state = chain - .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .get_state( + &parent_block.state_root(), + Some(parent_block.slot()), + CACHE_STATE_IN_TESTS, + ) .unwrap() .unwrap(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 9212ed998d..38ff87d0c8 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -39,6 +39,9 @@ use types::*; pub const LOW_VALIDATOR_COUNT: usize = 24; pub const HIGH_VALIDATOR_COUNT: usize = 64; +// When set to true, cache any states fetched from the db. +pub const CACHE_STATE_IN_TESTS: bool = true; + /// A cached set of keys. static KEYPAIRS: LazyLock> = LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(HIGH_VALIDATOR_COUNT)); @@ -756,6 +759,7 @@ async fn delete_blocks_and_states() { .get_state( &faulty_head_block.state_root(), Some(faulty_head_block.slot()), + CACHE_STATE_IN_TESTS, ) .expect("no db error") .expect("faulty head state exists"); @@ -769,7 +773,12 @@ async fn delete_blocks_and_states() { break; } store.delete_state(&state_root, slot).unwrap(); - assert_eq!(store.get_state(&state_root, Some(slot)).unwrap(), None); + assert_eq!( + store + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) + .unwrap(), + None + ); } // Double-deleting should also be OK (deleting non-existent things is fine) @@ -1053,7 +1062,11 @@ fn get_state_for_block(harness: &TestHarness, block_root: Hash256) -> BeaconStat .unwrap(); harness .chain - .get_state(&head_block.state_root(), Some(head_block.slot())) + .get_state( + &head_block.state_root(), + Some(head_block.slot()), + CACHE_STATE_IN_TESTS, + ) .unwrap() .unwrap() } @@ -1890,7 +1903,10 @@ fn 
check_all_states_exist<'a>( states: impl Iterator, ) { for &state_hash in states { - let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); + let state = harness + .chain + .get_state(&state_hash.into(), None, CACHE_STATE_IN_TESTS) + .unwrap(); assert!( state.is_some(), "expected state {:?} to be in DB", @@ -1908,7 +1924,7 @@ fn check_no_states_exist<'a>( assert!( harness .chain - .get_state(&state_root.into(), None) + .get_state(&state_root.into(), None, CACHE_STATE_IN_TESTS) .unwrap() .is_none(), "state {:?} should not be in the DB", @@ -2342,7 +2358,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .get_or_reconstruct_blobs(&wss_block_root) .unwrap(); let wss_state = full_store - .get_state(&wss_state_root, Some(checkpoint_slot)) + .get_state(&wss_state_root, Some(checkpoint_slot), CACHE_STATE_IN_TESTS) .unwrap() .unwrap(); @@ -2454,7 +2470,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { // Check that the new block's state can be loaded correctly. 
let mut state = beacon_chain .store - .get_state(&state_root, Some(slot)) + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) .unwrap() .unwrap(); assert_eq!(state.update_tree_hash_cache().unwrap(), state_root); @@ -2584,7 +2600,10 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .unwrap() .map(Result::unwrap) { - let mut state = store.get_state(&state_root, Some(slot)).unwrap().unwrap(); + let mut state = store + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) + .unwrap() + .unwrap(); assert_eq!(state.slot(), slot); assert_eq!(state.canonical_root().unwrap(), state_root); } @@ -3410,9 +3429,10 @@ async fn prune_historic_states() { let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let genesis_state_root = harness.chain.genesis_state_root; + let genesis_state = harness .chain - .get_state(&genesis_state_root, None) + .get_state(&genesis_state_root, None, CACHE_STATE_IN_TESTS) .unwrap() .unwrap(); @@ -3433,7 +3453,10 @@ async fn prune_historic_states() { .map(Result::unwrap) .collect::>(); for &(state_root, slot) in &first_epoch_state_roots { - assert!(store.get_state(&state_root, Some(slot)).unwrap().is_some()); + assert!(store + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) + .unwrap() + .is_some()); } store @@ -3448,7 +3471,10 @@ async fn prune_historic_states() { // Ensure all epoch 0 states other than the genesis have been pruned. 
for &(state_root, slot) in &first_epoch_state_roots { assert_eq!( - store.get_state(&state_root, Some(slot)).unwrap().is_some(), + store + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) + .unwrap() + .is_some(), slot == 0 ); } @@ -3574,7 +3600,7 @@ fn check_chain_dump(harness: &TestHarness, expected_len: u64) { harness .chain .store - .get_state(&checkpoint.beacon_state_root(), None) + .get_state(&checkpoint.beacon_state_root(), None, CACHE_STATE_IN_TESTS) .expect("no error") .expect("state exists") .slot(), @@ -3636,7 +3662,7 @@ fn check_iterators(harness: &TestHarness) { harness .chain .store - .get_state(&state_root, Some(slot)) + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) .unwrap() .is_some(), "state {:?} from canonical chain should be in DB", diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 6d30b8a4e3..c8bbcce20d 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -21,6 +21,9 @@ pub type E = MainnetEthSpec; pub const VALIDATOR_COUNT: usize = 256; +// When set to true, cache any states fetched from the db. +pub const CACHE_STATE_IN_TESTS: bool = true; + /// A cached set of keys. static KEYPAIRS: LazyLock> = LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); @@ -755,7 +758,10 @@ async fn unaggregated_gossip_verification() { // Load the block and state for the given root. let block = chain.get_block(&root).await.unwrap().unwrap(); - let mut state = chain.get_state(&block.state_root(), None).unwrap().unwrap(); + let mut state = chain + .get_state(&block.state_root(), None, CACHE_STATE_IN_TESTS) + .unwrap() + .unwrap(); // Advance the state to simulate a pre-state for block production. 
let slot = valid_sync_committee_message.slot + 1; diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index c641f32b82..c801361fd5 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -12,10 +12,12 @@ use operation_pool::PersistedOperationPool; use state_processing::{per_slot_processing, per_slot_processing::Error as SlotProcessingError}; use std::sync::LazyLock; use types::{ - BeaconState, BeaconStateError, BlockImportSource, EthSpec, Hash256, Keypair, MinimalEthSpec, - RelativeEpoch, Slot, + BeaconState, BeaconStateError, BlockImportSource, Checkpoint, EthSpec, Hash256, Keypair, + MinimalEthSpec, RelativeEpoch, Slot, }; +type E = MinimalEthSpec; + // Should ideally be divisible by 3. pub const VALIDATOR_COUNT: usize = 48; @@ -24,12 +26,22 @@ static KEYPAIRS: LazyLock> = LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); fn get_harness(validator_count: usize) -> BeaconChainHarness> { + get_harness_with_config( + validator_count, + ChainConfig { + reconstruct_historic_states: true, + ..Default::default() + }, + ) +} + +fn get_harness_with_config( + validator_count: usize, + chain_config: ChainConfig, +) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() - .chain_config(ChainConfig { - reconstruct_historic_states: true, - ..ChainConfig::default() - }) + .chain_config(chain_config) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_ephemeral_store() .mock_execution_layer() @@ -869,3 +881,165 @@ async fn block_roots_skip_slot_behaviour() { "WhenSlotSkipped::Prev should return None on a future slot" ); } + +async fn pseudo_finalize_test_generic( + epochs_per_migration: u64, + expect_true_finalization_migration: bool, +) { + // This test ensures that after pseudo finalization, we can still finalize the chain without issues + let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; + + 
let chain_config = ChainConfig { + reconstruct_historic_states: true, + epochs_per_migration, + ..Default::default() + }; + let harness = get_harness_with_config(VALIDATOR_COUNT, chain_config); + + let one_third = VALIDATOR_COUNT / 3; + let attesters = (0..one_third).collect(); + + // extend the chain, but don't finalize + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(attesters), + ) + .await; + + harness.advance_slot(); + + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + let split = harness.chain.store.get_split_info(); + + assert_eq!( + state.slot(), + num_blocks_produced, + "head should be at the current slot" + ); + assert_eq!( + state.current_epoch(), + num_blocks_produced / MinimalEthSpec::slots_per_epoch(), + "head should be at the expected epoch" + ); + assert_eq!( + state.current_justified_checkpoint().epoch, + 0, + "There should be no justified checkpoint" + ); + assert_eq!( + state.finalized_checkpoint().epoch, + 0, + "There should be no finalized checkpoint" + ); + assert_eq!(split.slot, 0, "Our split point should be unset"); + + let checkpoint = Checkpoint { + epoch: head.beacon_state.current_epoch(), + root: head.beacon_block_root, + }; + + // pseudo finalize + harness + .chain + .manually_finalize_state(head.beacon_state_root(), checkpoint) + .unwrap(); + + let split = harness.chain.store.get_split_info(); + let pseudo_finalized_slot = split.slot; + + assert_eq!( + state.current_justified_checkpoint().epoch, + 0, + "We pseudo finalized, but our justified checkpoint should still be unset" + ); + assert_eq!( + state.finalized_checkpoint().epoch, + 0, + "We pseudo finalized, but our finalized checkpoint should still be unset" + ); + assert_eq!( + split.slot, + head.beacon_state.slot(), + "We pseudo finalized, our split point should be at the current head slot" + ); + + // finalize the chain + harness + .extend_chain( + num_blocks_produced as 
usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + harness.advance_slot(); + + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + let split = harness.chain.store.get_split_info(); + + assert_eq!( + state.slot(), + num_blocks_produced * 2, + "head should be at the current slot" + ); + assert_eq!( + state.current_epoch(), + (num_blocks_produced * 2) / MinimalEthSpec::slots_per_epoch(), + "head should be at the expected epoch" + ); + assert_eq!( + state.current_justified_checkpoint().epoch, + state.current_epoch() - 1, + "the head should be justified one behind the current epoch" + ); + let finalized_epoch = state.finalized_checkpoint().epoch; + assert_eq!( + finalized_epoch, + state.current_epoch() - 2, + "the head should be finalized two behind the current epoch" + ); + + let expected_split_slot = if pseudo_finalized_slot.epoch(E::slots_per_epoch()) + + epochs_per_migration + > finalized_epoch + { + pseudo_finalized_slot + } else { + finalized_epoch.start_slot(E::slots_per_epoch()) + }; + assert_eq!( + split.slot, expected_split_slot, + "We finalized, our split point should be updated according to epochs_per_migration" + ); + + // In the case that we did not process the true finalization migration (due to + // epochs_per_migration), check that the chain finalized *despite* the absence of the split + // block in fork choice. 
+ // This is a regression test for https://github.com/sigp/lighthouse/pull/7105 + if !expect_true_finalization_migration { + assert_eq!(expected_split_slot, pseudo_finalized_slot); + assert!(!harness + .chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&split.block_root)); + } +} + +#[tokio::test] +async fn pseudo_finalize_basic() { + let epochs_per_migration = 0; + let expect_true_migration = true; + pseudo_finalize_test_generic(epochs_per_migration, expect_true_migration).await; +} + +#[tokio::test] +async fn pseudo_finalize_with_lagging_split_update() { + let epochs_per_migration = 10; + let expect_true_migration = false; + pseudo_finalize_test_generic(epochs_per_migration, expect_true_migration).await; +} diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index 2f3f340445..23ab5e3752 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -126,8 +126,11 @@ pub fn get_attestation_performance( // Load state for block replay. let state_root = prior_block.state_root(); + + // This branch is reached from the HTTP API. We assume the user wants + // to cache states so that future calls are faster. let state = chain - .get_state(&state_root, Some(prior_slot)) + .get_state(&state_root, Some(prior_slot), true) .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root))) .map_err(unhandled_error)?; diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index 431547f10b..249a6732dc 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -285,8 +285,10 @@ pub fn get_block_packing_efficiency( // Load state for block replay. let starting_state_root = first_block.state_root(); + // This branch is reached from the HTTP API. 
We assume the user wants + // to cache states so that future calls are faster. let starting_state = chain - .get_state(&starting_state_root, Some(prior_slot)) + .get_state(&starting_state_root, Some(prior_slot), true) .and_then(|maybe_state| { maybe_state.ok_or(BeaconChainError::MissingBeaconState(starting_state_root)) }) diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index fbb16e9540..29b23e89a7 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -42,8 +42,10 @@ pub fn get_block_rewards( .map_err(unhandled_error)? .ok_or_else(|| custom_bad_request(format!("prior state at slot {} unknown", prior_slot)))?; + // This branch is reached from the HTTP API. We assume the user wants + // to cache states so that future calls are faster. let mut state = chain - .get_state(&state_root, Some(prior_slot)) + .get_state(&state_root, Some(prior_slot), true) .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root))) .map_err(unhandled_error)?; @@ -124,8 +126,10 @@ pub fn compute_block_rewards( )) })?; + // This branch is reached from the HTTP API. We assume the user wants + // to cache states so that future calls are faster. let parent_state = chain - .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .get_state(&parent_block.state_root(), Some(parent_block.slot()), true) .map_err(unhandled_error)? 
.ok_or_else(|| { custom_bad_request(format!( diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 1b8cdc5605..368faae612 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -87,11 +87,11 @@ use tokio_stream::{ use tracing::{debug, error, info, warn}; use types::{ fork_versioned_response::EmptyMetadata, Attestation, AttestationData, AttestationShufflingId, - AttesterSlashing, BeaconStateError, ChainSpec, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, - ForkName, ForkVersionedResponse, Hash256, ProposerPreparationData, ProposerSlashing, - RelativeEpoch, SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, - SyncCommitteeMessage, SyncContributionData, + AttesterSlashing, BeaconStateError, ChainSpec, Checkpoint, CommitteeCache, ConfigAndPreset, + Epoch, EthSpec, ForkName, ForkVersionedResponse, Hash256, ProposerPreparationData, + ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, SignedBlindedBeaconBlock, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedValidatorRegistrationData, + SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, }; use validator::pubkey_to_validator_index; use version::{ @@ -1078,6 +1078,72 @@ pub fn serve( }, ); + // GET beacon/states/{state_id}/pending_deposits + let get_beacon_state_pending_deposits = beacon_states_path + .clone() + .and(warp::path("pending_deposits")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(deposits) = state.pending_deposits() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending deposits not 
found".to_string(), + )); + }; + + Ok((deposits.clone(), execution_optimistic, finalized)) + }, + )?; + + Ok(api_types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ); + + // GET beacon/states/{state_id}/pending_partial_withdrawals + let get_beacon_state_pending_partial_withdrawals = beacon_states_path + .clone() + .and(warp::path("pending_partial_withdrawals")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(withdrawals) = state.pending_partial_withdrawals() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending withdrawals not found".to_string(), + )); + }; + + Ok((withdrawals.clone(), execution_optimistic, finalized)) + }, + )?; + + Ok(api_types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ); + // GET beacon/headers // // Note: this endpoint only returns information about blocks in the canonical chain. 
Given that @@ -3903,6 +3969,52 @@ pub fn serve( }, ); + // POST lighthouse/finalize + let post_lighthouse_finalize = warp::path("lighthouse") + .and(warp::path("finalize")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |request_data: api_types::ManualFinalizationRequestData, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let checkpoint = Checkpoint { + epoch: request_data.epoch, + root: request_data.block_root, + }; + + chain + .manually_finalize_state(request_data.state_root, checkpoint) + .map(|_| api_types::GenericResponse::from(request_data)) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "Failed to finalize state due to error: {e:?}" + )) + }) + }) + }, + ); + + // POST lighthouse/compaction + let post_lighthouse_compaction = warp::path("lighthouse") + .and(warp::path("compaction")) + .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + chain.manually_compact_database(); + Ok(api_types::GenericResponse::from(String::from( + "Triggered manual compaction", + ))) + }) + }, + ); + // POST lighthouse/liveness let post_lighthouse_liveness = warp::path("lighthouse") .and(warp::path("liveness")) @@ -4557,6 +4669,8 @@ pub fn serve( .uor(get_beacon_state_committees) .uor(get_beacon_state_sync_committees) .uor(get_beacon_state_randao) + .uor(get_beacon_state_pending_deposits) + .uor(get_beacon_state_pending_partial_withdrawals) .uor(get_beacon_headers) .uor(get_beacon_headers_block_id) .uor(get_beacon_block) @@ -4658,6 +4772,8 @@ pub fn serve( .uor(post_lighthouse_block_rewards) .uor(post_lighthouse_ui_validator_metrics) .uor(post_lighthouse_ui_validator_info) + .uor(post_lighthouse_finalize) + .uor(post_lighthouse_compaction) 
.recover(warp_utils::reject::handle_rejection), ), ) diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 353390cdad..a9f66de467 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -189,8 +189,10 @@ impl StateId { _ => (self.root(chain)?, None), }; + // This branch is reached from the HTTP API. We assume the user wants + // to cache states so that future calls are faster. let state = chain - .get_state(&state_root, slot_opt) + .get_state(&state_root, slot_opt, true) .map_err(warp_utils::reject::unhandled_error) .and_then(|opt| { opt.ok_or_else(|| { diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs index d7e8a1f9ef..9bc1f6ead4 100644 --- a/beacon_node/http_api/src/sync_committee_rewards.rs +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -56,8 +56,10 @@ pub fn get_state_before_applying_block( }) .map_err(|e| custom_not_found(format!("Parent block is not available! {:?}", e)))?; + // We are about to apply a new block to the chain. Its parent state + // is a useful/recent state, so we elect to cache it.
let parent_state = chain - .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .get_state(&parent_block.state_root(), Some(parent_block.slot()), true) .and_then(|maybe_state| { maybe_state .ok_or_else(|| BeaconChainError::MissingBeaconState(parent_block.state_root())) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index ddeda8b66d..768441533f 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1187,6 +1187,60 @@ impl ApiTester { self } + pub async fn test_beacon_states_pending_deposits(self) -> Self { + for state_id in self.interesting_state_ids() { + let mut state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic, _finalized)| state); + + let result = self + .client + .get_beacon_states_pending_deposits(state_id.0) + .await + .unwrap() + .map(|res| res.data); + + if result.is_none() && state_opt.is_none() { + continue; + } + + let state = state_opt.as_mut().expect("result should be none"); + let expected = state.pending_deposits().unwrap(); + + assert_eq!(result.unwrap(), expected.to_vec()); + } + + self + } + + pub async fn test_beacon_states_pending_partial_withdrawals(self) -> Self { + for state_id in self.interesting_state_ids() { + let mut state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic, _finalized)| state); + + let result = self + .client + .get_beacon_states_pending_partial_withdrawals(state_id.0) + .await + .unwrap() + .map(|res| res.data); + + if result.is_none() && state_opt.is_none() { + continue; + } + + let state = state_opt.as_mut().expect("result should be none"); + let expected = state.pending_partial_withdrawals().unwrap(); + + assert_eq!(result.unwrap(), expected.to_vec()); + } + + self + } + pub async fn test_beacon_headers_all_slots(self) -> Self { for slot in 0..CHAIN_LENGTH { let slot = Slot::from(slot); @@ -6298,6 +6352,22 @@ async fn beacon_get_state_info() { .await; } 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_get_state_info_electra() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + config.spec.electra_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_beacon_states_pending_deposits() + .await + .test_beacon_states_pending_partial_withdrawals() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn beacon_get_blocks() { ApiTester::new() diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 8cae529de0..4f1825af20 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -18,7 +18,7 @@ ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } fnv = { workspace = true } futures = { workspace = true } -gossipsub = { package = "libp2p-gossipsub", git = "https://github.com/sigp/rust-libp2p.git", branch = "sigp-gossipsub" } +gossipsub = { workspace = true } hex = { workspace = true } itertools = { workspace = true } libp2p-mplex = "0.43" diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 0bf281fd75..3f0b5b96ef 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -996,7 +996,7 @@ impl Network { } } - if let PublishError::InsufficientPeers = e { + if let PublishError::NoPeersSubscribedToTopic = e { self.gossip_cache.insert(topic, message_data); } } diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 4250f8f8bb..4e36953880 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -9,7 +9,7 @@ bls = { 
workspace = true } eth2 = { workspace = true } eth2_network_config = { workspace = true } genesis = { workspace = true } -gossipsub = { package = "libp2p-gossipsub", git = "https://github.com/sigp/rust-libp2p.git", branch = "sigp-gossipsub" } +gossipsub = { workspace = true } k256 = "0.13.4" kzg = { workspace = true } matches = "0.1.8" diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index afab2d178c..34dca4f100 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1403,6 +1403,7 @@ impl NetworkBeaconProcessor { | Err(e @ BlockError::InconsistentFork(_)) | Err(e @ BlockError::ExecutionPayloadError(_)) | Err(e @ BlockError::ParentExecutionPayloadInvalid { .. }) + | Err(e @ BlockError::KnownInvalidExecutionPayload(_)) | Err(e @ BlockError::GenesisBlock) => { warn!(error = %e, "Could not verify block for gossip. 
Rejecting the block"); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 65097da0c6..48ae26c826 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -758,6 +758,18 @@ impl NetworkBeaconProcessor { debug!("Finalized or earlier block processed"); Ok(()) } + BlockError::NotFinalizedDescendant { block_parent_root } => { + debug!( + "Not syncing to a chain that conflicts with the canonical or manual finalized checkpoint" + ); + Err(ChainSegmentFailed { + message: format!( + "Block with parent_root {} conflicts with our checkpoint state", + block_parent_root + ), + peer_action: Some(PeerAction::Fatal), + }) + } BlockError::GenesisBlock => { debug!("Genesis block was processed"); Ok(()) @@ -817,6 +829,14 @@ impl NetworkBeaconProcessor { peer_action: Some(PeerAction::LowToleranceError), }) } + // Penalise peers for sending us banned blocks. 
+ BlockError::KnownInvalidExecutionPayload(block_root) => { + warn!(?block_root, "Received block known to be invalid",); + Err(ChainSegmentFailed { + message: format!("Banned block: {block_root:?}"), + peer_action: Some(PeerAction::Fatal), + }) + } other => { debug!( msg = "peer sent invalid block", diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index cd3f0dcbeb..509caf7316 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -10,8 +10,7 @@ use crate::network_beacon_processor::ChainSegmentProcessId; use crate::sync::manager::BatchProcessResult; -use crate::sync::network_context::RangeRequestId; -use crate::sync::network_context::SyncNetworkContext; +use crate::sync::network_context::{RangeRequestId, RpcResponseError, SyncNetworkContext}; use crate::sync::range_sync::{ BatchConfig, BatchId, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, }; @@ -375,6 +374,7 @@ impl BackFillSync { batch_id: BatchId, peer_id: &PeerId, request_id: Id, + err: RpcResponseError, ) -> Result<(), BackFillError> { if let Some(batch) = self.batches.get_mut(&batch_id) { // A batch could be retried without the peer failing the request (disconnecting/ @@ -385,7 +385,7 @@ impl BackFillSync { if !batch.is_expecting_block(&request_id) { return Ok(()); } - debug!(batch_epoch = %batch_id, error = "rpc_error", "Batch failed"); + debug!(batch_epoch = %batch_id, error = ?err, "Batch download failed"); if let Some(active_requests) = self.active_requests.get_mut(peer_id) { active_requests.remove(&batch_id); } diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 6c8a8eab63..45cb1aeace 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -1,89 +1,156 @@ use beacon_chain::{ 
block_verification_types::RpcBlock, data_column_verification::CustodyDataColumn, get_block_root, }; -use std::{ - collections::{HashMap, VecDeque}, - sync::Arc, +use lighthouse_network::service::api_types::{ + BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, }; +use std::{collections::HashMap, sync::Arc}; use types::{ - BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, EthSpec, Hash256, RuntimeVariableList, - SignedBeaconBlock, + BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, + Hash256, RuntimeVariableList, SignedBeaconBlock, }; -#[derive(Debug)] pub struct RangeBlockComponentsRequest { /// Blocks we have received awaiting for their corresponding sidecar. - blocks: VecDeque>>, + blocks_request: ByRangeRequest>>>, /// Sidecars we have received awaiting for their corresponding block. - blobs: VecDeque>>, - data_columns: VecDeque>>, - /// Whether the individual RPC request for blocks is finished or not. - is_blocks_stream_terminated: bool, - /// Whether the individual RPC request for sidecars is finished or not. - is_sidecars_stream_terminated: bool, - custody_columns_streams_terminated: usize, - /// Used to determine if this accumulator should wait for a sidecars stream termination - expects_blobs: bool, - expects_custody_columns: Option>, - /// Used to determine if the number of data columns stream termination this accumulator should - /// wait for. This may be less than the number of `expects_custody_columns` due to request batching. 
- num_custody_column_requests: Option, + block_data_request: RangeBlockDataRequest, +} + +enum ByRangeRequest { + Active(I), + Complete(T), +} + +enum RangeBlockDataRequest { + NoData, + Blobs(ByRangeRequest>>>), + DataColumns { + requests: HashMap< + DataColumnsByRangeRequestId, + ByRangeRequest>, + >, + expected_custody_columns: Vec, + }, } impl RangeBlockComponentsRequest { pub fn new( - expects_blobs: bool, - expects_custody_columns: Option>, - num_custody_column_requests: Option, + blocks_req_id: BlocksByRangeRequestId, + blobs_req_id: Option, + data_columns: Option<(Vec, Vec)>, ) -> Self { - Self { - blocks: <_>::default(), - blobs: <_>::default(), - data_columns: <_>::default(), - is_blocks_stream_terminated: false, - is_sidecars_stream_terminated: false, - custody_columns_streams_terminated: 0, - expects_blobs, - expects_custody_columns, - num_custody_column_requests, - } - } - - pub fn add_blocks(&mut self, blocks: Vec>>) { - for block in blocks { - self.blocks.push_back(block); - } - self.is_blocks_stream_terminated = true; - } - - pub fn add_blobs(&mut self, blobs: Vec>>) { - for blob in blobs { - self.blobs.push_back(blob); - } - self.is_sidecars_stream_terminated = true; - } - - pub fn add_custody_columns(&mut self, columns: Vec>>) { - for column in columns { - self.data_columns.push_back(column); - } - // TODO(das): this mechanism is dangerous, if somehow there are two requests for the - // same column index it can terminate early. This struct should track that all requests - // for all custody columns terminate. 
- self.custody_columns_streams_terminated += 1; - } - - pub fn into_responses(self, spec: &ChainSpec) -> Result>, String> { - if let Some(expects_custody_columns) = self.expects_custody_columns.clone() { - self.into_responses_with_custody_columns(expects_custody_columns, spec) + let block_data_request = if let Some(blobs_req_id) = blobs_req_id { + RangeBlockDataRequest::Blobs(ByRangeRequest::Active(blobs_req_id)) + } else if let Some((requests, expected_custody_columns)) = data_columns { + RangeBlockDataRequest::DataColumns { + requests: requests + .into_iter() + .map(|id| (id, ByRangeRequest::Active(id))) + .collect(), + expected_custody_columns, + } } else { - self.into_responses_with_blobs(spec) + RangeBlockDataRequest::NoData + }; + + Self { + blocks_request: ByRangeRequest::Active(blocks_req_id), + block_data_request, } } - fn into_responses_with_blobs(self, spec: &ChainSpec) -> Result>, String> { - let RangeBlockComponentsRequest { blocks, blobs, .. } = self; + pub fn add_blocks( + &mut self, + req_id: BlocksByRangeRequestId, + blocks: Vec>>, + ) -> Result<(), String> { + self.blocks_request.finish(req_id, blocks) + } + pub fn add_blobs( + &mut self, + req_id: BlobsByRangeRequestId, + blobs: Vec>>, + ) -> Result<(), String> { + match &mut self.block_data_request { + RangeBlockDataRequest::NoData => Err("received blobs but expected no data".to_owned()), + RangeBlockDataRequest::Blobs(ref mut req) => req.finish(req_id, blobs), + RangeBlockDataRequest::DataColumns { .. 
} => { + Err("received blobs but expected data columns".to_owned()) + } + } + } + + pub fn add_custody_columns( + &mut self, + req_id: DataColumnsByRangeRequestId, + columns: Vec>>, + ) -> Result<(), String> { + match &mut self.block_data_request { + RangeBlockDataRequest::NoData => { + Err("received data columns but expected no data".to_owned()) + } + RangeBlockDataRequest::Blobs(_) => { + Err("received data columns but expected blobs".to_owned()) + } + RangeBlockDataRequest::DataColumns { + ref mut requests, .. + } => { + let req = requests + .get_mut(&req_id) + .ok_or(format!("unknown data columns by range req_id {req_id}"))?; + req.finish(req_id, columns) + } + } + } + + pub fn responses(&self, spec: &ChainSpec) -> Option>, String>> { + let Some(blocks) = self.blocks_request.to_finished() else { + return None; + }; + + match &self.block_data_request { + RangeBlockDataRequest::NoData => { + Some(Self::responses_with_blobs(blocks.to_vec(), vec![], spec)) + } + RangeBlockDataRequest::Blobs(request) => { + let Some(blobs) = request.to_finished() else { + return None; + }; + Some(Self::responses_with_blobs( + blocks.to_vec(), + blobs.to_vec(), + spec, + )) + } + RangeBlockDataRequest::DataColumns { + requests, + expected_custody_columns, + } => { + let mut data_columns = vec![]; + for req in requests.values() { + let Some(data) = req.to_finished() else { + return None; + }; + data_columns.extend(data.clone()) + } + + Some(Self::responses_with_custody_columns( + blocks.to_vec(), + data_columns, + expected_custody_columns, + spec, + )) + } + } + } + + fn responses_with_blobs( + blocks: Vec>>, + blobs: Vec>>, + spec: &ChainSpec, + ) -> Result>, String> { // There can't be more more blobs than blocks. i.e. sending any blob (empty // included) for a skipped slot is not permitted. 
let mut responses = Vec::with_capacity(blocks.len()); @@ -129,17 +196,12 @@ impl RangeBlockComponentsRequest { Ok(responses) } - fn into_responses_with_custody_columns( - self, - expects_custody_columns: Vec, + fn responses_with_custody_columns( + blocks: Vec>>, + data_columns: DataColumnSidecarList, + expects_custody_columns: &[ColumnIndex], spec: &ChainSpec, ) -> Result>, String> { - let RangeBlockComponentsRequest { - blocks, - data_columns, - .. - } = self; - // Group data columns by block_root and index let mut data_columns_by_block = HashMap::>>>::new(); @@ -177,7 +239,7 @@ impl RangeBlockComponentsRequest { }; let mut custody_columns = vec![]; - for index in &expects_custody_columns { + for index in expects_custody_columns { let Some(data_column) = data_columns_by_index.remove(index) else { return Err(format!("No column for block {block_root:?} index {index}")); }; @@ -210,20 +272,27 @@ impl RangeBlockComponentsRequest { Ok(rpc_blocks) } +} - pub fn is_finished(&self) -> bool { - if !self.is_blocks_stream_terminated { - return false; - } - if self.expects_blobs && !self.is_sidecars_stream_terminated { - return false; - } - if let Some(expects_custody_column_responses) = self.num_custody_column_requests { - if self.custody_columns_streams_terminated < expects_custody_column_responses { - return false; +impl ByRangeRequest { + fn finish(&mut self, id: I, data: T) -> Result<(), String> { + match self { + Self::Active(expected_id) => { + if expected_id != &id { + return Err(format!("unexpected req_id expected {expected_id} got {id}")); + } + *self = Self::Complete(data); + Ok(()) } + Self::Complete(_) => Err("request already complete".to_owned()), + } + } + + fn to_finished(&self) -> Option<&T> { + match self { + Self::Active(_) => None, + Self::Complete(data) => Some(data), } - true } } @@ -233,9 +302,52 @@ mod tests { use beacon_chain::test_utils::{ generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, NumBlobs, }; + use 
lighthouse_network::service::api_types::{ + BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, + DataColumnsByRangeRequestId, Id, RangeRequestId, + }; use rand::SeedableRng; use std::sync::Arc; - use types::{test_utils::XorShiftRng, ForkName, MinimalEthSpec as E, SignedBeaconBlock}; + use types::{test_utils::XorShiftRng, Epoch, ForkName, MinimalEthSpec as E, SignedBeaconBlock}; + + fn components_id() -> ComponentsByRangeRequestId { + ComponentsByRangeRequestId { + id: 0, + requester: RangeRequestId::RangeSync { + chain_id: 1, + batch_id: Epoch::new(0), + }, + } + } + + fn blocks_id(parent_request_id: ComponentsByRangeRequestId) -> BlocksByRangeRequestId { + BlocksByRangeRequestId { + id: 1, + parent_request_id, + } + } + + fn blobs_id(parent_request_id: ComponentsByRangeRequestId) -> BlobsByRangeRequestId { + BlobsByRangeRequestId { + id: 1, + parent_request_id, + } + } + + fn columns_id( + id: Id, + parent_request_id: ComponentsByRangeRequestId, + ) -> DataColumnsByRangeRequestId { + DataColumnsByRangeRequestId { + id, + parent_request_id, + } + } + + fn is_finished(info: &RangeBlockComponentsRequest) -> bool { + let spec = test_spec::(); + info.responses(&spec).is_some() + } #[test] fn no_blobs_into_responses() { @@ -248,14 +360,15 @@ mod tests { .into() }) .collect::>>>(); - let mut info = RangeBlockComponentsRequest::::new(false, None, None); + + let blocks_req_id = blocks_id(components_id()); + let mut info = RangeBlockComponentsRequest::::new(blocks_req_id, None, None); // Send blocks and complete terminate response - info.add_blocks(blocks); + info.add_blocks(blocks_req_id, blocks).unwrap(); // Assert response is finished and RpcBlocks can be constructed - assert!(info.is_finished()); - info.into_responses(&test_spec::()).unwrap(); + info.responses(&test_spec::()).unwrap().unwrap(); } #[test] @@ -275,18 +388,22 @@ mod tests { .into() }) .collect::>>>(); - let mut info = RangeBlockComponentsRequest::::new(true, None, None); + + let 
components_id = components_id(); + let blocks_req_id = blocks_id(components_id); + let blobs_req_id = blobs_id(components_id); + let mut info = + RangeBlockComponentsRequest::::new(blocks_req_id, Some(blobs_req_id), None); // Send blocks and complete terminate response - info.add_blocks(blocks); + info.add_blocks(blocks_req_id, blocks).unwrap(); // Expect no blobs returned - info.add_blobs(vec![]); + info.add_blobs(blobs_req_id, vec![]).unwrap(); // Assert response is finished and RpcBlocks can be constructed, even if blobs weren't returned. // This makes sure we don't expect blobs here when they have expired. Checking this logic should // be hendled elsewhere. - assert!(info.is_finished()); - info.into_responses(&test_spec::()).unwrap(); + info.responses(&test_spec::()).unwrap().unwrap(); } #[test] @@ -304,40 +421,49 @@ mod tests { ) }) .collect::>(); + + let components_id = components_id(); + let blocks_req_id = blocks_id(components_id); + let columns_req_id = expects_custody_columns + .iter() + .enumerate() + .map(|(i, _)| columns_id(i as Id, components_id)) + .collect::>(); let mut info = RangeBlockComponentsRequest::::new( - false, - Some(expects_custody_columns.clone()), - Some(expects_custody_columns.len()), + blocks_req_id, + None, + Some((columns_req_id.clone(), expects_custody_columns.clone())), ); // Send blocks and complete terminate response - info.add_blocks(blocks.iter().map(|b| b.0.clone().into()).collect()); + info.add_blocks( + blocks_req_id, + blocks.iter().map(|b| b.0.clone().into()).collect(), + ) + .unwrap(); // Assert response is not finished - assert!(!info.is_finished()); + assert!(!is_finished(&info)); // Send data columns for (i, &column_index) in expects_custody_columns.iter().enumerate() { info.add_custody_columns( + columns_req_id.get(i).copied().unwrap(), blocks .iter() .flat_map(|b| b.1.iter().filter(|d| d.index == column_index).cloned()) .collect(), - ); + ) + .unwrap(); if i < expects_custody_columns.len() - 1 { assert!( - 
!info.is_finished(), + !is_finished(&info), "requested should not be finished at loop {i}" ); - } else { - assert!( - info.is_finished(), - "request should be finishied at loop {i}" - ); } } // All completed construct response - info.into_responses(&spec).unwrap(); + info.responses(&spec).unwrap().unwrap(); } #[test] @@ -353,10 +479,18 @@ mod tests { (0..batched_column_requests.len() as u32).collect::>(); let num_of_data_column_requests = custody_column_request_ids.len(); + let components_id = components_id(); + let blocks_req_id = blocks_id(components_id); + let columns_req_id = batched_column_requests + .iter() + .enumerate() + .map(|(i, _)| columns_id(i as Id, components_id)) + .collect::>(); + let mut info = RangeBlockComponentsRequest::::new( - false, - Some(expects_custody_columns.clone()), - Some(num_of_data_column_requests), + blocks_req_id, + None, + Some((columns_req_id.clone(), expects_custody_columns.clone())), ); let mut rng = XorShiftRng::from_seed([42; 16]); @@ -372,13 +506,18 @@ mod tests { .collect::>(); // Send blocks and complete terminate response - info.add_blocks(blocks.iter().map(|b| b.0.clone().into()).collect()); + info.add_blocks( + blocks_req_id, + blocks.iter().map(|b| b.0.clone().into()).collect(), + ) + .unwrap(); // Assert response is not finished - assert!(!info.is_finished()); + assert!(!is_finished(&info)); for (i, column_indices) in batched_column_requests.iter().enumerate() { // Send the set of columns in the same batch request info.add_custody_columns( + columns_req_id.get(i).copied().unwrap(), blocks .iter() .flat_map(|b| { @@ -387,19 +526,18 @@ mod tests { .cloned() }) .collect::>(), - ); + ) + .unwrap(); if i < num_of_data_column_requests - 1 { assert!( - !info.is_finished(), + !is_finished(&info), "requested should not be finished at loop {i}" ); - } else { - assert!(info.is_finished(), "request should be finished at loop {i}"); } } // All completed construct response - info.into_responses(&spec).unwrap(); + 
info.responses(&spec).unwrap().unwrap(); } } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 671fa1e3b4..9a48e9aa5d 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -1167,7 +1167,7 @@ impl SyncManager { self.on_range_components_response( id.parent_request_id, peer_id, - RangeBlockComponent::Block(resp), + RangeBlockComponent::Block(id, resp), ); } } @@ -1182,7 +1182,7 @@ impl SyncManager { self.on_range_components_response( id.parent_request_id, peer_id, - RangeBlockComponent::Blob(resp), + RangeBlockComponent::Blob(id, resp), ); } } @@ -1200,7 +1200,7 @@ impl SyncManager { self.on_range_components_response( id.parent_request_id, peer_id, - RangeBlockComponent::CustodyColumns(resp), + RangeBlockComponent::CustodyColumns(id, resp), ); } } @@ -1288,7 +1288,7 @@ impl SyncManager { } } } - Err(_) => match range_request_id.requester { + Err(e) => match range_request_id.requester { RangeRequestId::RangeSync { chain_id, batch_id } => { self.range_sync.inject_error( &mut self.network, @@ -1296,16 +1296,22 @@ impl SyncManager { batch_id, chain_id, range_request_id.id, + e, ); self.update_sync_state(); } - RangeRequestId::BackfillSync { batch_id } => match self - .backfill_sync - .inject_error(&mut self.network, batch_id, &peer_id, range_request_id.id) - { - Ok(_) => {} - Err(_) => self.update_sync_state(), - }, + RangeRequestId::BackfillSync { batch_id } => { + match self.backfill_sync.inject_error( + &mut self.network, + batch_id, + &peer_id, + range_request_id.id, + e, + ) { + Ok(_) => {} + Err(_) => self.update_sync_state(), + } + } }, } } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 68a963dd41..16fcf93bcf 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -218,9 +218,18 @@ pub struct SyncNetworkContext { /// Small enumeration to 
make dealing with block and blob requests easier. pub enum RangeBlockComponent { - Block(RpcResponseResult>>>), - Blob(RpcResponseResult>>>), - CustodyColumns(RpcResponseResult>>>), + Block( + BlocksByRangeRequestId, + RpcResponseResult>>>, + ), + Blob( + BlobsByRangeRequestId, + RpcResponseResult>>>, + ), + CustodyColumns( + DataColumnsByRangeRequestId, + RpcResponseResult>>>, + ), } impl SyncNetworkContext { @@ -386,7 +395,16 @@ impl SyncNetworkContext { requester, }; - let _blocks_req_id = self.send_blocks_by_range_request(peer_id, request.clone(), id)?; + // Compute custody column peers before sending the blocks_by_range request. If we don't have + // enough peers, error here. + let data_column_requests = if matches!(batch_type, ByRangeRequestType::BlocksAndColumns) { + let column_indexes = self.network_globals().sampling_columns.clone(); + Some(self.make_columns_by_range_requests(request.clone(), &column_indexes)?) + } else { + None + }; + + let blocks_req_id = self.send_blocks_by_range_request(peer_id, request.clone(), id)?; let blobs_req_id = if matches!(batch_type, ByRangeRequestType::BlocksAndBlobs) { Some(self.send_blobs_by_range_request( @@ -401,35 +419,27 @@ impl SyncNetworkContext { None }; - let (expects_columns, data_column_requests) = - if matches!(batch_type, ByRangeRequestType::BlocksAndColumns) { - let column_indexes = self.network_globals().sampling_columns.clone(); - let data_column_requests = self - .make_columns_by_range_requests(request, &column_indexes)? 
- .into_iter() - .map(|(peer_id, columns_by_range_request)| { - self.send_data_columns_by_range_request( - peer_id, - columns_by_range_request, - id, - ) - }) - .collect::, _>>()?; + let data_columns = if let Some(data_column_requests) = data_column_requests { + let data_column_requests = data_column_requests + .into_iter() + .map(|(peer_id, columns_by_range_request)| { + self.send_data_columns_by_range_request(peer_id, columns_by_range_request, id) + }) + .collect::, _>>()?; - ( - Some(column_indexes.into_iter().collect::>()), - Some(data_column_requests), - ) - } else { - (None, None) - }; + Some(( + data_column_requests, + self.network_globals() + .sampling_columns + .iter() + .cloned() + .collect::>(), + )) + } else { + None + }; - let expected_blobs = blobs_req_id.is_some(); - let info = RangeBlockComponentsRequest::new( - expected_blobs, - expects_columns, - data_column_requests.map(|items| items.len()), - ); + let info = RangeBlockComponentsRequest::new(blocks_req_id, blobs_req_id, data_columns); self.components_by_range_requests.insert(id, info); Ok(id.id) @@ -484,28 +494,33 @@ impl SyncNetworkContext { if let Err(e) = { let request = entry.get_mut(); match range_block_component { - RangeBlockComponent::Block(resp) => resp.map(|(blocks, _)| { - request.add_blocks(blocks); + RangeBlockComponent::Block(req_id, resp) => resp.and_then(|(blocks, _)| { + request + .add_blocks(req_id, blocks) + .map_err(RpcResponseError::BlockComponentCouplingError) }), - RangeBlockComponent::Blob(resp) => resp.map(|(blobs, _)| { - request.add_blobs(blobs); - }), - RangeBlockComponent::CustodyColumns(resp) => resp.map(|(custody_columns, _)| { - request.add_custody_columns(custody_columns); + RangeBlockComponent::Blob(req_id, resp) => resp.and_then(|(blobs, _)| { + request + .add_blobs(req_id, blobs) + .map_err(RpcResponseError::BlockComponentCouplingError) }), + RangeBlockComponent::CustodyColumns(req_id, resp) => { + resp.and_then(|(custody_columns, _)| { + request + 
.add_custody_columns(req_id, custody_columns) + .map_err(RpcResponseError::BlockComponentCouplingError) + }) + } } } { entry.remove(); return Some(Err(e)); } - if entry.get_mut().is_finished() { + if let Some(blocks_result) = entry.get().responses(&self.chain.spec) { + entry.remove(); // If the request is finished, dequeue everything - let request = entry.remove(); - let blocks = request - .into_responses(&self.chain.spec) - .map_err(RpcResponseError::BlockComponentCouplingError); - Some(blocks) + Some(blocks_result.map_err(RpcResponseError::BlockComponentCouplingError)) } else { None } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 70c7b6f98f..24045e901b 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -2,7 +2,7 @@ use super::batch::{BatchInfo, BatchProcessingResult, BatchState}; use super::RangeSyncType; use crate::metrics; use crate::network_beacon_processor::ChainSegmentProcessId; -use crate::sync::network_context::RangeRequestId; +use crate::sync::network_context::{RangeRequestId, RpcResponseError}; use crate::sync::{network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult}; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::BeaconChainTypes; @@ -879,6 +879,7 @@ impl SyncingChain { batch_id: BatchId, peer_id: &PeerId, request_id: Id, + err: RpcResponseError, ) -> ProcessingResult { let batch_state = self.visualize_batch_state(); if let Some(batch) = self.batches.get_mut(&batch_id) { @@ -901,9 +902,10 @@ impl SyncingChain { debug!( batch_epoch = %batch_id, batch_state = ?batch.state(), + error = ?err, %peer_id, %request_id, - "Batch failed. 
RPC Error" + "Batch download error" ); if let Some(active_requests) = self.peers.get_mut(peer_id) { active_requests.remove(&batch_id); diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index e4a20f6349..ab9a88e4ac 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -44,7 +44,7 @@ use super::chain_collection::{ChainCollection, SyncChainStatus}; use super::sync_type::RangeSyncType; use crate::metrics; use crate::status::ToStatusMessage; -use crate::sync::network_context::SyncNetworkContext; +use crate::sync::network_context::{RpcResponseError, SyncNetworkContext}; use crate::sync::BatchProcessResult; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes}; @@ -348,10 +348,11 @@ where batch_id: BatchId, chain_id: ChainId, request_id: Id, + err: RpcResponseError, ) { // check that this request is pending match self.chains.call_by_id(chain_id, |chain| { - chain.inject_error(network, batch_id, &peer_id, request_id) + chain.inject_error(network, batch_id, &peer_id, request_id, err) }) { Ok((removed_chain, sync_type)) => { if let Some((removed_chain, remove_reason)) = removed_chain { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index e34285edbc..7d086dcc32 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -776,6 +776,15 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) + .arg( + Arg::new("state-cache-headroom") + .long("state-cache-headroom") + .value_name("N") + .help("Minimum number of states to cull from the state cache when it gets full") + .default_value("1") + .action(ArgAction::Set) + .display_order(0) + ) .arg( Arg::new("block-cache-size") .long("block-cache-size") @@ -1646,5 +1655,13 @@ pub fn cli_app() -> Command { .hide(true) .display_order(0) ) + .arg( + Arg::new("invalid-block-roots") + .long("invalid-block-roots") + 
.value_name("FILE") + .help("Path to a comma separated file containing block roots that should be treated as invalid during block verification.") + .action(ArgAction::Set) + .hide(true) + ) .group(ArgGroup::new("enable_http").args(["http", "gui", "staking"]).multiple(true)) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index bcebc06c9c..8723c2d708 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -2,7 +2,7 @@ use account_utils::{read_input_from_user, STDIN_INPUTS_FLAG}; use beacon_chain::chain_config::{ DisallowedReOrgOffsets, ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, DEFAULT_RE_ORG_HEAD_THRESHOLD, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, - DEFAULT_RE_ORG_PARENT_THRESHOLD, + DEFAULT_RE_ORG_PARENT_THRESHOLD, INVALID_HOLESKY_BLOCK_ROOT, }; use beacon_chain::graffiti_calculator::GraffitiOrigin; use beacon_chain::TrustedSetup; @@ -19,9 +19,10 @@ use lighthouse_network::ListenAddress; use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use sensitive_url::SensitiveUrl; use std::cmp::max; +use std::collections::HashSet; use std::fmt::Debug; use std::fs; -use std::io::IsTerminal; +use std::io::{IsTerminal, Read}; use std::net::Ipv6Addr; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; use std::num::NonZeroU16; @@ -449,6 +450,12 @@ pub fn get_config( client_config.chain.epochs_per_migration = epochs_per_migration; } + if let Some(state_cache_headroom) = + clap_utils::parse_optional(cli_args, "state-cache-headroom")? + { + client_config.store.state_cache_headroom = state_cache_headroom; + } + if let Some(prune_blobs) = clap_utils::parse_optional(cli_args, "prune-blobs")? { client_config.store.prune_blobs = prune_blobs; } @@ -897,6 +904,35 @@ pub fn get_config( client_config.chain.data_column_publishing_delay = Some(Duration::from_secs_f64(delay)); } + if let Some(invalid_block_roots_file_path) = + clap_utils::parse_optional::(cli_args, "invalid-block-roots")? 
+ { + let mut file = std::fs::File::open(invalid_block_roots_file_path) + .map_err(|e| format!("Failed to open invalid-block-roots file: {}", e))?; + let mut contents = String::new(); + file.read_to_string(&mut contents) + .map_err(|e| format!("Failed to read invalid-block-roots file {}", e))?; + let invalid_block_roots: HashSet = contents + .split(',') + .filter_map( + |s| match Hash256::from_str(s.strip_prefix("0x").unwrap_or(s).trim()) { + Ok(block_root) => Some(block_root), + Err(error) => { + warn!(block_root = s, ?error, "Unable to parse invalid block root",); + None + } + }, + ) + .collect(); + client_config.chain.invalid_block_roots = invalid_block_roots; + } else if spec + .config_name + .as_ref() + .is_some_and(|network_name| network_name == "holesky") + { + client_config.chain.invalid_block_roots = HashSet::from([*INVALID_HOLESKY_BLOCK_ROOT]); + } + Ok(client_config) } diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 64765fd66a..a84573eb40 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -21,6 +21,7 @@ pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; pub const DEFAULT_EPOCHS_PER_STATE_DIFF: u64 = 8; pub const DEFAULT_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(64); pub const DEFAULT_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(128); +pub const DEFAULT_STATE_CACHE_HEADROOM: NonZeroUsize = new_non_zero_usize(1); pub const DEFAULT_COMPRESSION_LEVEL: i32 = 1; pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(1); pub const DEFAULT_HDIFF_BUFFER_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(16); @@ -35,6 +36,8 @@ pub struct StoreConfig { pub block_cache_size: NonZeroUsize, /// Maximum number of states to store in the in-memory state cache. pub state_cache_size: NonZeroUsize, + /// Minimum number of states to cull from the state cache upon fullness. 
+ pub state_cache_headroom: NonZeroUsize, /// Compression level for blocks, state diffs and other compressed values. pub compression_level: i32, /// Maximum number of historic states to store in the in-memory historic state cache. @@ -107,6 +110,7 @@ impl Default for StoreConfig { Self { block_cache_size: DEFAULT_BLOCK_CACHE_SIZE, state_cache_size: DEFAULT_STATE_CACHE_SIZE, + state_cache_headroom: DEFAULT_STATE_CACHE_HEADROOM, historic_state_cache_size: DEFAULT_HISTORIC_STATE_CACHE_SIZE, hdiff_buffer_cache_size: DEFAULT_HDIFF_BUFFER_CACHE_SIZE, compression_level: DEFAULT_COMPRESSION_LEVEL, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 0a545529ca..6a30d8a428 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -73,7 +73,7 @@ pub struct HotColdDB, Cold: ItemStore> { /// Cache of beacon states. /// /// LOCK ORDERING: this lock must always be locked *after* the `split` if both are required. - state_cache: Mutex>, + pub state_cache: Mutex>, /// Cache of historic states and hierarchical diff buffers. /// /// This cache is never pruned. 
It is only populated in response to historical queries from the @@ -215,7 +215,10 @@ impl HotColdDB, MemoryStore> { blobs_db: MemoryStore::open(), hot_db: MemoryStore::open(), block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), - state_cache: Mutex::new(StateCache::new(config.state_cache_size)), + state_cache: Mutex::new(StateCache::new( + config.state_cache_size, + config.state_cache_headroom, + )), historic_state_cache: Mutex::new(HistoricStateCache::new( config.hdiff_buffer_cache_size, config.historic_state_cache_size, @@ -259,7 +262,10 @@ impl HotColdDB, BeaconNodeBackend> { cold_db: BeaconNodeBackend::open(&config, cold_path)?, hot_db, block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), - state_cache: Mutex::new(StateCache::new(config.state_cache_size)), + state_cache: Mutex::new(StateCache::new( + config.state_cache_size, + config.state_cache_headroom, + )), historic_state_cache: Mutex::new(HistoricStateCache::new( config.hdiff_buffer_cache_size, config.historic_state_cache_size, @@ -934,6 +940,7 @@ impl, Cold: ItemStore> HotColdDB &self, state_root: &Hash256, slot: Option, + update_cache: bool, ) -> Result>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_GET_COUNT); @@ -945,10 +952,10 @@ impl, Cold: ItemStore> HotColdDB // chain. This way we avoid returning a state that doesn't match `state_root`. self.load_cold_state(state_root) } else { - self.get_hot_state(state_root) + self.get_hot_state(state_root, update_cache) } } else { - match self.get_hot_state(state_root)? { + match self.get_hot_state(state_root, update_cache)? { Some(state) => Ok(Some(state)), None => self.load_cold_state(state_root), } @@ -998,21 +1005,27 @@ impl, Cold: ItemStore> HotColdDB } else { state_root }; + // It's a bit redundant but we elect to cache the state here and down below. let mut opt_state = self - .load_hot_state(&state_root)? + .load_hot_state(&state_root, true)? 
.map(|(state, _block_root)| (state_root, state)); if let Some((state_root, state)) = opt_state.as_mut() { state.update_tree_hash_cache()?; state.build_all_caches(&self.spec)?; - self.state_cache - .lock() - .put_state(*state_root, block_root, state)?; - debug!( - ?state_root, - slot = %state.slot(), - "Cached state" - ); + if let PutStateOutcome::New(deleted_states) = + self.state_cache + .lock() + .put_state(*state_root, block_root, state)? + { + debug!( + ?state_root, + state_slot = %state.slot(), + ?deleted_states, + location = "get_advanced_hot_state", + "Cached state", + ); + } } drop(split); Ok(opt_state) @@ -1109,6 +1122,8 @@ impl, Cold: ItemStore> HotColdDB /// Load an epoch boundary state by using the hot state summary look-up. /// /// Will fall back to the cold DB if a hot state summary is not found. + /// + /// NOTE: only used in tests at the moment pub fn load_epoch_boundary_state( &self, state_root: &Hash256, @@ -1119,9 +1134,11 @@ impl, Cold: ItemStore> HotColdDB }) = self.load_hot_state_summary(state_root)? { // NOTE: minor inefficiency here because we load an unnecessary hot state summary - let (state, _) = self.load_hot_state(&epoch_boundary_state_root)?.ok_or( - HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root), - )?; + let (state, _) = self + .load_hot_state(&epoch_boundary_state_root, true)? + .ok_or(HotColdDBError::MissingEpochBoundaryState( + epoch_boundary_state_root, + ))?; Ok(Some(state)) } else { // Try the cold DB @@ -1445,23 +1462,32 @@ impl, Cold: ItemStore> HotColdDB state: &BeaconState, ops: &mut Vec, ) -> Result<(), Error> { - // Put the state in the cache. - let block_root = state.get_latest_block_root(*state_root); - // Avoid storing states in the database if they already exist in the state cache. // The exception to this is the finalized state, which must exist in the cache before it // is stored on disk. 
- if let PutStateOutcome::Duplicate = - self.state_cache - .lock() - .put_state(*state_root, block_root, state)? - { - debug!( - slot = %state.slot(), - ?state_root, - "Skipping storage of cached state" - ); - return Ok(()); + match self.state_cache.lock().put_state( + *state_root, + state.get_latest_block_root(*state_root), + state, + )? { + PutStateOutcome::New(deleted_states) => { + debug!( + ?state_root, + state_slot = %state.slot(), + ?deleted_states, + location = "store_hot_state", + "Cached state", + ); + } + PutStateOutcome::Duplicate => { + debug!( + ?state_root, + state_slot = %state.slot(), + "State already exists in state cache", + ); + return Ok(()); + } + PutStateOutcome::Finalized => {} // Continue to store. } // On the epoch boundary, store the full state. @@ -1485,7 +1511,11 @@ impl, Cold: ItemStore> HotColdDB } /// Get a post-finalization state from the database or store. - pub fn get_hot_state(&self, state_root: &Hash256) -> Result>, Error> { + pub fn get_hot_state( + &self, + state_root: &Hash256, + update_cache: bool, + ) -> Result>, Error> { if let Some(state) = self.state_cache.lock().get_by_state_root(*state_root) { return Ok(Some(state)); } @@ -1495,19 +1525,33 @@ impl, Cold: ItemStore> HotColdDB warn!(?state_root, "State cache missed"); } - let state_from_disk = self.load_hot_state(state_root)?; + let state_from_disk = self.load_hot_state(state_root, update_cache)?; if let Some((mut state, block_root)) = state_from_disk { state.update_tree_hash_cache()?; state.build_all_caches(&self.spec)?; - self.state_cache - .lock() - .put_state(*state_root, block_root, &state)?; - debug!( - ?state_root, - slot = %state.slot(), - "Cached state" - ); + if update_cache { + if let PutStateOutcome::New(deleted_states) = + self.state_cache + .lock() + .put_state(*state_root, block_root, &state)? 
+ { + debug!( + ?state_root, + state_slot = %state.slot(), + ?deleted_states, + location = "get_hot_state", + "Cached state", + ); + } + } else { + debug!( + ?state_root, + state_slot = %state.slot(), + "Did not cache state", + ); + } + Ok(Some(state)) } else { Ok(None) @@ -1523,6 +1567,7 @@ impl, Cold: ItemStore> HotColdDB pub fn load_hot_state( &self, state_root: &Hash256, + update_cache: bool, ) -> Result, Hash256)>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT); @@ -1554,25 +1599,28 @@ impl, Cold: ItemStore> HotColdDB let mut state = if slot % E::slots_per_epoch() == 0 { boundary_state } else { - // Cache ALL intermediate states that are reached during block replay. We may want - // to restrict this in future to only cache epoch boundary states. At worst we will - // cache up to 32 states for each state loaded, which should not flush out the cache - // entirely. + // If replaying blocks, and `update_cache` is true, also cache the epoch boundary + // state that this state is based on. It may be useful as the basis of more states + // in the same epoch. let state_cache_hook = |state_root, state: &mut BeaconState| { + if !update_cache || state.slot() % E::slots_per_epoch() != 0 { + return Ok(()); + } // Ensure all caches are built before attempting to cache. state.update_tree_hash_cache()?; state.build_all_caches(&self.spec)?; let latest_block_root = state.get_latest_block_root(state_root); - if let PutStateOutcome::New = + if let PutStateOutcome::New(_) = self.state_cache .lock() .put_state(state_root, latest_block_root, state)? { debug!( ?state_root, - %slot, - "Cached ancestor state" + state_slot = %state.slot(), + descendant_slot = %slot, + "Cached ancestor state", ); } Ok(()) @@ -2619,10 +2667,15 @@ impl, Cold: ItemStore> HotColdDB return Ok(()); }; - // Load the split state so we can backtrack to find execution payloads. 
- let split_state = self.get_state(&split.state_root, Some(split.slot))?.ok_or( - HotColdDBError::MissingSplitState(split.state_root, split.slot), - )?; + // Load the split state so we can backtrack to find execution payloads. The split state + // should be in the state cache as the enshrined finalized state, so this should never + // cache miss. + let split_state = self + .get_state(&split.state_root, Some(split.slot), true)? + .ok_or(HotColdDBError::MissingSplitState( + split.state_root, + split.slot, + ))?; // The finalized block may or may not have its execution payload stored, depending on // whether it was at a skipped slot. However for a fully pruned database its parent @@ -3080,8 +3133,10 @@ pub fn migrate_database, Cold: ItemStore>( // Store slot -> state_root and state_root -> slot mappings. store.store_cold_state_summary(&state_root, slot, &mut cold_db_ops)?; } else { + // This is some state that we want to migrate to the freezer db. + // There is no reason to cache this state. let state: BeaconState = store - .get_hot_state(&state_root)? + .get_hot_state(&state_root, false)? .ok_or(HotColdDBError::MissingStateToFreeze(state_root))?; store.store_cold_state(&state_root, &state, &mut cold_db_ops)?; diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 0d12bbdd60..8419dde4a2 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -27,8 +27,10 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> &self, store: &'a HotColdDB, ) -> Option> { + // Ancestor roots and their states are probably in the cold db + // but we set `update_cache` to false just in case let state = store - .get_state(&self.message().state_root(), Some(self.slot())) + .get_state(&self.message().state_root(), Some(self.slot()), false) .ok()??; Some(BlockRootsIterator::owned(store, state)) @@ -189,8 +191,10 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> RootsIterator<'a, E, let block = store .get_blinded_block(&block_hash)? 
.ok_or_else(|| BeaconStateError::MissingBeaconBlock(block_hash.into()))?; + // We are querying some block from the database. It's not clear if the block's state is useful, + // we elect not to cache it. let state = store - .get_state(&block.state_root(), Some(block.slot()))? + .get_state(&block.state_root(), Some(block.slot()), false)? .ok_or_else(|| BeaconStateError::MissingBeaconState(block.state_root().into()))?; Ok(Self::owned(store, state)) } @@ -362,8 +366,9 @@ fn next_historical_root_backtrack_state, Cold: Ite if new_state_slot >= historic_state_upper_limit { let new_state_root = current_state.get_state_root(new_state_slot)?; + // We are backtracking through historical states, we don't want to cache these. Ok(store - .get_state(new_state_root, Some(new_state_slot))? + .get_state(new_state_root, Some(new_state_slot), false)? .ok_or_else(|| BeaconStateError::MissingBeaconState((*new_state_root).into()))?) } else { Err(Error::HistoryUnavailable) diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index 96e4de4639..281ecab152 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -33,26 +33,33 @@ pub struct SlotMap { #[derive(Debug)] pub struct StateCache { finalized_state: Option>, - states: LruCache>, + // Stores the tuple (state_root, state) as LruCache only returns the value on put and we need + // the state_root + states: LruCache)>, block_map: BlockMap, max_epoch: Epoch, + head_block_root: Hash256, + headroom: NonZeroUsize, } #[derive(Debug)] pub enum PutStateOutcome { Finalized, Duplicate, - New, + /// Includes deleted states as a result of this insertion + New(Vec), } #[allow(clippy::len_without_is_empty)] impl StateCache { - pub fn new(capacity: NonZeroUsize) -> Self { + pub fn new(capacity: NonZeroUsize, headroom: NonZeroUsize) -> Self { StateCache { finalized_state: None, states: LruCache::new(capacity), block_map: BlockMap::default(), max_epoch: Epoch::new(0), + 
head_block_root: Hash256::ZERO, + headroom, } } @@ -98,6 +105,13 @@ impl StateCache { Ok(()) } + /// Update the state cache's view of the enshrined head block. + /// + /// We never prune the unadvanced state for the head block. + pub fn update_head_block_root(&mut self, head_block_root: Hash256) { + self.head_block_root = head_block_root; + } + /// Rebase the given state on the finalized state in order to reduce its memory consumption. /// /// This function should only be called on states that are likely not to already share tree @@ -147,18 +161,26 @@ impl StateCache { self.max_epoch = std::cmp::max(state.current_epoch(), self.max_epoch); // If the cache is full, use the custom cull routine to make room. - if let Some(over_capacity) = self.len().checked_sub(self.capacity()) { - self.cull(over_capacity + 1); - } + let mut deleted_states = + if let Some(over_capacity) = self.len().checked_sub(self.capacity()) { + // The `over_capacity` should always be 0, but we add it here just in case. + self.cull(over_capacity + self.headroom.get()) + } else { + vec![] + }; // Insert the full state into the cache. - self.states.put(state_root, state.clone()); + if let Some((deleted_state_root, _)) = + self.states.put(state_root, (state_root, state.clone())) + { + deleted_states.push(deleted_state_root); + } // Record the connection from block root and slot to this state. let slot = state.slot(); self.block_map.insert(block_root, slot, state_root); - Ok(PutStateOutcome::New) + Ok(PutStateOutcome::New(deleted_states)) } pub fn get_by_state_root(&mut self, state_root: Hash256) -> Option> { @@ -167,7 +189,7 @@ impl StateCache { return Some(finalized_state.state.clone()); } } - self.states.get(&state_root).cloned() + self.states.get(&state_root).map(|(_, state)| state.clone()) } pub fn get_by_block_root( @@ -211,7 +233,7 @@ impl StateCache { /// - Mid-epoch unadvanced states. /// - Epoch-boundary states that are too old to be finalized. 
/// - Epoch-boundary states that could be finalized. - pub fn cull(&mut self, count: usize) { + pub fn cull(&mut self, count: usize) -> Vec { let cull_exempt = std::cmp::max( 1, self.len() * CULL_EXEMPT_NUMERATOR / CULL_EXEMPT_DENOMINATOR, @@ -222,7 +244,8 @@ impl StateCache { let mut mid_epoch_state_roots = vec![]; let mut old_boundary_state_roots = vec![]; let mut good_boundary_state_roots = vec![]; - for (&state_root, state) in self.states.iter().skip(cull_exempt) { + + for (&state_root, (_, state)) in self.states.iter().skip(cull_exempt) { let is_advanced = state.slot() > state.latest_block_header().slot; let is_boundary = state.slot() % E::slots_per_epoch() == 0; let could_finalize = @@ -236,7 +259,8 @@ impl StateCache { } } else if is_advanced { advanced_state_roots.push(state_root); - } else { + } else if state.get_latest_block_root(state_root) != self.head_block_root { + // Never prune the head state mid_epoch_state_roots.push(state_root); } @@ -248,15 +272,19 @@ impl StateCache { // Stage 2: delete. // This could probably be more efficient in how it interacts with the block map. 
- for state_root in advanced_state_roots - .iter() - .chain(mid_epoch_state_roots.iter()) - .chain(old_boundary_state_roots.iter()) - .chain(good_boundary_state_roots.iter()) + let state_roots_to_delete = advanced_state_roots + .into_iter() + .chain(old_boundary_state_roots) + .chain(mid_epoch_state_roots) + .chain(good_boundary_state_roots) .take(count) - { + .collect::>(); + + for state_root in &state_roots_to_delete { self.delete_state(state_root); } + + state_roots_to_delete } } diff --git a/book/.markdownlint.yml b/book/.markdownlint.yml index 5d6bda29f1..4f7d113364 100644 --- a/book/.markdownlint.yml +++ b/book/.markdownlint.yml @@ -8,7 +8,7 @@ MD010: MD013: false # MD028: set to false to allow blank line between blockquote: https://github.com/DavidAnson/markdownlint/blob/main/doc/md028.md -# This is because the blockquotes are shown separatedly (a deisred outcome) when having a blank line in between +# This is because the blockquotes are shown separately (a desired outcome) when having a blank line in between MD028: false # MD024: set siblings_only to true so that same headings with different parent headings are allowed diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 44d7702e5f..3d09e3a6a5 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -2,58 +2,54 @@ * [Introduction](./intro.md) * [Installation](./installation.md) - * [Pre-Built Binaries](./installation-binaries.md) - * [Docker](./docker.md) - * [Build from Source](./installation-source.md) - * [Raspberry Pi 4](./pi.md) - * [Cross-Compiling](./cross-compiling.md) - * [Homebrew](./homebrew.md) - * [Update Priorities](./installation-priorities.md) + * [Pre-Built Binaries](./installation_binaries.md) + * [Docker](./installation_docker.md) + * [Build from Source](./installation_source.md) + * [Cross-Compiling](./installation_cross_compiling.md) + * [Homebrew](./installation_homebrew.md) + * [Update Priorities](./installation_priorities.md) * [Run a Node](./run_a_node.md) -* [Become a 
Validator](./mainnet-validator.md) -* [Validator Management](./validator-management.md) - * [The `validator-manager` Command](./validator-manager.md) - * [Creating validators](./validator-manager-create.md) - * [Moving validators](./validator-manager-move.md) - * [Managing validators](./validator-manager-api.md) - * [Slashing Protection](./slashing-protection.md) - * [Voluntary Exits](./voluntary-exit.md) - * [Partial Withdrawals](./partial-withdrawal.md) - * [Validator Monitoring](./validator-monitoring.md) - * [Doppelganger Protection](./validator-doppelganger.md) - * [Suggested Fee Recipient](./suggested-fee-recipient.md) - * [Validator Graffiti](./graffiti.md) +* [Become a Validator](./mainnet_validator.md) +* [Validator Management](./validator_management.md) + * [The `validator-manager` Command](./validator_manager.md) + * [Creating validators](./validator_manager_create.md) + * [Moving validators](./validator_manager_move.md) + * [Managing validators](./validator_manager_api.md) + * [Slashing Protection](./validator_slashing_protection.md) + * [Voluntary Exits](./validator_voluntary_exit.md) + * [Validator Sweep](./validator_sweep.md) + * [Validator Monitoring](./validator_monitoring.md) + * [Doppelganger Protection](./validator_doppelganger.md) + * [Suggested Fee Recipient](./validator_fee_recipient.md) + * [Validator Graffiti](./validator_graffiti.md) * [APIs](./api.md) - * [Beacon Node API](./api-bn.md) - * [Lighthouse API](./api-lighthouse.md) - * [Validator Inclusion APIs](./validator-inclusion.md) - * [Validator Client API](./api-vc.md) - * [Endpoints](./api-vc-endpoints.md) - * [Authorization Header](./api-vc-auth-header.md) - * [Signature Header](./api-vc-sig-header.md) - * [Prometheus Metrics](./advanced_metrics.md) -* [Lighthouse UI (Siren)](./lighthouse-ui.md) - * [Configuration](./ui-configuration.md) - * [Authentication](./ui-authentication.md) - * [Usage](./ui-usage.md) - * [FAQs](./ui-faqs.md) + * [Beacon Node API](./api_bn.md) + * [Lighthouse 
API](./api_lighthouse.md) + * [Validator Inclusion APIs](./api_validator_inclusion.md) + * [Validator Client API](./api_vc.md) + * [Endpoints](./api_vc_endpoints.md) + * [Authorization Header](./api_vc_auth_header.md) + * [Prometheus Metrics](./api_metrics.md) +* [Lighthouse UI (Siren)](./ui.md) + * [Configuration](./ui_configuration.md) + * [Authentication](./ui_authentication.md) + * [Usage](./ui_usage.md) + * [FAQs](./ui_faqs.md) * [Advanced Usage](./advanced.md) - * [Checkpoint Sync](./checkpoint-sync.md) - * [Custom Data Directories](./advanced-datadir.md) - * [Proposer Only Beacon Nodes](./advanced-proposer-only.md) - * [Remote Signing with Web3Signer](./validator-web3signer.md) + * [Checkpoint Sync](./advanced_checkpoint_sync.md) + * [Custom Data Directories](./advanced_datadir.md) + * [Proposer Only Beacon Nodes](./advanced_proposer_only.md) + * [Remote Signing with Web3Signer](./advanced_web3signer.md) * [Database Configuration](./advanced_database.md) - * [Database Migrations](./database-migrations.md) - * [Key Management (Deprecated)](./key-management.md) - * [Key Recovery](./key-recovery.md) + * [Database Migrations](./advanced_database_migrations.md) + * [Key Recovery](./advanced_key_recovery.md) * [Advanced Networking](./advanced_networking.md) - * [Running a Slasher](./slasher.md) - * [Redundancy](./redundancy.md) - * [Release Candidates](./advanced-release-candidates.md) - * [MEV](./builders.md) - * [Merge Migration](./merge-migration.md) - * [Late Block Re-orgs](./late-block-re-orgs.md) - * [Blobs](./advanced-blobs.md) + * [Running a Slasher](./advanced_slasher.md) + * [Redundancy](./advanced_redundancy.md) + * [Release Candidates](./advanced_release_candidates.md) + * [MEV](./advanced_builders.md) + * [Late Block Re-orgs](./advanced_re-orgs.md) + * [Blobs](./advanced_blobs.md) * [Command Line Reference (CLI)](./help_general.md) * [Beacon Node](./help_bn.md) * [Validator Client](./help_vc.md) @@ -62,7 +58,11 @@ * [Import](./help_vm_import.md) * 
[Move](./help_vm_move.md) * [Contributing](./contributing.md) - * [Development Environment](./setup.md) + * [Development Environment](./contributing_setup.md) * [FAQs](./faq.md) * [Protocol Developers](./developers.md) * [Security Researchers](./security.md) +* [Archived](./archived.md) + * [Merge Migration](./archived_merge_migration.md) + * [Raspberry Pi 4](./archived_pi.md) + * [Key Management](./archived_key_management.md) diff --git a/book/src/advanced.md b/book/src/advanced.md index 1a882835a4..76a7fed202 100644 --- a/book/src/advanced.md +++ b/book/src/advanced.md @@ -6,19 +6,17 @@ elsewhere? This section provides detailed information about configuring Lighthouse for specific use cases, and tips about how things work under the hood. -* [Checkpoint Sync](./checkpoint-sync.md): quickly sync the beacon chain to perform validator duties. -* [Custom Data Directories](./advanced-datadir.md): modify the data directory to your preferred location. -* [Proposer Only Beacon Nodes](./advanced-proposer-only.md): beacon node only for proposer duty for increased anonymity. -* [Remote Signing with Web3Signer](./validator-web3signer.md): don't want to store your keystore in local node? Use web3signer. +* [Checkpoint Sync](./advanced_checkpoint_sync.md): quickly sync the beacon chain to perform validator duties. +* [Custom Data Directories](./advanced_datadir.md): modify the data directory to your preferred location. +* [Proposer Only Beacon Nodes](./advanced_proposer_only.md): beacon node only for proposer duty for increased anonymity. +* [Remote Signing with Web3Signer](./advanced_web3signer.md): don't want to store your keystore in local node? Use web3signer. * [Database Configuration](./advanced_database.md): understanding space-time trade-offs in the database. -* [Database Migrations](./database-migrations.md): have a look at all previous Lighthouse database scheme versions. -* [Key Management](./key-management.md): explore how to generate wallet with Lighthouse. 
-* [Key Recovery](./key-recovery.md): explore how to recover wallet and validator with Lighthouse. +* [Database Migrations](./advanced_database_migrations.md): have a look at all previous Lighthouse database scheme versions. +* [Key Recovery](./advanced_key_recovery.md): explore how to recover wallet and validator with Lighthouse. * [Advanced Networking](./advanced_networking.md): open your ports to have a diverse and healthy set of peers. -* [Running a Slasher](./slasher.md): contribute to the health of the network by running a slasher. -* [Redundancy](./redundancy.md): want to have more than one beacon node as backup? This is for you. -* [Release Candidates](./advanced-release-candidates.md): latest release of Lighthouse to get feedback from users. -* [Maximal Extractable Value](./builders.md): use external builders for a potential higher rewards during block proposals -* [Merge Migration](./merge-migration.md): look at what you need to do during a significant network upgrade: The Merge -* [Late Block Re-orgs](./late-block-re-orgs.md): read information about Lighthouse late block re-orgs. -* [Blobs](./advanced-blobs.md): information about blobs in Deneb upgrade +* [Running a Slasher](./advanced_slasher.md): contribute to the health of the network by running a slasher. +* [Redundancy](./advanced_redundancy.md): want to have more than one beacon node as backup? This is for you. +* [Release Candidates](./advanced_release_candidates.md): latest release of Lighthouse to get feedback from users. +* [Maximal Extractable Value](./advanced_builders.md): use external builders for a potential higher rewards during block proposals +* [Late Block Re-orgs](./advanced_re-orgs.md): read information about Lighthouse late block re-orgs. 
+* [Blobs](./advanced_blobs.md): information about blobs in Deneb upgrade diff --git a/book/src/advanced-blobs.md b/book/src/advanced_blobs.md similarity index 96% rename from book/src/advanced-blobs.md rename to book/src/advanced_blobs.md index 785bd5797d..aa995b8e1d 100644 --- a/book/src/advanced-blobs.md +++ b/book/src/advanced_blobs.md @@ -38,4 +38,4 @@ In the Deneb network upgrade, one of the changes is the implementation of EIP-48 curl "http://localhost:5052/lighthouse/database/info" | jq ``` - Refer to [Lighthouse API](./api-lighthouse.md#lighthousedatabaseinfo) for an example response. + Refer to [Lighthouse API](./api_lighthouse.md#lighthousedatabaseinfo) for an example response. diff --git a/book/src/builders.md b/book/src/advanced_builders.md similarity index 95% rename from book/src/builders.md rename to book/src/advanced_builders.md index 5b8e9ddb8b..d9468898b4 100644 --- a/book/src/builders.md +++ b/book/src/advanced_builders.md @@ -83,11 +83,11 @@ is something afoot. To update gas limit per-validator you can use the [standard key manager API][gas-limit-api]. -Alternatively, you can use the [lighthouse API](api-vc-endpoints.md). See below for an example. +Alternatively, you can use the [lighthouse API](api_vc_endpoints.md). See below for an example. ### Enable/Disable builder proposals via HTTP -Use the [lighthouse API](api-vc-endpoints.md) to enable/disable use of the builder API on a per-validator basis. +Use the [lighthouse API](api_vc_endpoints.md) to enable/disable use of the builder API on a per-validator basis. You can also update the configured gas limit with these requests. #### `PATCH /lighthouse/validators/:voting_pubkey` @@ -98,7 +98,7 @@ You can also update the configured gas limit with these requests. 
|-------------------|--------------------------------------------| | Path | `/lighthouse/validators/:voting_pubkey` | | Method | PATCH | -| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200, 400 | #### Example Path @@ -147,7 +147,7 @@ INFO Published validator registrations to the builder network, count: 3, service ### Fee Recipient -Refer to [suggested fee recipient](suggested-fee-recipient.md) documentation. +Refer to [suggested fee recipient](validator_fee_recipient.md) documentation. ### Validator definitions example @@ -244,16 +244,9 @@ INFO Builder payload ignored INFO Chain is unhealthy, using local payload ``` -In case of fallback you should see a log indicating that the locally produced payload was -used in place of one from the builder: - -```text -INFO Reconstructing a full block using a local payload -``` - ## Information for block builders and relays -Block builders and relays can query beacon node events from the [Events API](https://ethereum.github.io/beacon-APIs/#/Events/eventstream). An example of querying the payload attributes in the Events API is outlined in [Beacon node API - Events API](./api-bn.md#events-api) +Block builders and relays can query beacon node events from the [Events API](https://ethereum.github.io/beacon-APIs/#/Events/eventstream). 
An example of querying the payload attributes in the Events API is outlined in [Beacon node API - Events API](./api_bn.md#events-api) [mev-rs]: https://github.com/ralexstokes/mev-rs [mev-boost]: https://github.com/flashbots/mev-boost diff --git a/book/src/checkpoint-sync.md b/book/src/advanced_checkpoint_sync.md similarity index 99% rename from book/src/checkpoint-sync.md rename to book/src/advanced_checkpoint_sync.md index 8dd63f77c9..45aed6ef58 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/advanced_checkpoint_sync.md @@ -134,7 +134,7 @@ Important information to be aware of: * It is safe to interrupt state reconstruction by gracefully terminating the node – it will pick up from where it left off when it restarts. * You can start reconstruction from the HTTP API, and view its progress. See the - [`/lighthouse/database`](./api-lighthouse.md) APIs. + [`/lighthouse/database`](./api_lighthouse.md) APIs. For more information on historic state storage see the [Database Configuration](./advanced_database.md) page. diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index b558279730..4e77046c2d 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -61,6 +61,26 @@ that we have observed are: to apply. We observed no significant performance benefit from `--hierarchy-exponents 5,7,11`, and a substantial increase in space consumed. +The following table lists the data for different configurations. Note that the disk space requirement is for the `chain_db` and `freezer_db`, excluding the `blobs_db`. 
+
+| Hierarchy Exponents | Storage Requirement | Sequential Slot Query | Uncached Query | Time to Sync |
+|---|---|---|---|---|
+| 5,9,11,13,16,18,21 (default) | 418 GiB | 250-700 ms | up to 10 s | 1 week |
+| 5,7,11 (frequent snapshots) | 589 GiB | 250-700 ms | up to 6 s | 1 week |
+| 0,5,7,11 (per-slot diffs) | 2500 GiB | 250-700 ms | up to 4 s | 7 weeks |
+
+[Jim](https://github.com/mcdee) has done some experiments to study the response time of querying random slots (uncached query) for `--hierarchy-exponents 0,5,7,11` (per-slot diffs) and `--hierarchy-exponents 5,9,11,13,17,21` (per-epoch diffs), as shown in the figures below. From the figures, two points can be concluded:
+
+- response time (y-axis) increases with slot number (x-axis) due to state growth.
+- response time for per-slot configuration in general is 2x faster than that of per-epoch.
+
+In short, setting different configurations is a trade-off between disk space requirement, sync time and response time. The data presented here is useful to help users choose the configuration that suits their needs.
+
+_We acknowledge the data provided by [Jim](https://github.com/mcdee) and his consent for us to share it here._
+
+![Response time for per-epoch archive](./imgs/per-epoch.png)
+![Response time for per-slot archive](./imgs/per-slot.png)
+
 If in doubt, we recommend running with the default configuration! It takes a
 long time to reconstruct states in any given configuration, so it might be some
 time before the optimal configuration is determined.
diff --git a/book/src/database-migrations.md b/book/src/advanced_database_migrations.md similarity index 100% rename from book/src/database-migrations.md rename to book/src/advanced_database_migrations.md diff --git a/book/src/advanced-datadir.md b/book/src/advanced_datadir.md similarity index 98% rename from book/src/advanced-datadir.md rename to book/src/advanced_datadir.md index 7ad993a107..1be8ed5a34 100644 --- a/book/src/advanced-datadir.md +++ b/book/src/advanced_datadir.md @@ -12,7 +12,7 @@ lighthouse --network mainnet --datadir /var/lib/my-custom-dir bn --staking lighthouse --network mainnet --datadir /var/lib/my-custom-dir vc ``` -The first step creates a `validators` directory under `/var/lib/my-custom-dir` which contains the imported keys and [`validator_definitions.yml`](./validator-management.md). +The first step creates a `validators` directory under `/var/lib/my-custom-dir` which contains the imported keys and [`validator_definitions.yml`](./validator_management.md). After that, we simply run the beacon chain and validator client with the custom dir path. ## Relative Paths diff --git a/book/src/key-recovery.md b/book/src/advanced_key_recovery.md similarity index 100% rename from book/src/key-recovery.md rename to book/src/advanced_key_recovery.md diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index 0dc1000aa0..0dc53bd42a 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -123,8 +123,12 @@ Lighthouse listens for connections, and the parameters used to tell other peers how to connect to your node. This distinction is relevant and applies to most nodes that do not run directly on a public network. +Since Lighthouse v7.0.0, Lighthouse listens to both IPv4 and IPv6 by default if it detects a globally routable IPv6 address. This means that dual-stack is enabled by default. 
+
 ### Configuring Lighthouse to listen over IPv4/IPv6/Dual stack
 
+To listen over only IPv4 and not IPv6, use the flag `--listen-address 0.0.0.0`.
+
 To listen over only IPv6 use the same parameters as done when listening over
 IPv4 only:
 
@@ -136,7 +140,7 @@ TCP and UDP. If the specified port is 9909, QUIC will use port 9910 for IPv6
 UDP connections. This can be configured with `--quic-port`.
 
-To listen over both IPv4 and IPv6:
+To listen over both IPv4 and IPv6, using a different port for IPv6:
 
 - Set two listening addresses using the `--listen-address` flag twice ensuring
   the two addresses are one IPv4, and the other IPv6. When doing so, the
@@ -165,7 +169,7 @@ To listen over both IPv4 and IPv6:
 > It listens on the default value of --port6 (`9000`) for both UDP and TCP.
 > QUIC will use port `9001` for UDP, which is the default `--port6` value (`9000`) + 1.
 
-> When using `--listen-address :: --listen-address --port 9909 --discovery-port6 9999`, listening will be set up as follows:
+> When using `--listen-address :: --listen-address 0.0.0.0 --port 9909 --discovery-port6 9999`, listening will be set up as follows:
 >
 > **IPv4**:
 >
diff --git a/book/src/advanced-proposer-only.md b/book/src/advanced_proposer_only.md
similarity index 97%
rename from book/src/advanced-proposer-only.md
rename to book/src/advanced_proposer_only.md
index 1ea3610988..f55e51606c 100644
--- a/book/src/advanced-proposer-only.md
+++ b/book/src/advanced_proposer_only.md
@@ -56,7 +56,7 @@ these nodes for added security).
 
 The intended set-up to take advantage of this mechanism is to run one (or more)
 normal beacon nodes in conjunction with one (or more) proposer-only beacon
-nodes. See the [Redundancy](./redundancy.md) section for more information about
+nodes. See the [Redundancy](./advanced_redundancy.md) section for more information about
 setting up redundant beacon nodes. The proposer-only beacon nodes should be
 setup to use a different IP address than the primary (non proposer-only) nodes.
For added security, the IP addresses of the proposer-only nodes should be diff --git a/book/src/late-block-re-orgs.md b/book/src/advanced_re-orgs.md similarity index 100% rename from book/src/late-block-re-orgs.md rename to book/src/advanced_re-orgs.md diff --git a/book/src/redundancy.md b/book/src/advanced_redundancy.md similarity index 94% rename from book/src/redundancy.md rename to book/src/advanced_redundancy.md index daf0eb4a5b..4582866657 100644 --- a/book/src/redundancy.md +++ b/book/src/advanced_redundancy.md @@ -9,7 +9,7 @@ There are three places in Lighthouse where redundancy is notable: We mention (3) since it is unsafe and should not be confused with the other two uses of redundancy. **Running the same validator keypair in more than one validator client (Lighthouse, or otherwise) will eventually lead to slashing.** -See [Slashing Protection](./slashing-protection.md) for more information. +See [Slashing Protection](./validator_slashing_protection.md) for more information. From this paragraph, this document will *only* refer to the first two items (1, 2). We *never* recommend that users implement redundancy for validator keypairs. @@ -58,8 +58,8 @@ following flags: > Note: You could also use `--http-address 0.0.0.0`, but this allows *any* external IP address to access the HTTP server. As such, a firewall should be configured to deny unauthorized access to port `5052`. -- `--execution-endpoint`: see [Merge Migration](./merge-migration.md). -- `--execution-jwt`: see [Merge Migration](./merge-migration.md). +- `--execution-endpoint`: see [Merge Migration](./archived_merge_migration.md). +- `--execution-jwt`: see [Merge Migration](./archived_merge_migration.md). For example one could use the following command to provide a backup beacon node: @@ -107,7 +107,7 @@ The default is `--broadcast subscriptions`. To also broadcast blocks for example Lighthouse previously supported redundant execution nodes for fetching data from the deposit contract. 
On merged networks *this is no longer supported*. Each Lighthouse beacon node must be configured in a 1:1 relationship with an execution node. For more information on the rationale -behind this decision please see the [Merge Migration](./merge-migration.md) documentation. +behind this decision please see the [Merge Migration](./archived_merge_migration.md) documentation. To achieve redundancy we recommend configuring [Redundant beacon nodes](#redundant-beacon-nodes) where each has its own execution engine. diff --git a/book/src/advanced-release-candidates.md b/book/src/advanced_release_candidates.md similarity index 100% rename from book/src/advanced-release-candidates.md rename to book/src/advanced_release_candidates.md diff --git a/book/src/slasher.md b/book/src/advanced_slasher.md similarity index 99% rename from book/src/slasher.md rename to book/src/advanced_slasher.md index 3310f6c9ef..b354c9deb2 100644 --- a/book/src/slasher.md +++ b/book/src/advanced_slasher.md @@ -81,7 +81,7 @@ WARN Slasher backend override failed advice: delete old MDBX database or enab In this case you should either obtain a Lighthouse binary with the MDBX backend enabled, or delete the files for the old backend. The pre-built Lighthouse binaries and Docker images have MDBX enabled, -or if you're [building from source](./installation-source.md) you can enable the `slasher-mdbx` feature. +or if you're [building from source](./installation_source.md) you can enable the `slasher-mdbx` feature. To delete the files, use the `path` from the `WARN` log, and then delete the `mbdx.dat` and `mdbx.lck` files. diff --git a/book/src/validator-web3signer.md b/book/src/advanced_web3signer.md similarity index 95% rename from book/src/validator-web3signer.md rename to book/src/advanced_web3signer.md index 6a518af3cf..6145fd4a71 100644 --- a/book/src/validator-web3signer.md +++ b/book/src/advanced_web3signer.md @@ -30,7 +30,7 @@ or effectiveness. 
## Usage A remote signing validator is added to Lighthouse in much the same way as one that uses a local -keystore, via the [`validator_definitions.yml`](./validator-management.md) file or via the [`POST /lighthouse/validators/web3signer`](./api-vc-endpoints.md#post-lighthousevalidatorsweb3signer) API endpoint. +keystore, via the [`validator_definitions.yml`](./validator_management.md) file or via the [`POST /lighthouse/validators/web3signer`](./api_vc_endpoints.md#post-lighthousevalidatorsweb3signer) API endpoint. Here is an example of a `validator_definitions.yml` file containing one validator which uses a remote signer: diff --git a/book/src/api.md b/book/src/api.md index 5837ad9654..912c8658b6 100644 --- a/book/src/api.md +++ b/book/src/api.md @@ -5,5 +5,5 @@ RESTful HTTP/JSON APIs. There are two APIs served by Lighthouse: -- [Beacon Node API](./api-bn.md) -- [Validator Client API](./api-vc.md) +- [Beacon Node API](./api_bn.md) +- [Validator Client API](./api_vc.md) diff --git a/book/src/api-bn.md b/book/src/api_bn.md similarity index 100% rename from book/src/api-bn.md rename to book/src/api_bn.md diff --git a/book/src/api-lighthouse.md b/book/src/api_lighthouse.md similarity index 98% rename from book/src/api-lighthouse.md rename to book/src/api_lighthouse.md index 5428ab8f9a..b65bef4762 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api_lighthouse.md @@ -347,11 +347,11 @@ curl -X GET "http://localhost:5052/lighthouse/proto_array" -H "accept: applicat ## `/lighthouse/validator_inclusion/{epoch}/{validator_id}` -See [Validator Inclusion APIs](./validator-inclusion.md). +See [Validator Inclusion APIs](./api_validator_inclusion.md). ## `/lighthouse/validator_inclusion/{epoch}/global` -See [Validator Inclusion APIs](./validator-inclusion.md). +See [Validator Inclusion APIs](./api_validator_inclusion.md). 
## `/lighthouse/eth1/syncing` @@ -565,7 +565,7 @@ For archive nodes, the `anchor` will be: indicating that all states with slots `>= 0` are available, i.e., full state history. For more information on the specific meanings of these fields see the docs on [Checkpoint -Sync](./checkpoint-sync.md#reconstructing-states). +Sync](./advanced_checkpoint_sync.md#reconstructing-states). ## `/lighthouse/merge_readiness` @@ -812,9 +812,15 @@ Checks if the ports are open. curl -X GET "http://localhost:5052/lighthouse/nat" | jq ``` -An open port will return: +An example of response: ```json { - "data": true + "data": { + "discv5_ipv4": true, + "discv5_ipv6": false, + "libp2p_ipv4": true, + "libp2p_ipv6": false + } } +``` diff --git a/book/src/advanced_metrics.md b/book/src/api_metrics.md similarity index 97% rename from book/src/advanced_metrics.md rename to book/src/api_metrics.md index 323ba8f58a..c124d3acb7 100644 --- a/book/src/advanced_metrics.md +++ b/book/src/api_metrics.md @@ -68,7 +68,7 @@ The specification for the monitoring endpoint can be found here: - -_Note: the similarly named [Validator Monitor](./validator-monitoring.md) feature is entirely +_Note: the similarly named [Validator Monitor](./validator_monitoring.md) feature is entirely independent of remote metric monitoring_. ### Update Period diff --git a/book/src/validator-inclusion.md b/book/src/api_validator_inclusion.md similarity index 100% rename from book/src/validator-inclusion.md rename to book/src/api_validator_inclusion.md diff --git a/book/src/api-vc.md b/book/src/api_vc.md similarity index 91% rename from book/src/api-vc.md rename to book/src/api_vc.md index 630a032006..f5df5df76c 100644 --- a/book/src/api-vc.md +++ b/book/src/api_vc.md @@ -6,11 +6,10 @@ of validators and keys. The API includes all of the endpoints from the [standard keymanager API](https://ethereum.github.io/keymanager-APIs/) that is implemented by other clients and remote signers. 
It also includes some Lighthouse-specific endpoints which are described in -[Endpoints](./api-vc-endpoints.md). +[Endpoints](./api_vc_endpoints.md). > Note: All requests to the HTTP server must supply an -> [`Authorization`](./api-vc-auth-header.md) header. All responses contain a -> [`Signature`](./api-vc-sig-header.md) header for optional verification. +> [`Authorization`](./api_vc_auth_header.md) header. ## Starting the server diff --git a/book/src/api-vc-auth-header.md b/book/src/api_vc_auth_header.md similarity index 100% rename from book/src/api-vc-auth-header.md rename to book/src/api_vc_auth_header.md diff --git a/book/src/api-vc-endpoints.md b/book/src/api_vc_endpoints.md similarity index 97% rename from book/src/api-vc-endpoints.md rename to book/src/api_vc_endpoints.md index 98605a3dcd..a7c6f0ad5e 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api_vc_endpoints.md @@ -19,7 +19,7 @@ | [`POST /lighthouse/validators/web3signer`](#post-lighthousevalidatorsweb3signer) | Add web3signer validators. | | [`GET /lighthouse/logs`](#get-lighthouselogs) | Get logs | -The query to Lighthouse API endpoints requires authorization, see [Authorization Header](./api-vc-auth-header.md). +The query to Lighthouse API endpoints requires authorization, see [Authorization Header](./api_vc_auth_header.md). In addition to the above endpoints Lighthouse also supports all of the [standard keymanager APIs](https://ethereum.github.io/keymanager-APIs/). @@ -33,7 +33,7 @@ Returns the software version and `git` commit hash for the Lighthouse binary. |-------------------|--------------------------------------------| | Path | `/lighthouse/version` | | Method | GET | -| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200 | Command: @@ -71,7 +71,7 @@ Returns information regarding the health of the host machine. 
|-------------------|--------------------------------------------| | Path | `/lighthouse/health` | | Method | GET | -| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200 | *Note: this endpoint is presently only available on Linux.* @@ -132,7 +132,7 @@ Returns information regarding the health of the host machine. |-------------------|--------------------------------------------| | Path | `/lighthouse/ui/health` | | Method | GET | -| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200 | Command: @@ -178,7 +178,7 @@ Returns the graffiti that will be used for the next block proposal of each valid |-------------------|--------------------------------------------| | Path | `/lighthouse/ui/graffiti` | | Method | GET | -| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200 | Command: @@ -210,7 +210,7 @@ Returns the Ethereum proof-of-stake consensus specification loaded for this vali |-------------------|--------------------------------------------| | Path | `/lighthouse/spec` | | Method | GET | -| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200 | Command: @@ -326,7 +326,7 @@ Example Response Body ## `GET /lighthouse/auth` -Fetch the filesystem path of the [authorization token](./api-vc-auth-header.md). +Fetch the filesystem path of the [authorization token](./api_vc_auth_header.md). Unlike the other endpoints this may be called *without* providing an authorization token. This API is intended to be called from the same machine as the validator client, so that the token @@ -365,7 +365,7 @@ Lists all validators managed by this validator client. 
|-------------------|--------------------------------------------| | Path | `/lighthouse/validators` | | Method | GET | -| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200 | Command: @@ -409,7 +409,7 @@ Get a validator by their `voting_pubkey`. |-------------------|--------------------------------------------| | Path | `/lighthouse/validators/:voting_pubkey` | | Method | GET | -| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200, 400 | Command: @@ -441,7 +441,7 @@ and `graffiti`. The following example updates a validator from `enabled: true` |-------------------|--------------------------------------------| | Path | `/lighthouse/validators/:voting_pubkey` | | Method | PATCH | -| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200, 400 | Example Request Body @@ -491,7 +491,7 @@ Validators are generated from the mnemonic according to |-------------------|--------------------------------------------| | Path | `/lighthouse/validators` | | Method | POST | -| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200 | ### Example Request Body @@ -580,7 +580,7 @@ Import a keystore into the validator client. |-------------------|--------------------------------------------| | Path | `/lighthouse/validators/keystore` | | Method | POST | -| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200 | ### Example Request Body @@ -676,7 +676,7 @@ generated with the path `m/12381/3600/i/42`. 
|-------------------|--------------------------------------------| | Path | `/lighthouse/validators/mnemonic` | | Method | POST | -| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200 | ### Example Request Body @@ -739,7 +739,7 @@ Create any number of new validators, all of which will refer to a |-------------------|--------------------------------------------| | Path | `/lighthouse/validators/web3signer` | | Method | POST | -| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200, 400 | ### Example Request Body diff --git a/book/src/key-management.md b/book/src/archived-key-management.md similarity index 98% rename from book/src/key-management.md rename to book/src/archived-key-management.md index fa6e99a2aa..3f600794e0 100644 --- a/book/src/key-management.md +++ b/book/src/archived-key-management.md @@ -1,4 +1,4 @@ -# Key Management (Deprecated) +# Key Management [launchpad]: https://launchpad.ethereum.org/ @@ -22,7 +22,7 @@ Rather than continuing to read this page, we recommend users visit either: - The [Staking Launchpad][launchpad] for detailed, beginner-friendly instructions. - The [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) for a CLI tool used by the [Staking Launchpad][launchpad]. -- The [validator-manager documentation](./validator-manager.md) for a Lighthouse-specific tool for streamlined validator management tools. +- The [validator-manager documentation](./validator_manager.md) for a Lighthouse-specific tool for streamlined validator management tools. 
## The `lighthouse account-manager` diff --git a/book/src/merge-migration.md b/book/src/archived-merge-migration.md similarity index 99% rename from book/src/merge-migration.md rename to book/src/archived-merge-migration.md index 7a123254bf..ac9c78c5e3 100644 --- a/book/src/merge-migration.md +++ b/book/src/archived-merge-migration.md @@ -14,7 +14,7 @@ the merge: 2. If your Lighthouse node has validators attached you *must* nominate an Ethereum address to receive transactions tips from blocks proposed by your validators. These changes should be made to your `lighthouse vc` configuration, and are covered on the - [Suggested fee recipient](./suggested-fee-recipient.md) page. + [Suggested fee recipient](./validator_fee_recipient.md) page. Additionally, you *must* update Lighthouse to v3.0.0 (or later), and must update your execution engine to a merge-ready version. diff --git a/book/src/archived.md b/book/src/archived.md new file mode 100644 index 0000000000..7b6e4b7e8e --- /dev/null +++ b/book/src/archived.md @@ -0,0 +1,3 @@ +# Archived + +This section keeps the topics that are deprecated or less applicable for archived purposes. diff --git a/book/src/pi.md b/book/src/archived_pi.md similarity index 91% rename from book/src/pi.md rename to book/src/archived_pi.md index b91ecab548..6afbcebd66 100644 --- a/book/src/pi.md +++ b/book/src/archived_pi.md @@ -7,7 +7,7 @@ Tested on: - Raspberry Pi 4 Model B (4GB) - `Ubuntu 20.04 LTS (GNU/Linux 5.4.0-1011-raspi aarch64)` -*Note: [Lighthouse supports cross-compiling](./cross-compiling.md) to target a +*Note: [Lighthouse supports cross-compiling](./installation_cross_compiling.md) to target a Raspberry Pi (`aarch64`). Compiling on a faster machine (i.e., `x86_64` desktop) may be convenient.* @@ -58,7 +58,7 @@ make > > Compiling Lighthouse can take up to an hour. The safety guarantees provided by the Rust language unfortunately result in a lengthy compilation time on a low-spec CPU like a Raspberry Pi. 
For faster -compilation on low-spec hardware, try [cross-compiling](./cross-compiling.md) on a more powerful +compilation on low-spec hardware, try [cross-compiling](./installation_cross_compiling.md) on a more powerful computer (e.g., compile for RasPi from your desktop computer). Once installation has finished, confirm Lighthouse is installed by viewing the diff --git a/book/src/contributing.md b/book/src/contributing.md index 312acccbc0..332afbfd70 100644 --- a/book/src/contributing.md +++ b/book/src/contributing.md @@ -15,7 +15,7 @@ to work on. To start contributing, 1. Read our [how to contribute](https://github.com/sigp/lighthouse/blob/unstable/CONTRIBUTING.md) document. -2. Setup a [development environment](./setup.md). +2. Setup a [development environment](./contributing_setup.md). 3. Browse through the [open issues](https://github.com/sigp/lighthouse/issues) (tip: look for the [good first issue](https://github.com/sigp/lighthouse/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) @@ -127,5 +127,5 @@ suggest: - [Rust by example](https://doc.rust-lang.org/stable/rust-by-example/) - [Learning Rust With Entirely Too Many Linked Lists](http://cglab.ca/~abeinges/blah/too-many-lists/book/) - [Rustlings](https://github.com/rustlings/rustlings) -- [Rust Exercism](https://exercism.io/tracks/rust) +- [Rust Exercism](https://exercism.org/tracks/rust) - [Learn X in Y minutes - Rust](https://learnxinyminutes.com/docs/rust/) diff --git a/book/src/setup.md b/book/src/contributing_setup.md similarity index 100% rename from book/src/setup.md rename to book/src/contributing_setup.md diff --git a/book/src/faq.md b/book/src/faq.md index d23951c8c7..a741834501 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -146,7 +146,7 @@ An example of the full log is shown below: WARN BlockProcessingFailure outcome: MissingBeaconBlock(0xbdba211f8d72029554e405d8e4906690dca807d1d7b1bc8c9b88d7970f1648bc), msg: unexpected condition in processing block. 
``` -`MissingBeaconBlock` suggests that the database has corrupted. You should wipe the database and use [Checkpoint Sync](./checkpoint-sync.md) to resync the beacon chain. +`MissingBeaconBlock` suggests that the database has been corrupted. You should wipe the database and use [Checkpoint Sync](./advanced_checkpoint_sync.md) to resync the beacon chain. ### After checkpoint sync, the progress of `downloading historical blocks` is slow. Why? @@ -281,7 +281,7 @@ You should **never** use duplicate/redundant validator keypairs or validator cli duplicate your JSON keystores and don't run `lighthouse vc` twice). This will lead to slashing. However, there are some components which can be configured with redundancy. See the -[Redundancy](./redundancy.md) guide for more information. +[Redundancy](./advanced_redundancy.md) guide for more information. ### I am missing attestations. Why? @@ -323,7 +323,7 @@ Another possible reason for missing the head vote is due to a chain "reorg". A r ### Can I submit a voluntary exit message without running a beacon node? -Yes. Beaconcha.in provides the tool to broadcast the message. You can create the voluntary exit message file with [ethdo](https://github.com/wealdtech/ethdo/releases/tag/v1.30.0) and submit the message via the [beaconcha.in](https://beaconcha.in/tools/broadcast) website. A guide on how to use `ethdo` to perform voluntary exit can be found [here](https://github.com/eth-educators/ethstaker-guides/blob/main/voluntary-exit.md). +Yes. Beaconcha.in provides the tool to broadcast the message. You can create the voluntary exit message file with [ethdo](https://github.com/wealdtech/ethdo/releases/tag/v1.30.0) and submit the message via the [beaconcha.in](https://beaconcha.in/tools/broadcast) website. A guide on how to use `ethdo` to perform voluntary exit can be found [here](https://github.com/eth-educators/ethstaker-guides/blob/main/docs/voluntary-exit.md).
It is also noted that you can submit your BLS-to-execution-change message to update your withdrawal credentials from type `0x00` to `0x01` using the same link. @@ -341,13 +341,13 @@ No. You can just import new validator keys to the destination directory. If the Generally yes. -If you do not want to stop `lighthouse vc`, you can use the [key manager API](./api-vc-endpoints.md) to import keys. +If you do not want to stop `lighthouse vc`, you can use the [key manager API](./api_vc_endpoints.md) to import keys. ### How can I delete my validator once it is imported? Lighthouse supports the [KeyManager API](https://ethereum.github.io/keymanager-APIs/#/Local%20Key%20Manager/deleteKeys) to delete validators and remove them from the `validator_definitions.yml` file. To do so, start the validator client with the flag `--http` and call the API. -If you are looking to delete the validators in one node and import it to another, you can use the [validator-manager](./validator-manager-move.md) to move the validators across nodes without the hassle of deleting and importing the keys. +If you are looking to delete the validators in one node and import it to another, you can use the [validator-manager](./validator_manager_move.md) to move the validators across nodes without the hassle of deleting and importing the keys. ## Network, Monitoring and Maintenance @@ -389,9 +389,9 @@ expect, there are a few things to check on: ### How do I update lighthouse? 
-If you are updating to new release binaries, it will be the same process as described [here.](./installation-binaries.md) +If you are updating to new release binaries, it will be the same process as described [here.](./installation_binaries.md) -If you are updating by rebuilding from source, see [here.](./installation-source.md#update-lighthouse) +If you are updating by rebuilding from source, see [here.](./installation_source.md#update-lighthouse) If you are running the docker image provided by Sigma Prime on Dockerhub, you can update to specific versions, for example: @@ -399,7 +399,7 @@ If you are running the docker image provided by Sigma Prime on Dockerhub, you ca docker pull sigp/lighthouse:v1.0.0 ``` -If you are building a docker image, the process will be similar to the one described [here.](./docker.md#building-the-docker-image) +If you are building a docker image, the process will be similar to the one described [here.](./installation_docker.md#building-the-docker-image) You just need to make sure the code you have checked out is up to date. ### Do I need to set up any port mappings (port forwarding)? @@ -436,7 +436,7 @@ Opening these ports will make your Lighthouse node maximally contactable. Apart from using block explorers, you may use the "Validator Monitor" built into Lighthouse which provides logging and Prometheus/Grafana metrics for individual validators. See [Validator -Monitoring](./validator-monitoring.md) for more information. Lighthouse has also developed Lighthouse UI (Siren) to monitor performance, see [Lighthouse UI (Siren)](./lighthouse-ui.md). +Monitoring](./validator_monitoring.md) for more information. Lighthouse has also developed Lighthouse UI (Siren) to monitor performance, see [Lighthouse UI (Siren)](./ui.md). ### My beacon node and validator client are on different servers. How can I point the validator client to the beacon node? @@ -454,7 +454,7 @@ The setting on the beacon node is the same for both cases below. 
In the beacon n curl "http://local_IP:5052/eth/v1/node/version" ``` - You can refer to [Redundancy](./redundancy.md) for more information. + You can refer to [Redundancy](./advanced_redundancy.md) for more information. 2. If the beacon node and validator clients are on different servers _and different networks_, it is necessary to perform port forwarding of the SSH port (e.g., the default port 22) on the router, and also allow firewall on the SSH port. The connection can be established via port forwarding on the router. @@ -514,11 +514,11 @@ which shows that there are a total of 36 peers connected via QUIC. ### What should I do if I lose my slashing protection database? -See [here](./slashing-protection.md#misplaced-slashing-database). +See [here](./validator_slashing_protection.md#misplaced-slashing-database). ### I can't compile lighthouse -See [here.](./installation-source.md#troubleshooting) +See [here.](./installation_source.md#troubleshooting) ### How do I check the version of Lighthouse that is running? @@ -550,7 +550,7 @@ which says that the version is v4.1.0. ### Does Lighthouse have pruning function like the execution client to save disk space? -Yes, Lighthouse supports [state pruning](./database-migrations.md#how-to-prune-historic-states) which can help to save disk space. +Yes, Lighthouse supports [state pruning](./advanced_database_migrations.md#how-to-prune-historic-states) which can help to save disk space. ### Can I use a HDD for the freezer database and only have the hot db on SSD? diff --git a/book/src/help_bn.md b/book/src/help_bn.md index cebf97023d..a99aae30b1 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -286,7 +286,7 @@ Options: monitoring-endpoint. Default: 60s --network Name of the Eth2 chain Lighthouse will sync and follow. [possible - values: mainnet, gnosis, chiado, sepolia, holesky] + values: mainnet, gnosis, chiado, sepolia, holesky, hoodi] --network-dir Data directory for network keys. 
Defaults to network/ inside the beacon node dir. @@ -381,6 +381,9 @@ Options: Number of validators per chunk stored on disk. --slots-per-restore-point DEPRECATED. This flag has no effect. + --state-cache-headroom + Minimum number of states to cull from the state cache when it gets + full [default: 1] --state-cache-size Specifies the size of the state cache [default: 32] --suggested-fee-recipient diff --git a/book/src/help_general.md b/book/src/help_general.md index 4d0d4104d4..9c449b2835 100644 --- a/book/src/help_general.md +++ b/book/src/help_general.md @@ -72,7 +72,7 @@ Options: set to 0, background file logging is disabled. [default: 200] --network Name of the Eth2 chain Lighthouse will sync and follow. [possible - values: mainnet, gnosis, chiado, sepolia, holesky] + values: mainnet, gnosis, chiado, sepolia, holesky, hoodi] -t, --testnet-dir Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 7fb655910f..fb2c8de876 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -115,7 +115,7 @@ Options: monitoring-endpoint. [default: 60] --network Name of the Eth2 chain Lighthouse will sync and follow. [possible - values: mainnet, gnosis, chiado, sepolia, holesky] + values: mainnet, gnosis, chiado, sepolia, holesky, hoodi] --proposer-nodes Comma-separated addresses to one or more beacon node HTTP APIs. These specify nodes that are used to send beacon block proposals. A failure diff --git a/book/src/help_vm.md b/book/src/help_vm.md index 0d9d2a2e4b..3907064696 100644 --- a/book/src/help_vm.md +++ b/book/src/help_vm.md @@ -69,7 +69,7 @@ Options: set to 0, background file logging is disabled. [default: 200] --network Name of the Eth2 chain Lighthouse will sync and follow. 
[possible - values: mainnet, gnosis, chiado, sepolia, holesky] + values: mainnet, gnosis, chiado, sepolia, holesky, hoodi] -t, --testnet-dir Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing diff --git a/book/src/help_vm_create.md b/book/src/help_vm_create.md index 4f3774df10..d83b1000b9 100644 --- a/book/src/help_vm_create.md +++ b/book/src/help_vm_create.md @@ -78,7 +78,7 @@ Options: If present, the mnemonic will be read in from this file. --network Name of the Eth2 chain Lighthouse will sync and follow. [possible - values: mainnet, gnosis, chiado, sepolia, holesky] + values: mainnet, gnosis, chiado, sepolia, holesky, hoodi] --output-path The path to a directory where the validator and (optionally) deposits files will be created. The directory will be created if it does not diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md index 28690d3a11..2dd7d5f70a 100644 --- a/book/src/help_vm_import.md +++ b/book/src/help_vm_import.md @@ -61,7 +61,7 @@ Options: set to 0, background file logging is disabled. [default: 200] --network Name of the Eth2 chain Lighthouse will sync and follow. [possible - values: mainnet, gnosis, chiado, sepolia, holesky] + values: mainnet, gnosis, chiado, sepolia, holesky, hoodi] --password Password of the keystore file. --prefer-builder-proposals diff --git a/book/src/help_vm_move.md b/book/src/help_vm_move.md index af4a1a4d6d..2f068a1f88 100644 --- a/book/src/help_vm_move.md +++ b/book/src/help_vm_move.md @@ -65,7 +65,7 @@ Options: set to 0, background file logging is disabled. [default: 200] --network Name of the Eth2 chain Lighthouse will sync and follow. [possible - values: mainnet, gnosis, chiado, sepolia, holesky] + values: mainnet, gnosis, chiado, sepolia, holesky, hoodi] --prefer-builder-proposals If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. 
[possible values: true, diff --git a/book/src/imgs/per-epoch.png b/book/src/imgs/per-epoch.png new file mode 100644 index 0000000000..d4ac77ecbb Binary files /dev/null and b/book/src/imgs/per-epoch.png differ diff --git a/book/src/imgs/per-slot.png b/book/src/imgs/per-slot.png new file mode 100644 index 0000000000..91b9c12e4c Binary files /dev/null and b/book/src/imgs/per-slot.png differ diff --git a/book/src/installation.md b/book/src/installation.md index 137a00b918..95550e0807 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -4,18 +4,18 @@ Lighthouse runs on Linux, macOS, and Windows. There are three core methods to obtain the Lighthouse application: -- [Pre-built binaries](./installation-binaries.md). -- [Docker images](./docker.md). -- [Building from source](./installation-source.md). +- [Pre-built binaries](./installation_binaries.md). +- [Docker images](./installation_docker.md). +- [Building from source](./installation_source.md). Additionally, there are two extra guides for specific uses: -- [Raspberry Pi 4 guide](./pi.md). (Archived) -- [Cross-compiling guide for developers](./cross-compiling.md). +- [Raspberry Pi 4 guide](./archived_pi.md). (Archived) +- [Cross-compiling guide for developers](./installation_cross_compiling.md). There are also community-maintained installation methods: -- [Homebrew package](./homebrew.md). +- [Homebrew package](./installation_homebrew.md). - Arch Linux AUR packages: [source](https://aur.archlinux.org/packages/lighthouse-ethereum), [binary](https://aur.archlinux.org/packages/lighthouse-ethereum-bin). 
diff --git a/book/src/installation-binaries.md b/book/src/installation_binaries.md similarity index 100% rename from book/src/installation-binaries.md rename to book/src/installation_binaries.md diff --git a/book/src/cross-compiling.md b/book/src/installation_cross_compiling.md similarity index 90% rename from book/src/cross-compiling.md rename to book/src/installation_cross_compiling.md index c90001d561..4f6ba9af38 100644 --- a/book/src/cross-compiling.md +++ b/book/src/installation_cross_compiling.md @@ -34,10 +34,10 @@ in `lighthouse/target/aarch64-unknown-linux-gnu/release`. When using the makefile the set of features used for building can be controlled with the environment variable `CROSS_FEATURES`. See [Feature - Flags](./installation-source.md#feature-flags) for available features. + Flags](./installation_source.md#feature-flags) for available features. ## Compilation Profiles When using the makefile the build profile can be controlled with the environment variable -`CROSS_PROFILE`. See [Compilation Profiles](./installation-source.md#compilation-profiles) for +`CROSS_PROFILE`. See [Compilation Profiles](./installation_source.md#compilation-profiles) for available profiles. 
diff --git a/book/src/docker.md b/book/src/installation_docker.md similarity index 100% rename from book/src/docker.md rename to book/src/installation_docker.md diff --git a/book/src/homebrew.md b/book/src/installation_homebrew.md similarity index 100% rename from book/src/homebrew.md rename to book/src/installation_homebrew.md diff --git a/book/src/installation-priorities.md b/book/src/installation_priorities.md similarity index 100% rename from book/src/installation-priorities.md rename to book/src/installation_priorities.md diff --git a/book/src/installation-source.md b/book/src/installation_source.md similarity index 95% rename from book/src/installation-source.md rename to book/src/installation_source.md index 19098a5bc8..0aa8a99a5e 100644 --- a/book/src/installation-source.md +++ b/book/src/installation_source.md @@ -23,6 +23,8 @@ The rustup installer provides an easy way to update the Rust compiler, and works With Rust installed, follow the instructions below to install dependencies relevant to your operating system. +> Note: For Linux OS, general Linux File Systems such as Ext4 or XFS are fine. We recommend avoiding the Btrfs file system as it has been reported to be slow and the node will suffer from performance degradation as a result. ### Ubuntu Install the following packages: @@ -216,7 +218,7 @@ Rust Version (MSRV) which is listed under the `rust-version` key in Lighthouse's If compilation fails with `(signal: 9, SIGKILL: kill)`, this could mean your machine ran out of memory during compilation. If you are on a resource-constrained device you can -look into [cross compilation](./cross-compiling.md), or use a [pre-built +look into [cross compilation](./installation_cross_compiling.md), or use a [pre-built binary](https://github.com/sigp/lighthouse/releases). If compilation fails with `error: linking with cc failed: exit code: 1`, try running `cargo clean`.
diff --git a/book/src/intro.md b/book/src/intro.md index 9892a8a49d..e572904685 100644 --- a/book/src/intro.md +++ b/book/src/intro.md @@ -19,9 +19,9 @@ You may read this book from start to finish, or jump to some of these topics: - Follow the [Installation Guide](./installation.md) to install Lighthouse. - Run your very [own beacon node](./run_a_node.md). -- Learn about [becoming a mainnet validator](./mainnet-validator.md). -- Get hacking with the [Development Environment Guide](./setup.md). -- Utilize the whole stack by starting a [local testnet](./setup.md#local-testnets). +- Learn about [becoming a mainnet validator](./mainnet_validator.md). +- Get hacking with the [Development Environment Guide](./contributing_setup.md). +- Utilize the whole stack by starting a [local testnet](./contributing_setup.md#local-testnets). - Query the [RESTful HTTP API](./api.md) using `curl`. Prospective contributors can read the [Contributing](./contributing.md) section diff --git a/book/src/mainnet-validator.md b/book/src/mainnet_validator.md similarity index 96% rename from book/src/mainnet-validator.md rename to book/src/mainnet_validator.md index c53be97ccf..d21d49f0c9 100644 --- a/book/src/mainnet-validator.md +++ b/book/src/mainnet_validator.md @@ -1,9 +1,9 @@ # Become an Ethereum Consensus Mainnet Validator [launchpad]: https://launchpad.ethereum.org/ -[advanced-datadir]: ./advanced-datadir.md +[advanced-datadir]: ./advanced_datadir.md [license]: https://github.com/sigp/lighthouse/blob/stable/LICENSE -[slashing]: ./slashing-protection.md +[slashing]: ./validator_slashing_protection.md [discord]: https://discord.gg/cyAszAh Becoming an Ethereum consensus validator is rewarding, but it's not for the faint of heart. You'll need to be @@ -54,7 +54,7 @@ and follow the instructions to generate the keys. When prompted for a network, s Upon completing this step, the files `deposit_data-*.json` and `keystore-m_*.json` will be created. 
The keys that are generated from staking-deposit-cli can be easily loaded into a Lighthouse validator client (`lighthouse vc`) in [Step 3](#step-3-import-validator-keys-to-lighthouse). In fact, both of these programs are designed to work with each other. -> Lighthouse also supports creating validator keys, see [Key management](./key-management.md) for more info. +> Lighthouse also supports creating validator keys, see [Validator Manager Create](./validator_manager_create.md) for more info. ### Step 2. Start an execution client and Lighthouse beacon node @@ -99,7 +99,7 @@ Enter the keystore password, or press enter to omit it: ``` The user can choose whether or not they'd like to store the validator password -in the [`validator_definitions.yml`](./validator-management.md) file. If the +in the [`validator_definitions.yml`](./validator_management.md) file. If the password is *not* stored here, the validator client (`lighthouse vc`) application will ask for the password each time it starts. This might be nice for some users from a security perspective (i.e., if it is a shared computer), @@ -179,7 +179,7 @@ After the validator is running and performing its duties, it is important to kee The next important thing is to stay up to date with updates to Lighthouse and the execution client. Updates are released from time to time, typically once or twice a month. For Lighthouse updates, you can subscribe to notifications on [Github](https://github.com/sigp/lighthouse) by clicking on `Watch`. If you only want to receive notification on new releases, select `Custom`, then `Releases`. You could also join [Lighthouse Discord](https://discord.gg/cyAszAh) where we will make an announcement when there is a new release. -You may also want to try out [Siren](./lighthouse-ui.md), a UI developed by Lighthouse to monitor validator performance. +You may also want to try out [Siren](./ui.md), a UI developed by Lighthouse to monitor validator performance. 
Once you are familiar with running a validator and server maintenance, you'll find that running Lighthouse is easy. Install it, start it, monitor it and keep it updated. You shouldn't need to interact with it on a day-to-day basis. Happy staking! diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md index 9b9e0cba8e..15567497e5 100644 --- a/book/src/run_a_node.md +++ b/book/src/run_a_node.md @@ -129,7 +129,7 @@ INFO Downloading historical blocks est_time: 5 hrs 0 mins, speed: 111.96 slots/ Once backfill is complete, a `INFO Historical block download complete` log will be emitted. -Check out the [FAQ](./checkpoint-sync.md#faq) for more information on checkpoint sync. +Check out the [FAQ](./advanced_checkpoint_sync.md#faq) for more information on checkpoint sync. ### Logs - Syncing @@ -146,11 +146,10 @@ Once you see the above message - congratulations! This means that your node is s Several other resources are the next logical step to explore after running your beacon node: -- If you intend to run a validator, proceed to [become a validator](./mainnet-validator.md); -- Explore how to [manage your keys](./key-management.md); -- Research on [validator management](./validator-management.md); +- If you intend to run a validator, proceed to [become a validator](./mainnet_validator.md); +- Explore how to [manage your keys](./archived_key_management.md); +- Research on [validator management](./validator_management.md); - Dig into the [APIs](./api.md) that the beacon node and validator client provide; -- Study even more about [checkpoint sync](./checkpoint-sync.md); or -- Investigate what steps had to be taken in the past to execute a smooth [merge migration](./merge-migration.md). +- Study even more about [checkpoint sync](./advanced_checkpoint_sync.md). Finally, if you are struggling with anything, join our [Discord](https://discord.gg/cyAszAh). We are happy to help!
diff --git a/book/src/ui-installation.md b/book/src/ui-installation.md deleted file mode 100644 index 9cd84e5160..0000000000 --- a/book/src/ui-installation.md +++ /dev/null @@ -1,73 +0,0 @@ -# 📦 Installation - -Siren supports any operating system that supports containers and/or NodeJS 18, this includes Linux, macOS, and Windows. The recommended way of running Siren is by launching the [docker container](https://hub.docker.com/r/sigp/siren) , but running the application directly is also possible. - -## Version Requirement - -To ensure proper functionality, the Siren app requires Lighthouse v4.3.0 or higher. You can find these versions on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository. - -## Running the Docker container (Recommended) - -The most convenient way to run Siren is to use the Docker images built and published by Sigma Prime. - - They can be found on [Docker hub](https://hub.docker.com/r/sigp/siren/tags), or pulled directly with `docker pull sigp/siren` - -Configuration is done through environment variables, the easiest way to get started is by copying `.env.example` to `.env` and editing the relevant sections (typically, this would at least include adding `BEACON_URL`, `VALIDATOR_URL`, `API_TOKEN` and `SESSION_PASSWORD`) - -Then to run the image: - -`docker compose up` -or -`docker run --rm -ti --name siren -p 4443:443 --env-file $PWD/.env sigp/siren` - -This command will open port 4443, allowing your browser to connect. - -To start Siren, visit `https://localhost:4443` in your web browser. - -Advanced users can mount their own certificates, see the `SSL Certificates` section below - -## Building From Source - -### Docker - -The docker image can be built with the following command: -`docker build -f Dockerfile -t siren .` - -### Building locally - -To build from source, ensure that your system has `Node v18.18` and `yarn` installed. 
- -#### Build and run the backend - -Navigate to the backend directory `cd backend`. Install all required Node packages by running `yarn`. Once the installation is complete, compile the backend with `yarn build`. Deploy the backend in a production environment, `yarn start:production`. This ensures optimal performance. - -#### Build and run the frontend - -After initializing the backend, return to the root directory. Install all frontend dependencies by executing `yarn`. Build the frontend using `yarn build`. Start the frontend production server with `yarn start`. - -This will allow you to access siren at `http://localhost:3000` by default. - -## Advanced configuration - -### About self-signed SSL certificates - -By default, Siren will generate and use a self-signed certificate on startup. -This will generate a security warning when you try to access the interface. -We recommend to only disable SSL if you would access Siren over a local LAN or otherwise highly trusted or encrypted network (i.e. VPN). - -#### Generating persistent SSL certificates and installing them to your system - -[mkcert](https://github.com/FiloSottile/mkcert) is a tool that makes it super easy to generate a self-signed certificate that is trusted by your browser. - -To use it for `siren`, install it following the instructions. Then, run `mkdir certs; mkcert -cert-file certs/cert.pem -key-file certs/key.pem 127.0.0.1 localhost` (add or replace any IP or hostname that you would use to access it at the end of this command) - -The nginx SSL config inside Siren's container expects 3 files: `/certs/cert.pem` `/certs/key.pem` `/certs/key.pass`. If `/certs/cert.pem` does not exist, it will generate a self-signed certificate as mentioned above. If `/certs/cert.pem` does exist, it will attempt to use your provided or persisted certificates. 
- -### Configuration through environment variables - -For those who prefer to use environment variables to configure Siren instead of using an `.env` file, this is fully supported. In some cases this may even be preferred. - -#### Docker installed through `snap` - -If you installed Docker through a snap (i.e. on Ubuntu), Docker will have trouble accessing the `.env` file. In this case it is highly recommended to pass the config to the container with environment variables. -Note that the defaults in `.env.example` will be used as fallback, if no other value is provided. diff --git a/book/src/lighthouse-ui.md b/book/src/ui.md similarity index 79% rename from book/src/lighthouse-ui.md rename to book/src/ui.md index f2662f4a69..e980e90268 100644 --- a/book/src/lighthouse-ui.md +++ b/book/src/ui.md @@ -21,11 +21,11 @@ The UI is currently in active development. It resides in the See the following Siren specific topics for more context-specific information: -- [Configuration Guide](./ui-configuration.md) - Explanation of how to setup +- [Configuration Guide](./ui_configuration.md) - Explanation of how to setup and configure Siren. -- [Authentication Guide](./ui-authentication.md) - Explanation of how Siren authentication works and protects validator actions. -- [Usage](./ui-usage.md) - Details various Siren components. -- [FAQs](./ui-faqs.md) - Frequently Asked Questions. +- [Authentication Guide](./ui_authentication.md) - Explanation of how Siren authentication works and protects validator actions. +- [Usage](./ui_usage.md) - Details various Siren components. +- [FAQs](./ui_faqs.md) - Frequently Asked Questions. 
## Contributing diff --git a/book/src/ui-authentication.md b/book/src/ui_authentication.md similarity index 87% rename from book/src/ui-authentication.md rename to book/src/ui_authentication.md index 81b867bae2..36e3835e3b 100644 --- a/book/src/ui-authentication.md +++ b/book/src/ui_authentication.md @@ -2,12 +2,12 @@ ## Siren Session -For enhanced security, Siren will require users to authenticate with their session password to access the dashboard. This is crucial because Siren now includes features that can permanently alter the status of the user's validators. The session password must be set during the [configuration](./ui-configuration.md) process before running the Docker or local build, either in an `.env` file or via Docker flags. +For enhanced security, Siren will require users to authenticate with their session password to access the dashboard. This is crucial because Siren now includes features that can permanently alter the status of the user's validators. The session password must be set during the [configuration](./ui_configuration.md) process before running the Docker or local build, either in an `.env` file or via Docker flags. ![exit](imgs/ui-session.png) ## Protected Actions -Prior to executing any sensitive validator action, Siren will request authentication of the session password. If you wish to update your password please refer to the Siren [configuration process](./ui-configuration.md). +Prior to executing any sensitive validator action, Siren will request authentication of the session password. If you wish to update your password please refer to the Siren [configuration process](./ui_configuration.md). 
![exit](imgs/ui-auth.png) diff --git a/book/src/ui-configuration.md b/book/src/ui_configuration.md similarity index 99% rename from book/src/ui-configuration.md rename to book/src/ui_configuration.md index 34cc9fe7ca..64b293372b 100644 --- a/book/src/ui-configuration.md +++ b/book/src/ui_configuration.md @@ -29,7 +29,7 @@ We recommend running Siren's container next to your beacon node (on the same ser cd Siren ``` - 1. Create a configuration file in the `Siren` directory: `nano .env` and insert the following fields to the `.env` file. The field values are given here as an example, modify the fields as necessary. For example, the `API_TOKEN` can be obtained from [`Validator Client Authorization Header`](./api-vc-auth-header.md) + 1. Create a configuration file in the `Siren` directory: `nano .env` and insert the following fields to the `.env` file. The field values are given here as an example, modify the fields as necessary. For example, the `API_TOKEN` can be obtained from [`Validator Client Authorization Header`](./api_vc_auth_header.md) A full example with all possible configuration options can be found [here](https://github.com/sigp/siren/blob/stable/.env.example). diff --git a/book/src/ui-faqs.md b/book/src/ui_faqs.md similarity index 92% rename from book/src/ui-faqs.md rename to book/src/ui_faqs.md index 29de889e5f..db365e2fa0 100644 --- a/book/src/ui-faqs.md +++ b/book/src/ui_faqs.md @@ -6,11 +6,11 @@ Yes, the most current Siren version requires Lighthouse v4.3.0 or higher to func ## 2. Where can I find my API token? -The required API token may be found in the default data directory of the validator client. For more information please refer to the lighthouse ui configuration [`api token section`](./api-vc-auth-header.md). +The required API token may be found in the default data directory of the validator client. For more information please refer to the lighthouse ui configuration [`api token section`](./api_vc_auth_header.md). ## 3. 
How do I fix the Node Network Errors? -If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR you can refer to the lighthouse ui [`configuration`](./ui-configuration.md#configuration). +If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR you can refer to the lighthouse ui [`configuration`](./ui_configuration.md#configuration). ## 4. How do I connect Siren to Lighthouse from a different computer on the same network? @@ -19,7 +19,7 @@ That being said, it is entirely possible to have it published over the internet, ## 5. How can I use Siren to monitor my validators remotely when I am not at home? -Most contemporary home routers provide options for VPN access in various ways. A VPN permits a remote computer to establish a connection with internal computers within a home network. With a VPN configuration in place, connecting to the VPN enables you to treat your computer as if it is part of your local home network. The connection process involves following the setup steps for connecting via another machine on the same network on the Siren configuration page and [`configuration`](./ui-configuration.md#configuration). +Most contemporary home routers provide options for VPN access in various ways. A VPN permits a remote computer to establish a connection with internal computers within a home network. With a VPN configuration in place, connecting to the VPN enables you to treat your computer as if it is part of your local home network. The connection process involves following the setup steps for connecting via another machine on the same network on the Siren configuration page and [`configuration`](./ui_configuration.md#configuration). ## 6. Does Siren support reverse proxy or DNS named addresses? 
diff --git a/book/src/ui-usage.md b/book/src/ui_usage.md similarity index 100% rename from book/src/ui-usage.md rename to book/src/ui_usage.md diff --git a/book/src/validator-doppelganger.md b/book/src/validator_doppelganger.md similarity index 98% rename from book/src/validator-doppelganger.md rename to book/src/validator_doppelganger.md index a3d60d31b3..006df50bd9 100644 --- a/book/src/validator-doppelganger.md +++ b/book/src/validator_doppelganger.md @@ -1,8 +1,8 @@ # Doppelganger Protection [doppelgänger]: https://en.wikipedia.org/wiki/Doppelg%C3%A4nger -[Slashing Protection]: ./slashing-protection.md -[VC HTTP API]: ./api-vc.md +[Slashing Protection]: ./validator_slashing_protection.md +[VC HTTP API]: ./api_vc.md From Lighthouse `v1.5.0`, the *Doppelganger Protection* feature is available for the Validator Client. Taken from the German *[doppelgänger]*, which translates literally to "double-walker", a diff --git a/book/src/suggested-fee-recipient.md b/book/src/validator_fee_recipient.md similarity index 96% rename from book/src/suggested-fee-recipient.md rename to book/src/validator_fee_recipient.md index 4a9be7b963..2b125f5033 100644 --- a/book/src/suggested-fee-recipient.md +++ b/book/src/validator_fee_recipient.md @@ -82,7 +82,7 @@ validator client in order for the execution node to be given adequate notice of ## Setting the fee recipient dynamically using the keymanager API -When the [validator client API](api-vc.md) is enabled, the +When the [validator client API](api_vc.md) is enabled, the [standard keymanager API](https://ethereum.github.io/keymanager-APIs/) includes an endpoint for setting the fee recipient dynamically for a given public key. When used, the fee recipient will be saved in `validator_definitions.yml` so that it persists across restarts of the validator @@ -92,7 +92,7 @@ client. 
|-------------------|--------------------------------------------| | Path | `/eth/v1/validator/{pubkey}/feerecipient` | | Method | POST | -| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 202, 404 | ### Example Request Body @@ -117,7 +117,7 @@ curl -X POST \ http://localhost:5062/eth/v1/validator/${PUBKEY}/feerecipient | jq ``` -Note that an authorization header is required to interact with the API. This is specified with the header `-H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)"` which read the API token to supply the authentication. Refer to [Authorization Header](./api-vc-auth-header.md) for more information. If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. +Note that an authorization header is required to interact with the API. This is specified with the header `-H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)"` which reads the API token to supply the authentication. Refer to [Authorization Header](./api_vc_auth_header.md) for more information. If you are having permission issues with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. 
#### Successful Response (202) @@ -135,7 +135,7 @@ The same path with a `GET` request can be used to query the fee recipient for a |-------------------|--------------------------------------------| | Path | `/eth/v1/validator/{pubkey}/feerecipient` | | Method | GET | -| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200, 404 | Command: @@ -170,7 +170,7 @@ This is useful if you want the fee recipient to fall back to the validator clien |-------------------|--------------------------------------------| | Path | `/eth/v1/validator/{pubkey}/feerecipient` | | Method | DELETE | -| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 204, 404 | Command: diff --git a/book/src/graffiti.md b/book/src/validator_graffiti.md similarity index 95% rename from book/src/graffiti.md rename to book/src/validator_graffiti.md index 7b402ea866..9908d056da 100644 --- a/book/src/graffiti.md +++ b/book/src/validator_graffiti.md @@ -32,7 +32,7 @@ Lighthouse will first search for the graffiti corresponding to the public key of Users can set validator specific graffitis in `validator_definitions.yml` with the `graffiti` key. This option is recommended for static setups where the graffitis won't change on every new block proposal. -You can also update the graffitis in the `validator_definitions.yml` file using the [Lighthouse API](api-vc-endpoints.html#patch-lighthousevalidatorsvoting_pubkey). See example in [Set Graffiti via HTTP](#set-graffiti-via-http). +You can also update the graffitis in the `validator_definitions.yml` file using the [Lighthouse API](api_vc_endpoints.html#patch-lighthousevalidatorsvoting_pubkey). See example in [Set Graffiti via HTTP](#set-graffiti-via-http). 
Below is an example of the validator_definitions.yml with validator specific graffitis: @@ -74,11 +74,11 @@ Usage: `lighthouse bn --graffiti fortytwo` ## Set Graffiti via HTTP -Use the [Lighthouse API](api-vc-endpoints.md) to set graffiti on a per-validator basis. This method updates the graffiti +Use the [Lighthouse API](api_vc_endpoints.md) to set graffiti on a per-validator basis. This method updates the graffiti both in memory and in the `validator_definitions.yml` file. The new graffiti will be used in the next block proposal without requiring a validator client restart. -Refer to [Lighthouse API](api-vc-endpoints.html#patch-lighthousevalidatorsvoting_pubkey) for API specification. +Refer to [Lighthouse API](api_vc_endpoints.html#patch-lighthousevalidatorsvoting_pubkey) for API specification. ### Example Command diff --git a/book/src/validator-management.md b/book/src/validator_management.md similarity index 98% rename from book/src/validator-management.md rename to book/src/validator_management.md index b9610b6967..18abfb1538 100644 --- a/book/src/validator-management.md +++ b/book/src/validator_management.md @@ -13,7 +13,7 @@ standard directories and do not start their `lighthouse vc` with the this document. However, users with more complex needs may find this document useful. -The [lighthouse validator-manager](./validator-manager.md) command can be used +The [lighthouse validator-manager](./validator_manager.md) command can be used to create and import validators to a Lighthouse VC. It can also be used to move validators between two Lighthouse VCs. @@ -54,7 +54,7 @@ Each permitted field of the file is listed below for reference: - `enabled`: A `true`/`false` indicating if the validator client should consider this validator "enabled". - `voting_public_key`: A validator public key. -- `type`: How the validator signs messages (this can be `local_keystore` or `web3signer` (see [Web3Signer](./validator-web3signer.md))). 
+- `type`: How the validator signs messages (this can be `local_keystore` or `web3signer` (see [Web3Signer](./advanced_web3signer.md))). - `voting_keystore_path`: The path to a EIP-2335 keystore. - `voting_keystore_password_path`: The path to the password for the EIP-2335 keystore. - `voting_keystore_password`: The password to the EIP-2335 keystore. diff --git a/book/src/validator-manager.md b/book/src/validator_manager.md similarity index 93% rename from book/src/validator-manager.md rename to book/src/validator_manager.md index 11df2af037..c610340b39 100644 --- a/book/src/validator-manager.md +++ b/book/src/validator_manager.md @@ -30,6 +30,6 @@ The `validator-manager` boasts the following features: ## Guides -- [Creating and importing validators using the `create` and `import` commands.](./validator-manager-create.md) -- [Moving validators between two VCs using the `move` command.](./validator-manager-move.md) -- [Managing validators such as delete, import and list validators.](./validator-manager-api.md) +- [Creating and importing validators using the `create` and `import` commands.](./validator_manager_create.md) +- [Moving validators between two VCs using the `move` command.](./validator_manager_move.md) +- [Managing validators such as delete, import and list validators.](./validator_manager_api.md) diff --git a/book/src/validator-manager-api.md b/book/src/validator_manager_api.md similarity index 100% rename from book/src/validator-manager-api.md rename to book/src/validator_manager_api.md diff --git a/book/src/validator-manager-create.md b/book/src/validator_manager_create.md similarity index 98% rename from book/src/validator-manager-create.md rename to book/src/validator_manager_create.md index b4c86dc6da..458907bc65 100644 --- a/book/src/validator-manager-create.md +++ b/book/src/validator_manager_create.md @@ -69,7 +69,7 @@ lighthouse \ > Be sure to remove `./validators.json` after the import is successful since it > contains unencrypted validator 
keystores. -> Note: To import validators with validator-manager using keystore files created using the staking deposit CLI, refer to [Managing Validators](./validator-manager-api.md#import). +> Note: To import validators with validator-manager using keystore files created using the staking deposit CLI, refer to [Managing Validators](./validator_manager_api.md#import). ## Detailed Guide @@ -179,7 +179,7 @@ INFO Modified key_cache saved successfully The WARN message means that the `validators.json` file does not contain the slashing protection data. This is normal if you are starting a new validator. The flag `--enable-doppelganger-protection` will also protect users from potential slashing risk. The validators will now go through 2-3 epochs of [doppelganger -protection](./validator-doppelganger.md) and will automatically start performing +protection](./validator_doppelganger.md) and will automatically start performing their duties when they are deposited and activated. If the host VC contains the same public key as the `validators.json` file, an error will be shown and the `import` process will stop: diff --git a/book/src/validator-manager-move.md b/book/src/validator_manager_move.md similarity index 100% rename from book/src/validator-manager-move.md rename to book/src/validator_manager_move.md diff --git a/book/src/validator-monitoring.md b/book/src/validator_monitoring.md similarity index 98% rename from book/src/validator-monitoring.md rename to book/src/validator_monitoring.md index bbc95460ec..d7f00521c4 100644 --- a/book/src/validator-monitoring.md +++ b/book/src/validator_monitoring.md @@ -5,7 +5,7 @@ Generally users will want to use this function to track their own validators, ho used for any validator, regardless of who controls it. _Note: If you are looking for remote metric monitoring, please see the docs on -[Prometheus Metrics](./advanced_metrics.md)_. +[Prometheus Metrics](./api_metrics.md)_. 
## Monitoring is in the Beacon Node @@ -64,7 +64,7 @@ lighthouse bn --validator-monitor-pubkeys 0x933ad9491b62059dd065b560d256d8957a8c Enrolling a validator for additional monitoring results in: - Additional logs to be printed during BN operation. -- Additional [Prometheus metrics](./advanced_metrics.md) from the BN. +- Additional [Prometheus metrics](./api_metrics.md) from the BN. ### Logging diff --git a/book/src/slashing-protection.md b/book/src/validator_slashing_protection.md similarity index 97% rename from book/src/slashing-protection.md rename to book/src/validator_slashing_protection.md index 2d580f1c31..3e0fe184e5 100644 --- a/book/src/slashing-protection.md +++ b/book/src/validator_slashing_protection.md @@ -22,9 +22,9 @@ and carefully to keep your validators safe. See the [Troubleshooting](#troublesh The database will be automatically created, and your validators registered with it when: * Importing keys from another source (e.g. [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli/releases), Lodestar, Nimbus, Prysm, Teku, [ethdo](https://github.com/wealdtech/ethdo)). - See [import validator keys](./mainnet-validator.md#step-3-import-validator-keys-to-lighthouse). + See [import validator keys](./mainnet_validator.md#step-3-import-validator-keys-to-lighthouse). * Creating keys using Lighthouse itself (`lighthouse account validator create`) -* Creating keys via the [validator client API](./api-vc.md). +* Creating keys via the [validator client API](./api_vc.md). ## Avoiding Slashing @@ -79,7 +79,7 @@ lighthouse account validator slashing-protection import filename.json ``` When importing an interchange file, you still need to import the validator keystores themselves -separately, using the instructions for [import validator keys](./mainnet-validator.md#step-3-import-validator-keys-to-lighthouse). +separately, using the instructions for [import validator keys](./mainnet_validator.md#step-3-import-validator-keys-to-lighthouse). 
--- diff --git a/book/src/partial-withdrawal.md b/book/src/validator_sweep.md similarity index 75% rename from book/src/partial-withdrawal.md rename to book/src/validator_sweep.md index 26003e1f2f..b707988e84 100644 --- a/book/src/partial-withdrawal.md +++ b/book/src/validator_sweep.md @@ -1,15 +1,15 @@ -# Partial Withdrawals +# Validator "Sweeping" (Automatic Partial Withdrawals) After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023: - if a validator has a withdrawal credential type `0x00`, the rewards will continue to accumulate and will be locked in the beacon chain. -- if a validator has a withdrawal credential type `0x01`, any rewards above 32ETH will be periodically withdrawn to the withdrawal address. This is also known as the "validator sweep", i.e., once the "validator sweep" reaches your validator's index, your rewards will be withdrawn to the withdrawal address. At the time of writing, with 560,000+ validators on the Ethereum mainnet, you shall expect to receive the rewards approximately every 5 days. +- if a validator has a withdrawal credential type `0x01`, any rewards above 32ETH will be periodically withdrawn to the withdrawal address. This is also known as the "validator sweep", i.e., once the "validator sweep" reaches your validator's index, your rewards will be withdrawn to the withdrawal address. The validator sweep is automatic and it does not incur any fees to withdraw. ## FAQ 1. How to know if I have the withdrawal credentials type `0x00` or `0x01`? - Refer [here](./voluntary-exit.md#1-how-to-know-if-i-have-the-withdrawal-credentials-type-0x01). + Refer [here](./validator_voluntary_exit.md#1-how-to-know-if-i-have-the-withdrawal-credentials-type-0x01). 2. My validator has withdrawal credentials type `0x00`, is there a deadline to update my withdrawal credentials? @@ -17,7 +17,7 @@ After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12 3. 
Do I have to do anything to get my rewards after I update the withdrawal credentials to type `0x01`? - No. The "validator sweep" occurs automatically and you can expect to receive the rewards every *n* days, [more information here](./voluntary-exit.md#4-when-will-i-get-my-staked-fund-after-voluntary-exit-if-my-validator-is-of-type-0x01). + No. The "validator sweep" occurs automatically and you can expect to receive the rewards every *n* days, [more information here](./validator_voluntary_exit.md#4-when-will-i-get-my-staked-fund-after-voluntary-exit-if-my-validator-is-of-type-0x01). Figure below summarizes partial withdrawals. diff --git a/book/src/voluntary-exit.md b/book/src/validator_voluntary_exit.md similarity index 100% rename from book/src/voluntary-exit.md rename to book/src/validator_voluntary_exit.md diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 362b598c9f..c9f0c04fca 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "7.0.0-beta.0" +version = "7.0.0-beta.4" authors = ["Sigma Prime "] edition = { workspace = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 358fe12a84..1c55220b50 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -781,6 +781,45 @@ impl BeaconNodeHttpClient { self.get_opt(path).await } + /// `GET beacon/states/{state_id}/pending_deposits` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_pending_deposits( + &self, + state_id: StateId, + ) -> Result>>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("pending_deposits"); + + self.get_opt(path).await + } + + /// `GET beacon/states/{state_id}/pending_partial_withdrawals` + /// + /// Returns `Ok(None)` on a 404 error. 
+ pub async fn get_beacon_states_pending_partial_withdrawals( + &self, + state_id: StateId, + ) -> Result>>, Error> + { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("pending_partial_withdrawals"); + + self.get_opt(path).await + } + /// `GET beacon/light_client/updates` /// /// Returns `Ok(None)` on a 404 error. diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 5cdbf80b05..9839fcfda4 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1404,6 +1404,13 @@ pub struct StandardLivenessResponseData { pub is_live: bool, } +#[derive(Debug, Serialize, Deserialize)] +pub struct ManualFinalizationRequestData { + pub state_root: Hash256, + pub epoch: Epoch, + pub block_root: Hash256, +} + #[derive(Debug, Serialize, Deserialize)] pub struct LivenessRequestData { pub epoch: Epoch, diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index 50386feb8a..017bdf288d 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -35,6 +35,17 @@ const HOLESKY_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url genesis_state_root: "0x0ea3f6f9515823b59c863454675fefcd1d8b4f2dbe454db166206a41fda060a0", }; +const HOODI_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url { + urls: &[ + // This is an AWS S3 bucket hosted by Sigma Prime. See Paul Hauner for + // more details. 
+ "https://sigp-public-genesis-states.s3.ap-southeast-2.amazonaws.com/hoodi/", + ], + checksum: "0x7f42257ef69e055496c964a753bb07e54001ccd57ab467ef72d67af086bcfce7", + genesis_validators_root: "0x212f13fc4df078b6cb7db228f1c8307566dcecf900867401a92023d7ba99cb5f", + genesis_state_root: "0x2683ebc120f91f740c7bed4c866672d01e1ba51b4cc360297138465ee5df40f0", +}; + const CHIADO_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url { // No default checkpoint sources are provided. urls: &[], @@ -328,5 +339,14 @@ define_hardcoded_nets!( "holesky", // Describes how the genesis state can be obtained. HOLESKY_GENESIS_STATE_SOURCE + ), + ( + // Network name (must be unique among all networks). + hoodi, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "hoodi", + // Describes how the genesis state can be obtained. + HOODI_GENESIS_STATE_SOURCE ) ); diff --git a/common/eth2_network_config/built_in_network_configs/hoodi/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/hoodi/boot_enr.yaml new file mode 100644 index 0000000000..33eaa7e8a9 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/hoodi/boot_enr.yaml @@ -0,0 +1,13 @@ +# hoodi consensus layer bootnodes +# --------------------------------------- +# 1. Tag nodes with maintainer +# 2. Keep nodes updated +# 3. Review PRs: check ENR duplicates, fork-digest, connection. 
+ +# EF +- enr:-Mq4QLkmuSwbGBUph1r7iHopzRpdqE-gcm5LNZfcE-6T37OCZbRHi22bXZkaqnZ6XdIyEDTelnkmMEQB8w6NbnJUt9GGAZWaowaYh2F0dG5ldHOIABgAAAAAAACEZXRoMpDS8Zl_YAAJEAAIAAAAAAAAgmlkgnY0gmlwhNEmfKCEcXVpY4IyyIlzZWNwMjU2azGhA0hGa4jZJZYQAS-z6ZFK-m4GCFnWS8wfjO0bpSQn6hyEiHN5bmNuZXRzAIN0Y3CCIyiDdWRwgiMo +- enr:-Ku4QLVumWTwyOUVS4ajqq8ZuZz2ik6t3Gtq0Ozxqecj0qNZWpMnudcvTs-4jrlwYRQMQwBS8Pvtmu4ZPP2Lx3i2t7YBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpBd9cEGEAAJEP__________gmlkgnY0gmlwhNEmfKCJc2VjcDI1NmsxoQLdRlI8aCa_ELwTJhVN8k7km7IDc3pYu-FMYBs5_FiigIN1ZHCCIyk +- enr:-LK4QAYuLujoiaqCAs0-qNWj9oFws1B4iy-Hff1bRB7wpQCYSS-IIMxLWCn7sWloTJzC1SiH8Y7lMQ5I36ynGV1ASj4Eh2F0dG5ldHOIYAAAAAAAAACEZXRoMpDS8Zl_YAAJEAAIAAAAAAAAgmlkgnY0gmlwhIbRilSJc2VjcDI1NmsxoQOmI5MlAu3f5WEThAYOqoygpS2wYn0XS5NV2aYq7T0a04N0Y3CCIyiDdWRwgiMo +- enr:-Ku4QIC89sMC0o-irosD4_23lJJ4qCGOvdUz7SmoShWx0k6AaxCFTKviEHa-sa7-EzsiXpDp0qP0xzX6nKdXJX3X-IQBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpBd9cEGEAAJEP__________gmlkgnY0gmlwhIbRilSJc2VjcDI1NmsxoQK_m0f1DzDc9Cjrspm36zuRa7072HSiMGYWLsKiVSbP34N1ZHCCIyk +- enr:-Ku4QNkWjw5tNzo8DtWqKm7CnDdIq_y7xppD6c1EZSwjB8rMOkSFA1wJPLoKrq5UvA7wcxIotH6Usx3PAugEN2JMncIBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpBd9cEGEAAJEP__________gmlkgnY0gmlwhIbHuBeJc2VjcDI1NmsxoQP3FwrhFYB60djwRjAoOjttq6du94DtkQuaN99wvgqaIYN1ZHCCIyk +- enr:-OS4QMJGE13xEROqvKN1xnnt7U-noc51VXyM6wFMuL9LMhQDfo1p1dF_zFdS4OsnXz_vIYk-nQWnqJMWRDKvkSK6_CwDh2F0dG5ldHOIAAAAADAAAACGY2xpZW502IpMaWdodGhvdXNljDcuMC4wLWJldGEuM4RldGgykNLxmX9gAAkQAAgAAAAAAACCaWSCdjSCaXCEhse4F4RxdWljgiMqiXNlY3AyNTZrMaECef77P8k5l3PC_raLw42OAzdXfxeQ-58BJriNaqiRGJSIc3luY25ldHMAg3RjcIIjKIN1ZHCCIyg diff --git a/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml b/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml new file mode 100644 index 0000000000..19d7797424 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml @@ -0,0 +1,165 @@ +# Extends the mainnet preset +PRESET_BASE: mainnet +CONFIG_NAME: hoodi + +# Genesis +# 
--------------------------------------------------------------- +# `2**14` (= 16,384) +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 +# 2025-Mar-17 12:00:00 PM UTC +MIN_GENESIS_TIME: 1742212800 +GENESIS_FORK_VERSION: 0x10000910 +GENESIS_DELAY: 600 + + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x20000910 +ALTAIR_FORK_EPOCH: 0 +# Merge +BELLATRIX_FORK_VERSION: 0x30000910 +BELLATRIX_FORK_EPOCH: 0 +TERMINAL_TOTAL_DIFFICULTY: 0 +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Capella +CAPELLA_FORK_VERSION: 0x40000910 +CAPELLA_FORK_EPOCH: 0 + +# DENEB +DENEB_FORK_VERSION: 0x50000910 +DENEB_FORK_EPOCH: 0 + +# Electra +ELECTRA_FORK_VERSION: 0x60000910 +ELECTRA_FORK_EPOCH: 2048 + +# Fulu +FULU_FORK_VERSION: 0x70000910 +FULU_FORK_EPOCH: 18446744073709551615 + + +# Time parameters +# --------------------------------------------------------------- +# 12 seconds +SECONDS_PER_SLOT: 12 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 12 +# 2**8 (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs ~27 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 65536 +# [New in Deneb:EIP7514] 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 + +# Fork choice +# --------------------------------------------------------------- +# 40% 
+PROPOSER_SCORE_BOOST: 40 +# 20% +REORG_HEAD_WEIGHT_THRESHOLD: 20 +# 160% +REORG_PARENT_WEIGHT_THRESHOLD: 160 +# `2` epochs +REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + +# Deposit contract +# --------------------------------------------------------------- +DEPOSIT_CHAIN_ID: 560048 +DEPOSIT_NETWORK_ID: 560048 +DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa + +# Networking +# --------------------------------------------------------------- +# `10 * 2**20` (= 10485760, 10 MiB) +GOSSIP_MAX_SIZE: 10485760 +# `2**10` (= 1024) +MAX_REQUEST_BLOCKS: 1024 +# `2**8` (= 256) +EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 +# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +# `10 * 2**20` (=10485760, 10 MiB) +MAX_CHUNK_SIZE: 10485760 +# 5s +TTFB_TIMEOUT: 5 +# 10s +RESP_TIMEOUT: 10 +ATTESTATION_PROPAGATION_SLOT_RANGE: 32 +# 500ms +MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +# 2 subnets per node +SUBNETS_PER_NODE: 2 +# 2**8 (= 64) +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS +ATTESTATION_SUBNET_PREFIX_BITS: 6 + +# Deneb +# `2**7` (=128) +MAX_REQUEST_BLOCKS_DENEB: 128 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK +MAX_REQUEST_BLOB_SIDECARS: 768 +# `2**12` (= 4096 epochs, ~18 days) +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 +# `6` +BLOB_SIDECAR_SUBNET_COUNT: 6 +## `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 + +# Electra +# 2**7 * 10**9 (= 128,000,000,000) +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 +# `9` +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 +# `uint64(6)` +TARGET_BLOBS_PER_BLOCK_ELECTRA: 6 +# `uint64(9)` +MAX_BLOBS_PER_BLOCK_ELECTRA: 9 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA +MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 + +# 
Whisk +# `Epoch(2**8)` +WHISK_EPOCHS_PER_SHUFFLING_PHASE: 256 +# `Epoch(2)` +WHISK_PROPOSER_SELECTION_GAP: 2 + +# Fulu +NUMBER_OF_COLUMNS: 128 +NUMBER_OF_CUSTODY_GROUPS: 128 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 +SAMPLES_PER_SLOT: 8 +CUSTODY_REQUIREMENT: 4 +MAX_BLOBS_PER_BLOCK_FULU: 12 +MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 + +# EIP7732 +MAX_REQUEST_PAYLOADS: 128 diff --git a/common/eth2_network_config/built_in_network_configs/hoodi/deposit_contract_block.txt b/common/eth2_network_config/built_in_network_configs/hoodi/deposit_contract_block.txt new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/hoodi/deposit_contract_block.txt @@ -0,0 +1 @@ +0 diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index cfffdbbb09..1c62cd7b8a 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v7.0.0-beta.0-", - fallback = "Lighthouse/v7.0.0-beta.0" + prefix = "Lighthouse/v7.0.0-beta.4-", + fallback = "Lighthouse/v7.0.0-beta.4" ); /// Returns the first eight characters of the latest commit hash for this build. @@ -54,7 +54,7 @@ pub fn version_with_platform() -> String { /// /// `1.5.1` pub fn version() -> &'static str { - "7.0.0-beta.0" + "7.0.0-beta.4" } /// Returns the name of the current client running. 
diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 39615cd656..403f682a06 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -140,14 +140,20 @@ pub fn create_libp2p_discv5_tracing_layer( discv5_writer = discv5_writer.compression(Compression::Gzip); } - let Ok(libp2p_writer) = libp2p_writer.build() else { - eprintln!("Failed to initialize libp2p rolling file appender"); - std::process::exit(1); + let libp2p_writer = match libp2p_writer.build() { + Ok(writer) => writer, + Err(e) => { + eprintln!("Failed to initialize libp2p rolling file appender: {e}"); + std::process::exit(1); + } }; - let Ok(discv5_writer) = discv5_writer.build() else { - eprintln!("Failed to initialize discv5 rolling file appender"); - std::process::exit(1); + let discv5_writer = match discv5_writer.build() { + Ok(writer) => writer, + Err(e) => { + eprintln!("Failed to initialize discv5 rolling file appender: {e}"); + std::process::exit(1); + } }; let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(libp2p_writer); diff --git a/common/logging/src/sse_logging_components.rs b/common/logging/src/sse_logging_components.rs index e358fde6c6..a25b5be6c5 100644 --- a/common/logging/src/sse_logging_components.rs +++ b/common/logging/src/sse_logging_components.rs @@ -1,3 +1,4 @@ +// TODO(tracing) fix the comments below and remove reference of slog::Drain //! This module provides an implementation of `slog::Drain` that optionally writes to a channel if //! there are subscribers to a HTTP SSE stream. 
diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 28a3ecdd02..91b44c7af1 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1256,6 +1256,11 @@ where .is_finalized_checkpoint_or_descendant::(block_root) } + pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool { + self.proto_array + .is_descendant(ancestor_root, descendant_root) + } + /// Returns `Ok(true)` if `block_root` has been imported optimistically or deemed invalid. /// /// Returns `Ok(false)` if `block_root`'s execution payload has been elected as fully VALID, if diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index b224cde048..95bdee574d 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -25,6 +25,9 @@ pub type E = MainnetEthSpec; pub const VALIDATOR_COUNT: usize = 64; +// When set to true, cache any states fetched from the db. +pub const CACHE_STATE_IN_TESTS: bool = true; + /// Defines some delay between when an attestation is created and when it is mutated. pub enum MutationDelay { /// No delay between creation and mutation. @@ -373,7 +376,7 @@ impl ForkChoiceTest { let state = harness .chain .store - .get_state(&state_root, None) + .get_state(&state_root, None, CACHE_STATE_IN_TESTS) .unwrap() .unwrap(); let balances = state diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index c59449634a..34e9ff120d 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -22,6 +22,9 @@ pub const VALIDATOR_COUNT: usize = 64; pub const EPOCH_OFFSET: u64 = 4; pub const NUM_ATTESTATIONS: u64 = 1; +// When set to true, cache any states fetched from the db. +pub const CACHE_STATE_IN_TESTS: bool = true; + /// A cached set of keys. 
static KEYPAIRS: LazyLock> = LazyLock::new(|| generate_deterministic_keypairs(MAX_VALIDATOR_COUNT)); @@ -1114,9 +1117,10 @@ async fn block_replayer_peeking_state_roots() { .get_blinded_block(&parent_block_root) .unwrap() .unwrap(); + // Cache the state to make CI go brr. let parent_state = harness .chain - .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .get_state(&parent_block.state_root(), Some(parent_block.slot()), true) .unwrap() .unwrap(); diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 0dd91edc3c..e1fce47975 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -213,12 +213,16 @@ impl LightClientUpdate { .map_err(|_| Error::InconsistentFork)? { ForkName::Base => return Err(Error::AltairForkNotActive), - ForkName::Altair | ForkName::Bellatrix => { + fork_name @ ForkName::Altair | fork_name @ ForkName::Bellatrix => { let attested_header = LightClientHeaderAltair::block_to_light_client_header(attested_block)?; let finalized_header = if let Some(finalized_block) = finalized_block { - LightClientHeaderAltair::block_to_light_client_header(finalized_block)? + if finalized_block.fork_name_unchecked() == fork_name { + LightClientHeaderAltair::block_to_light_client_header(finalized_block)? + } else { + LightClientHeaderAltair::default() + } } else { LightClientHeaderAltair::default() }; @@ -233,12 +237,16 @@ impl LightClientUpdate { signature_slot: block_slot, }) } - ForkName::Capella => { + fork_name @ ForkName::Capella => { let attested_header = LightClientHeaderCapella::block_to_light_client_header(attested_block)?; let finalized_header = if let Some(finalized_block) = finalized_block { - LightClientHeaderCapella::block_to_light_client_header(finalized_block)? + if finalized_block.fork_name_unchecked() == fork_name { + LightClientHeaderCapella::block_to_light_client_header(finalized_block)? 
+ } else { + LightClientHeaderCapella::default() + } } else { LightClientHeaderCapella::default() }; @@ -253,12 +261,16 @@ impl LightClientUpdate { signature_slot: block_slot, }) } - ForkName::Deneb => { + fork_name @ ForkName::Deneb => { let attested_header = LightClientHeaderDeneb::block_to_light_client_header(attested_block)?; let finalized_header = if let Some(finalized_block) = finalized_block { - LightClientHeaderDeneb::block_to_light_client_header(finalized_block)? + if finalized_block.fork_name_unchecked() == fork_name { + LightClientHeaderDeneb::block_to_light_client_header(finalized_block)? + } else { + LightClientHeaderDeneb::default() + } } else { LightClientHeaderDeneb::default() }; @@ -273,12 +285,16 @@ impl LightClientUpdate { signature_slot: block_slot, }) } - ForkName::Electra => { + fork_name @ ForkName::Electra => { let attested_header = LightClientHeaderElectra::block_to_light_client_header(attested_block)?; let finalized_header = if let Some(finalized_block) = finalized_block { - LightClientHeaderElectra::block_to_light_client_header(finalized_block)? + if finalized_block.fork_name_unchecked() == fork_name { + LightClientHeaderElectra::block_to_light_client_header(finalized_block)? + } else { + LightClientHeaderElectra::default() + } } else { LightClientHeaderElectra::default() }; @@ -293,12 +309,16 @@ impl LightClientUpdate { signature_slot: block_slot, }) } - ForkName::Fulu => { + fork_name @ ForkName::Fulu => { let attested_header = LightClientHeaderFulu::block_to_light_client_header(attested_block)?; let finalized_header = if let Some(finalized_block) = finalized_block { - LightClientHeaderFulu::block_to_light_client_header(finalized_block)? + if finalized_block.fork_name_unchecked() == fork_name { + LightClientHeaderFulu::block_to_light_client_header(finalized_block)? 
+ } else { + LightClientHeaderFulu::default() + } } else { LightClientHeaderFulu::default() }; diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index b7c226f8cd..c4f4d1699c 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "7.0.0-beta.0" +version = "7.0.0-beta.4" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 5bfd2233f0..05f4900c46 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -553,6 +553,15 @@ fn main() { until Prague is triggered on mainnet.") .display_order(0) ) + .arg( + Arg::new("osaka-time") + .long("osaka-time") + .value_name("UNIX_TIMESTAMP") + .action(ArgAction::Set) + .help("The payload timestamp that enables Osaka. No default is provided \ + until Osaka is triggered on mainnet.") + .display_order(0) + ) ) .subcommand( Command::new("http-sync") diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index d941293e91..b968440f44 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "7.0.0-beta.0" +version = "7.0.0-beta.4" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 005d2734c7..f427836751 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -248,7 +248,7 @@ impl EnvironmentBuilder { config.log_format.clone(), config.logfile_format.clone(), config.extra_info, - true, + false, ) } Err(e) => { @@ -264,7 +264,7 @@ impl EnvironmentBuilder { config.log_format.clone(), config.logfile_format.clone(), config.extra_info, - true, + false, ) } } @@ -280,7 +280,7 @@ impl EnvironmentBuilder { config.log_format.clone(), config.logfile_format.clone(), config.extra_info, - true, + false, ) } }; diff --git a/lighthouse/environment/src/tracing_common.rs 
b/lighthouse/environment/src/tracing_common.rs index ad9060a8ff..893f50dae5 100644 --- a/lighthouse/environment/src/tracing_common.rs +++ b/lighthouse/environment/src/tracing_common.rs @@ -3,7 +3,7 @@ use clap::ArgMatches; use logging::Libp2pDiscv5TracingLayer; use logging::{tracing_logging_layer::LoggingLayer, SSELoggingComponents}; use std::process; -use tracing_subscriber::filter::{EnvFilter, FilterFn, LevelFilter}; +use tracing_subscriber::filter::{FilterFn, LevelFilter}; use types::EthSpec; pub fn construct_logger( @@ -12,7 +12,6 @@ pub fn construct_logger( environment_builder: EnvironmentBuilder, ) -> ( EnvironmentBuilder, - EnvFilter, Libp2pDiscv5TracingLayer, LoggingLayer, LoggingLayer, @@ -32,16 +31,11 @@ pub fn construct_logger( let (builder, file_logging_layer, stdout_logging_layer, sse_logging_layer_opt) = environment_builder.init_tracing(logger_config.clone(), logfile_prefix); - let filter_layer = EnvFilter::try_from_default_env() - .or_else(|_| EnvFilter::try_new(logger_config.debug_level.to_string().to_lowercase())) - .unwrap(); - let dependency_log_filter = FilterFn::new(filter_dependency_log as fn(&tracing::Metadata<'_>) -> bool); ( builder, - filter_layer, libp2p_discv5_layer, file_logging_layer, stdout_logging_layer, diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 8df4831503..a2432e282d 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -592,7 +592,6 @@ fn run( let ( builder, - filter_layer, libp2p_discv5_layer, file_logging_layer, stdout_logging_layer, @@ -622,7 +621,6 @@ fn run( let logging = tracing_subscriber::registry() .with(dependency_log_filter) - .with(filter_layer) .with(file_logging_layer.with_filter(logger_config.logfile_debug_level)) .with(stdout_logging_layer.with_filter(logger_config.debug_level)) .with(MetricsLayer) diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index d9b40ffb1f..1fb9e40c23 100644 --- a/lighthouse/tests/beacon_node.rs +++ 
b/lighthouse/tests/beacon_node.rs @@ -1883,6 +1883,21 @@ fn state_cache_size_flag() { .with_config(|config| assert_eq!(config.store.state_cache_size, new_non_zero_usize(64))); } #[test] +fn state_cache_headroom_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.store.state_cache_headroom, new_non_zero_usize(1))); +} +#[test] +fn state_cache_headroom_flag() { + CommandLineTest::new() + .flag("state-cache-headroom", Some("16")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.store.state_cache_headroom, new_non_zero_usize(16)) + }); +} +#[test] fn historic_state_cache_size_flag() { CommandLineTest::new() .flag("historic-state-cache-size", Some("4")) @@ -2797,3 +2812,43 @@ fn data_column_publishing_delay_for_testing() { ); }); } + +#[test] +fn invalid_block_roots_flag() { + let dir = TempDir::new().expect("Unable to create temporary directory"); + let mut file = + File::create(dir.path().join("invalid-block-roots")).expect("Unable to create file"); + file.write_all(b"2db899881ed8546476d0b92c6aa9110bea9a4cd0dbeb5519eb0ea69575f1f359, 2db899881ed8546476d0b92c6aa9110bea9a4cd0dbeb5519eb0ea69575f1f358, 0x3db899881ed8546476d0b92c6aa9110bea9a4cd0dbeb5519eb0ea69575f1f358") + .expect("Unable to write to file"); + CommandLineTest::new() + .flag( + "invalid-block-roots", + dir.path().join("invalid-block-roots").as_os_str().to_str(), + ) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.invalid_block_roots.len(), 3)) +} + +#[test] +fn invalid_block_roots_default_holesky() { + use beacon_node::beacon_chain::chain_config::INVALID_HOLESKY_BLOCK_ROOT; + CommandLineTest::new() + .flag("network", Some("holesky")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.chain.invalid_block_roots.len(), 1); + assert!(config + .chain + .invalid_block_roots + .contains(&*INVALID_HOLESKY_BLOCK_ROOT)); + }) +} + +#[test] +fn invalid_block_roots_default_mainnet() { + 
CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert!(config.chain.invalid_block_roots.is_empty()); + }) +} diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 2f97cdf5b9..01c87b40fc 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -29,6 +29,9 @@ use types::{ IndexedAttestation, KzgProof, ProposerPreparationData, SignedBeaconBlock, Slot, Uint256, }; +// When set to true, cache any states fetched from the db. +pub const CACHE_STATE_IN_TESTS: bool = true; + #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] #[serde(deny_unknown_fields)] pub struct PowBlock { @@ -545,10 +548,15 @@ impl Tester { .unwrap() { let parent_state_root = parent_block.state_root(); + let mut state = self .harness .chain - .get_state(&parent_state_root, Some(parent_block.slot())) + .get_state( + &parent_state_root, + Some(parent_block.slot()), + CACHE_STATE_IN_TESTS, + ) .unwrap() .unwrap(); diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index fff5c71a87..4cd599f845 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -90,7 +90,6 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { let ( env_builder, - filter_layer, _libp2p_discv5_layer, file_logging_layer, stdout_logging_layer, @@ -119,7 +118,6 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { ); if let Err(e) = tracing_subscriber::registry() - .with(filter_layer) .with(file_logging_layer.with_filter(logger_config.logfile_debug_level)) .with(stdout_logging_layer.with_filter(logger_config.debug_level)) .with(MetricsLayer) diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 03cc17fab3..35c2508b53 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -264,6 +264,11 @@ pub(crate) async fn verify_light_client_updates( let 
slot = Slot::new(slot); let previous_slot = slot - 1; + let sync_committee_period = slot + .epoch(E::slots_per_epoch()) + .sync_committee_period(&E::default_spec()) + .unwrap(); + let previous_slot_block = client .get_beacon_blocks::(BlockId::Slot(previous_slot)) .await @@ -329,6 +334,20 @@ pub(crate) async fn verify_light_client_updates( "Existing finality update too old: signature slot {signature_slot}, current slot {slot:?}" )); } + + let light_client_updates = client + .get_beacon_light_client_updates::(sync_committee_period, 1) + .await + .map_err(|e| format!("Error while getting light client update: {:?}", e))? + .ok_or(format!("Light client update not found {slot:?}"))?; + + // Ensure we're only storing a single light client update for the given sync committee period + if light_client_updates.len() != 1 { + return Err(format!( + "{} light client updates was returned when only one was expected.", + light_client_updates.len() + )); + } } Ok(()) diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index 98a6a34ffa..384699c64c 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -94,7 +94,6 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { let ( env_builder, - filter_layer, libp2p_discv5_layer, file_logging_layer, stdout_logging_layer, @@ -124,7 +123,6 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { if let Err(e) = tracing_subscriber::registry() .with(dependency_log_filter) - .with(filter_layer) .with(file_logging_layer.with_filter(logger_config.logfile_debug_level)) .with(stdout_logging_layer.with_filter(logger_config.debug_level)) .with(libp2p_discv5_layer) diff --git a/testing/validator_test_rig/Cargo.toml b/testing/validator_test_rig/Cargo.toml index bdbdac95d8..f28a423433 100644 --- a/testing/validator_test_rig/Cargo.toml +++ b/testing/validator_test_rig/Cargo.toml @@ -5,7 +5,6 @@ edition = { workspace = true } [dependencies] 
eth2 = { workspace = true } -logging = { workspace = true } mockito = { workspace = true } regex = { workspace = true } sensitive_url = { workspace = true } diff --git a/wordlist.txt b/wordlist.txt index bb8b46b525..7adbfe9032 100644 --- a/wordlist.txt +++ b/wordlist.txt @@ -12,6 +12,7 @@ BN BNs BTC BTEC +Btrfs Casper CentOS Chiado @@ -38,6 +39,7 @@ Exercism Extractable FFG Geth +GiB Gitcoin Gnosis Goerli @@ -91,6 +93,7 @@ TLS TODOs UDP UI +Uncached UPnP USD UX @@ -100,6 +103,7 @@ VCs VPN Withdrawable WSL +XFS YAML aarch anonymize @@ -196,6 +200,7 @@ redb reimport resync roadmap +routable rustfmt rustup schemas @@ -220,6 +225,7 @@ tweakers ui unadvanced unaggregated +uncached unencrypted unfinalized untrusted